From fa73055b8442c97b3ba7cd0aa57cd2ad32124201 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 7 Mar 2018 17:13:03 +0000
Subject: [PATCH 0001/1286] drm/i915: Only prune fences after wait-for-all

Currently, we only allow ourselves to prune the fences so long as
all the waits completed (i.e. all the fences we checked were signaled)
and the reservation snapshot did not change across the wait.
However, if we only waited for a subset of the reservation object, i.e.
just for the last writer to complete as opposed to all readers as well,
then we would erroneously conclude we could prune the fences: although
all of our waits were successful, they did not cover the totality of
the reservation object.

v2: We only need to check the shared fences due to construction (i.e.
all of the shared fences will be later than the exclusive fence, if
any).
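
As a minimal sketch of the rule above (simplified, hypothetical helper,
not the driver code): pruning is only legal when the wait covered every
shared fence and none of the waits failed or timed out:

  static bool may_prune_fences(unsigned int shared_count,
                               unsigned int waited_count,
                               long timeout)
  {
          /* every shared fence was waited upon and all waits succeeded */
          return waited_count && waited_count == shared_count && timeout >= 0;
  }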

Fixes: e54ca9774777 ("drm/i915: Remove completed fences after a wait")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307171303.29466-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a5bd07338b46..ab88ca53c9a0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -433,20 +433,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 
+		/*
+		 * If both shared fences and an exclusive fence exist,
+		 * then by construction the shared fences must be later
+		 * than the exclusive fence. If we successfully wait for
+		 * all the shared fences, we know that the exclusive fence
+		 * must all be signaled. If all the shared fences are
+		 * signaled, we can prune the array and recover the
+		 * floating references on the fences/requests.
+		 */
 		prune_fences = count && timeout >= 0;
 	} else {
 		excl = reservation_object_get_excl_rcu(resv);
 	}
 
-	if (excl && timeout >= 0) {
+	if (excl && timeout >= 0)
 		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
 						     rps_client);
-		prune_fences = timeout >= 0;
-	}
 
 	dma_fence_put(excl);
 
-	/* Oportunistically prune the fences iff we know they have *all* been
+	/*
+	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled and that the reservation object has not been changed (i.e.
 	 * no new fences have been added).
 	 */

From 033b7a230cfa759d1b582fa46bf2cd54db406cf3 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Thu, 8 Mar 2018 13:02:02 +0100
Subject: [PATCH 0002/1286] drm/i915: Handle pipe CRC around enabling/disabling
 pipe.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This will get rid of the following error:
[   74.730271] WARNING: CPU: 4 PID: 0 at drivers/gpu/drm/drm_vblank.c:614 drm_calc_vbltimestamp_from_scanoutpos+0x13e/0x2f0
[   74.730311] Modules linked in: vgem snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic i915 x86_pkg_temp_thermal intel_powerclamp coretemp snd_hda_intel crct10dif_pclmul snd_hda_codec crc32_pclmul snd_hwdep broadcom ghash_clmulni_intel snd_hda_core bcm_phy_lib snd_pcm tg3 lpc_ich mei_me mei prime_numbers
[   74.730353] CPU: 4 PID: 0 Comm: swapper/4 Tainted: G     U           4.16.0-rc2-CI-CI_DRM_3822+ #1
[   74.730355] Hardware name: Dell Inc. XPS 8300  /0Y2MRG, BIOS A06 10/17/2011
[   74.730359] RIP: 0010:drm_calc_vbltimestamp_from_scanoutpos+0x13e/0x2f0
[   74.730361] RSP: 0018:ffff88022fb03d10 EFLAGS: 00010086
[   74.730365] RAX: ffffffffa0291d20 RBX: ffff88021a180000 RCX: 0000000000000001
[   74.730367] RDX: ffffffff820e7db8 RSI: 0000000000000001 RDI: ffffffff82068cea
[   74.730369] RBP: ffff88022fb03d70 R08: 0000000000000000 R09: ffffffff815d26d0
[   74.730371] R10: 0000000000000000 R11: ffffffffa0161ca0 R12: 0000000000000001
[   74.730373] R13: ffff880212448008 R14: ffff880212448330 R15: 0000000000000000
[   74.730376] FS:  0000000000000000(0000) GS:ffff88022fb00000(0000) knlGS:0000000000000000
[   74.730378] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   74.730380] CR2: 000055edcbec9000 CR3: 0000000002210001 CR4: 00000000000606e0
[   74.730382] Call Trace:
[   74.730385]  <IRQ>
[   74.730397]  drm_get_last_vbltimestamp+0x36/0x50
[   74.730401]  drm_update_vblank_count+0x64/0x240
[   74.730409]  drm_crtc_accurate_vblank_count+0x41/0x90
[   74.730453]  display_pipe_crc_irq_handler+0x176/0x220 [i915]
[   74.730497]  i9xx_pipe_crc_irq_handler+0xfe/0x150 [i915]
[   74.730537]  ironlake_irq_handler+0x618/0xa30 [i915]
[   74.730548]  __handle_irq_event_percpu+0x3c/0x340
[   74.730556]  handle_irq_event_percpu+0x1b/0x50
[   74.730561]  handle_irq_event+0x2f/0x50
[   74.730566]  handle_edge_irq+0xe4/0x1b0
[   74.730572]  handle_irq+0x11/0x20
[   74.730576]  do_IRQ+0x5e/0x120
[   74.730584]  common_interrupt+0x84/0x84
[   74.730586]  </IRQ>
[   74.730591] RIP: 0010:cpuidle_enter_state+0xaa/0x350
[   74.730593] RSP: 0018:ffffc9000008beb8 EFLAGS: 00000212 ORIG_RAX: ffffffffffffffde
[   74.730597] RAX: ffff880226b80040 RBX: 000000000031fc3e RCX: 0000000000000001
[   74.730599] RDX: 0000000000000000 RSI: ffffffff8210fb59 RDI: ffffffff820c02e7
[   74.730601] RBP: 0000000000000004 R08: 00000000000040af R09: 0000000000000018
[   74.730603] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000004
[   74.730606] R13: ffffe8ffffd00430 R14: 0000001166120bf4 R15: ffffffff82294460
[   74.730621]  ? cpuidle_enter_state+0xa6/0x350
[   74.730629]  do_idle+0x188/0x1d0
[   74.730636]  cpu_startup_entry+0x14/0x20
[   74.730641]  start_secondary+0x129/0x160
[   74.730646]  secondary_startup_64+0xa5/0xb0
[   74.730660] Code: e1 48 c7 c2 b8 7d 0e 82 be 01 00 00 00 48 c7 c7 ea 8c 06 82 e8 64 ec ff ff 48 8b 83 c8 07 00 00 48 83 78 28 00 0f 84 e2 fe ff ff <0f> 0b 45 31 ed e9 db fe ff ff 41 b8 d3 4d 62 10 89 c8 6a 03 41
[   74.730754] ---[ end trace 14b1345705b68565 ]---

Changes since v1:
- Don't try to apply the CRC workaround when enabling the pipe; it should already be enabled.
Changes since v2:
- Make crc functions for !DEBUGFS case inline.
- Pass intel_crtc to crc functions.
- Add comments to callsites.
Changes since v3:
- Cache selected source to pipe_crc->source.
- Set pipe_crc->skipped to INT_MIN during disable to close a race condition.
Changes since v4:
- Handle fallout from setting pipe_crc->source in irq handler.
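
For reference, a condensed sketch of the disable path from v3/v4 above
(names as in the patch below, surrounding code elided): mark the CRC
state as "swallow everything" before turning the source off, so any CRC
interrupt still in flight is discarded instead of being sampled with
vblanks off:

  spin_lock_irq(&pipe_crc->lock);
  pipe_crc->skipped = INT_MIN;    /* irq handler skips while <= 0 */
  spin_unlock_irq(&pipe_crc->lock);

  I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
  POSTING_READ(PIPE_CRC_CTL(crtc->index));
  synchronize_irq(dev_priv->drm.irq);    /* wait out running handlers */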

Cc: Marta Löfstedt <marta.lofstedt@intel.com>
Reported-by: Marta Löfstedt <marta.lofstedt@intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105185
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308120202.52446-1-maarten.lankhorst@linux.intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_irq.c       |  4 +-
 drivers/gpu/drm/i915/intel_display.c  | 10 +++++
 drivers/gpu/drm/i915/intel_drv.h      |  9 +++++
 drivers/gpu/drm/i915/intel_pipe_crc.c | 53 +++++++++++++++++++++++----
 4 files changed, 67 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 633c18785c1e..babf81cf668b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1627,7 +1627,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 	int head, tail;
 
 	spin_lock(&pipe_crc->lock);
-	if (pipe_crc->source) {
+	if (pipe_crc->source && !crtc->base.crc.opened) {
 		if (!pipe_crc->entries) {
 			spin_unlock(&pipe_crc->lock);
 			DRM_DEBUG_KMS("spurious interrupt\n");
@@ -1667,7 +1667,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 		 * On GEN8+ sometimes the second CRC is bonkers as well, so
 		 * don't trust that one either.
 		 */
-		if (pipe_crc->skipped == 0 ||
+		if (pipe_crc->skipped <= 0 ||
 		    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
 			pipe_crc->skipped++;
 			spin_unlock(&pipe_crc->lock);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ceed0821b37d..f424fff477f6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12145,6 +12145,9 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 	if (modeset) {
 		update_scanline_offset(intel_crtc);
 		dev_priv->display.crtc_enable(pipe_config, state);
+
+		/* vblanks work again, re-enable pipe CRC. */
+		intel_crtc_enable_pipe_crc(intel_crtc);
 	} else {
 		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
 				       pipe_config);
@@ -12325,6 +12328,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
 		if (old_crtc_state->active) {
 			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+
+			/*
+			 * We need to disable pipe CRC before disabling the pipe,
+			 * or we race against vblank off.
+			 */
+			intel_crtc_disable_pipe_crc(intel_crtc);
+
 			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
 			intel_crtc->active = false;
 			intel_fbc_disable(intel_crtc);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 37d5412af8f5..83e5ca889d9c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2136,8 +2136,17 @@ int intel_pipe_crc_create(struct drm_minor *minor);
 #ifdef CONFIG_DEBUG_FS
 int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
 			      size_t *values_cnt);
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
 #else
 #define intel_crtc_set_crc_source NULL
+static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+
+static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
+{
+}
 #endif
 extern const struct file_operations i915_display_crc_ctl_fops;
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 1f5cd572a7ff..4f367c16e9e5 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -569,7 +569,8 @@ unlock:
 static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				enum pipe pipe,
 				enum intel_pipe_crc_source *source,
-				uint32_t *val)
+				uint32_t *val,
+				bool set_wa)
 {
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
 		*source = INTEL_PIPE_CRC_SOURCE_PF;
@@ -582,7 +583,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
-		if ((IS_HASWELL(dev_priv) ||
+		if (set_wa && (IS_HASWELL(dev_priv) ||
 		     IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
 			hsw_pipe_A_crc_wa(dev_priv, true);
 
@@ -600,7 +601,8 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 
 static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
 			       enum pipe pipe,
-			       enum intel_pipe_crc_source *source, u32 *val)
+			       enum intel_pipe_crc_source *source, u32 *val,
+			       bool set_wa)
 {
 	if (IS_GEN2(dev_priv))
 		return i8xx_pipe_crc_ctl_reg(source, val);
@@ -611,7 +613,7 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
 	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
 		return ilk_pipe_crc_ctl_reg(source, val);
 	else
-		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
 }
 
 static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
@@ -636,7 +638,7 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
 		return -EIO;
 	}
 
-	ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+	ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val, true);
 	if (ret != 0)
 		goto out;
 
@@ -916,7 +918,7 @@ int intel_pipe_crc_create(struct drm_minor *minor)
 int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
 			      size_t *values_cnt)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
 	enum intel_display_power_domain power_domain;
 	enum intel_pipe_crc_source source;
@@ -934,10 +936,11 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
 		return -EIO;
 	}
 
-	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
+	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true);
 	if (ret != 0)
 		goto out;
 
+	pipe_crc->source = source;
 	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
 	POSTING_READ(PIPE_CRC_CTL(crtc->index));
 
@@ -959,3 +962,39 @@ out:
 
 	return ret;
 }
+
+void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+	u32 val = 0;
+
+	if (!crtc->crc.opened)
+		return;
+
+	if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0)
+		return;
+
+	/* Don't need pipe_crc->lock here, IRQs are not generated. */
+	pipe_crc->skipped = 0;
+
+	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
+	POSTING_READ(PIPE_CRC_CTL(crtc->index));
+}
+
+void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+
+	/* Swallow crc's until we stop generating them. */
+	spin_lock_irq(&pipe_crc->lock);
+	pipe_crc->skipped = INT_MIN;
+	spin_unlock_irq(&pipe_crc->lock);
+
+	I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
+	POSTING_READ(PIPE_CRC_CTL(crtc->index));
+	synchronize_irq(dev_priv->drm.irq);
+}

From 59cd31f177b34deb834a5c97478502741be1cf2e Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 8 Mar 2018 14:26:47 +0000
Subject: [PATCH 0003/1286] drm/i915: Kick the rps worker when changing the
 boost frequency

The boost frequency is only applied from the RPS worker while someone is
waiting on a request and has requested a boost. As such, when the user
wishes to change the boost frequency, we have to kick the worker in
order to re-evaluate whether to apply the new boost frequency.

v2: Check num_waiters to decide if we should kick the worker to handle
boosting.
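
A minimal sketch of the store path described above (condensed from the
patch below, locking elided): only kick the worker when the value has
actually changed and there are waiters that could use the new boost:

  if (val != rps->boost_freq) {
          rps->boost_freq = val;
          if (atomic_read(&rps->num_waiters))
                  schedule_work(&rps->work);
  }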

Fixes: 29ecd78d3b79 ("drm/i915: Define a separate variable and control for RPS waitboost frequency")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308142648.4016-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_sysfs.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index b33d2158c234..e5e6f6bb2b05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
+	bool boost = false;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->pcu_lock);
-	rps->boost_freq = val;
+	if (val != rps->boost_freq) {
+		rps->boost_freq = val;
+		boost = atomic_read(&rps->num_waiters);
+	}
 	mutex_unlock(&dev_priv->pcu_lock);
+	if (boost)
+		schedule_work(&rps->work);
 
 	return count;
 }

From d586b5f4cf0792f69644d1aea171f82d029fb5ed Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 8 Mar 2018 14:26:48 +0000
Subject: [PATCH 0004/1286] drm/i915: Index the ring frequency table by HW
 frequency range

When reporting the frequency table stored in the punit, report the full
range and not just the user-restricted frequency range. In the process,
keep the code that sets the frequency table and the code that reads it
back consistent.

v3: As we haven't separated the sb_lock from the pcu_lock yet, there's a
cycle between the pcu_lock and intel_runtime_pm_get.

References: f936ec34dea8 ("drm/i915/skl: Updated the i915_ring_freq_table debugfs function")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com> #v1
Link: https://patchwork.freedesktop.org/patch/msgid/20180308142648.4016-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c | 13 ++++++-------
 drivers/gpu/drm/i915/intel_pm.c     |  9 ++++-----
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 89f7ff2c652e..d8bc1bb30cb4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1796,9 +1796,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	int ret = 0;
-	int gpu_freq, ia_freq;
 	unsigned int max_gpu_freq, min_gpu_freq;
+	int gpu_freq, ia_freq;
+	int ret;
 
 	if (!HAS_LLC(dev_priv))
 		return -ENODEV;
@@ -1809,13 +1809,12 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	if (ret)
 		goto out;
 
+	min_gpu_freq = rps->min_freq;
+	max_gpu_freq = rps->max_freq;
 	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
 		/* Convert GT frequency to 50 HZ units */
-		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
-		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
-	} else {
-		min_gpu_freq = rps->min_freq_softlimit;
-		max_gpu_freq = rps->max_freq_softlimit;
+		min_gpu_freq /= GEN9_FREQ_SCALER;
+		max_gpu_freq /= GEN9_FREQ_SCALER;
 	}
 
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b8da4dcdd584..dd5ddb77b306 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6918,13 +6918,12 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 	/* convert DDR frequency from units of 266.6MHz to bandwidth */
 	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
+	min_gpu_freq = rps->min_freq;
+	max_gpu_freq = rps->max_freq;
 	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
 		/* Convert GT frequency to 50 HZ units */
-		min_gpu_freq = rps->min_freq / GEN9_FREQ_SCALER;
-		max_gpu_freq = rps->max_freq / GEN9_FREQ_SCALER;
-	} else {
-		min_gpu_freq = rps->min_freq;
-		max_gpu_freq = rps->max_freq;
+		min_gpu_freq /= GEN9_FREQ_SCALER;
+		max_gpu_freq /= GEN9_FREQ_SCALER;
 	}
 
 	/*

From 51f6b0f99cab765477a636443ce63295b76b9bb4 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 9 Mar 2018 01:08:08 +0000
Subject: [PATCH 0005/1286] drm/i915: Push irq_shift from gen8_cs_irq_handler()
 to caller

Originally we were inlining gen8_cs_irq_handler() and so expected the
compiler to constant-fold away the irq_shift (so we had hardcoded it as
opposed to using engine->irq_shift). However, we dropped the inline given
the proliferation of gen8_cs_irq_handler()s. If we pull the shifting
of the iir into the caller, we can shrink the code still further:

add/remove: 0/0 grow/shrink: 0/3 up/down: 0/-34 (-34)
Function                                     old     new   delta
gen8_cs_irq_handler                          123     118      -5
gen8_gt_irq_handler                          261     248     -13
gen11_irq_handler                            722     706     -16

v2: Drop gen11_cs_irq_handler now that it is a simple
stub around gen8_cs_irq_handler (Daniele)
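
The calling-convention change, in brief (illustrative only, the full
hunks follow below):

  /* before: handler applies the per-engine shift itself */
  gen8_cs_irq_handler(i915->engine[RCS], gt_iir[0], GEN8_RCS_IRQ_SHIFT);

  /* after: caller shifts, handler tests unshifted bits */
  gen8_cs_irq_handler(i915->engine[RCS], gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);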

References: 5d3d69d5c119 ("drm/i915: Stop inlining the execlists IRQ handler")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180309010808.11921-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_irq.c | 38 ++++++++++++++-------------------
 1 file changed, 16 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index babf81cf668b..c8c29d8ecbab 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1399,19 +1399,19 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 }
 
 static void
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	bool tasklet = false;
 
-	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
+	if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
 		if (READ_ONCE(engine->execlists.active)) {
 			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 			tasklet = true;
 		}
 	}
 
-	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
+	if (iir & GT_RENDER_USER_INTERRUPT) {
 		notify_ring(engine);
 		tasklet |= USES_GUC_SUBMISSION(engine->i915);
 	}
@@ -1466,21 +1466,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
 {
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
 		gen8_cs_irq_handler(i915->engine[RCS],
-				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
+				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
 		gen8_cs_irq_handler(i915->engine[BCS],
-				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
+				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
 	}
 
 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
 		gen8_cs_irq_handler(i915->engine[VCS],
-				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
+				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
 		gen8_cs_irq_handler(i915->engine[VCS2],
-				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
+				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
 	}
 
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
 		gen8_cs_irq_handler(i915->engine[VECS],
-				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
 	}
 
 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
@@ -2762,12 +2762,6 @@ static void __fini_wedge(struct wedge_me *w)
 	     (W)->i915;							\
 	     __fini_wedge((W)))
 
-static __always_inline void
-gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir)
-{
-	gen8_cs_irq_handler(engine, iir, 0);
-}
-
 static void
 gen11_gt_engine_irq_handler(struct drm_i915_private * const i915,
 			    const unsigned int bank,
@@ -2781,27 +2775,27 @@ gen11_gt_engine_irq_handler(struct drm_i915_private * const i915,
 		switch (engine_n) {
 
 		case GEN11_RCS0:
-			return gen11_cs_irq_handler(engine[RCS], iir);
+			return gen8_cs_irq_handler(engine[RCS], iir);
 
 		case GEN11_BCS:
-			return gen11_cs_irq_handler(engine[BCS], iir);
+			return gen8_cs_irq_handler(engine[BCS], iir);
 		}
 	case 1:
 		switch (engine_n) {
 
 		case GEN11_VCS(0):
-			return gen11_cs_irq_handler(engine[_VCS(0)], iir);
+			return gen8_cs_irq_handler(engine[_VCS(0)], iir);
 		case GEN11_VCS(1):
-			return gen11_cs_irq_handler(engine[_VCS(1)], iir);
+			return gen8_cs_irq_handler(engine[_VCS(1)], iir);
 		case GEN11_VCS(2):
-			return gen11_cs_irq_handler(engine[_VCS(2)], iir);
+			return gen8_cs_irq_handler(engine[_VCS(2)], iir);
 		case GEN11_VCS(3):
-			return gen11_cs_irq_handler(engine[_VCS(3)], iir);
+			return gen8_cs_irq_handler(engine[_VCS(3)], iir);
 
 		case GEN11_VECS(0):
-			return gen11_cs_irq_handler(engine[_VECS(0)], iir);
+			return gen8_cs_irq_handler(engine[_VECS(0)], iir);
 		case GEN11_VECS(1):
-			return gen11_cs_irq_handler(engine[_VECS(1)], iir);
+			return gen8_cs_irq_handler(engine[_VECS(1)], iir);
 		}
 	}
 }

From 1e6aa7e55c28ecd842b8b4599e4273c2429ee061 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Tue, 6 Mar 2018 12:41:55 +0200
Subject: [PATCH 0006/1286] drm/i915/icl: do not save DDI A/E sharing bit for
 ICL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We don't want to preserve the DDI A 4 lane bit on ICL.

Fixes: 3d2011cfa41f ("drm/i915/icl: remove port A/E lane sharing limitation.")
Cc: Mahesh Kumar <mahesh1.kumar@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180306104155.3526-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_ddi.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ac8fc2a44ac6..dbcf1a0586f9 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3080,9 +3080,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
 
-	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
-					  (DDI_BUF_PORT_REVERSAL |
-					   DDI_A_4_LANES);
+	if (INTEL_GEN(dev_priv) >= 11)
+		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+			DDI_BUF_PORT_REVERSAL;
+	else
+		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+			(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
 	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
 	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
 

From 2d4ecace3a7861c6071235a6cc88067b8c3eec4a Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 7 Mar 2018 13:42:21 +0000
Subject: [PATCH 0007/1286] drm/i915: Finish the wait-for-wedge by retiring all
 the inflight requests

Before we reset the GPU after marking the device as wedged, we wait for
all the remaining requests to be completed (and marked as EIO).
Afterwards, we should flush the request lists so the next batch starts
with the driver in an idle state.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307134226.25492-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ab88ca53c9a0..c3d650706329 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3281,7 +3281,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
 		return true;
 
-	/* Before unwedging, make sure that all pending operations
+	/*
+	 * Before unwedging, make sure that all pending operations
 	 * are flushed and errored out - we may have requests waiting upon
 	 * third party fences. We marked all inflight requests as EIO, and
 	 * every execbuf since returned EIO, for consistency we want all
@@ -3299,7 +3300,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 			if (!rq)
 				continue;
 
-			/* We can't use our normal waiter as we want to
+			/*
+			 * We can't use our normal waiter as we want to
 			 * avoid recursively trying to handle the current
 			 * reset. The basic dma_fence_default_wait() installs
 			 * a callback for dma_fence_signal(), which is
@@ -3314,8 +3316,11 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 				return false;
 		}
 	}
+	i915_retire_requests(i915);
+	GEM_BUG_ON(i915->gt.active_requests);
 
-	/* Undo nop_submit_request. We prevent all new i915 requests from
+	/*
+	 * Undo nop_submit_request. We prevent all new i915 requests from
 	 * being queued (by disallowing execbuf whilst wedged) so having
 	 * waited for all active requests above, we know the system is idle
 	 * and do not have to worry about a thread being inside

From 36620032ceccb4bf07bbe780a3998e88a585ad69 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 7 Mar 2018 13:42:23 +0000
Subject: [PATCH 0008/1286] drm/i915: Update ring position from request on
 retiring

When wedged, we do not update the ring->tail as we submit the requests,
causing us to leak the ring->space upon cleaning up the wedged driver.
We can just use the value stored in rq->tail, and keep the submission
backend details away from set-wedge.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307134226.25492-3-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.c     | 2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index d437beac3969..75c8826c8cae 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -358,7 +358,7 @@ static void advance_ring(struct i915_request *request)
 		 * is just about to be. Either works, if we miss the last two
 		 * noops - they are safe to be replayed on a reset.
 		 */
-		tail = READ_ONCE(request->ring->tail);
+		tail = READ_ONCE(request->tail);
 	} else {
 		tail = request->postfix;
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d599524a759..88eeb64041ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1593,6 +1593,7 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 	if (intel_ring_update_space(ring) >= bytes)
 		return 0;
 
+	GEM_BUG_ON(list_empty(&ring->request_list));
 	list_for_each_entry(target, &ring->request_list, ring_link) {
 		/* Would completion of this request free enough space? */
 		if (bytes <= __intel_ring_space(target->postfix,

From ef5032a06a73ca5f40ce6975d956aa478536c411 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 7 Mar 2018 13:42:24 +0000
Subject: [PATCH 0009/1286] drm/i915: Include ring->emit in debugging

Include ring->emit and ring->space alongside ring->(head,tail) when
printing debug information.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307134226.25492-4-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c    |  4 ++--
 drivers/gpu/drm/i915/intel_engine_cs.c | 10 +++++++---
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d8bc1bb30cb4..34d12522a1da 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1922,8 +1922,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 
 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 {
-	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
-		   ring->space, ring->head, ring->tail);
+	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
+		   ring->space, ring->head, ring->tail, ring->emit);
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4ba139c27fba..048cd011484c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1929,12 +1929,16 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 			   rq->head, rq->postfix, rq->tail,
 			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
 			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
-		drm_printf(m, "\t\tring->start: 0x%08x\n",
+		drm_printf(m, "\t\tring->start:  0x%08x\n",
 			   i915_ggtt_offset(rq->ring->vma));
-		drm_printf(m, "\t\tring->head:  0x%08x\n",
+		drm_printf(m, "\t\tring->head:   0x%08x\n",
 			   rq->ring->head);
-		drm_printf(m, "\t\tring->tail:  0x%08x\n",
+		drm_printf(m, "\t\tring->tail:   0x%08x\n",
 			   rq->ring->tail);
+		drm_printf(m, "\t\tring->emit:   0x%08x\n",
+			   rq->ring->emit);
+		drm_printf(m, "\t\tring->space:  0x%08x\n",
+			   rq->ring->space);
 	}
 
 	rcu_read_unlock();

From 47650db02dd52267953df81438c93cf8a0eb0e5e Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 7 Mar 2018 13:42:25 +0000
Subject: [PATCH 0010/1286] drm/i915: Wrap engine->schedule in RCU locks for
 set-wedge protection

Similar to the staging around handling of engine->submit_request, we
need to stop adding to the execlists->queue prior to calling
engine->cancel_requests. cancel_requests will move requests from the
queue onto the timeline, so if we add a request onto the queue after that
point, it will be lost.
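
In short, the read-side pattern applied below (taken from the hunks
that follow, error handling elided): sample and invoke the callback
entirely under rcu_read_lock(), so the set-wedge path can clear the
pointer and wait for readers, as it already does for submit_request,
before assuming no new work can be queued:

  rcu_read_lock();
  if (engine->schedule)           /* may be cleared by set-wedged */
          engine->schedule(rq, prio);
  rcu_read_unlock();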

Fixes: af7a8ffad9c5 ("drm/i915: Use rcu instead of stop_machine in set_wedged")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307134226.25492-5-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c     | 13 +++++++------
 drivers/gpu/drm/i915/i915_request.c |  2 ++
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c3d650706329..50e165b5b60d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -479,10 +479,11 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rq = to_request(fence);
 	engine = rq->engine;
-	if (!engine->schedule)
-		return;
 
-	engine->schedule(rq, prio);
+	rcu_read_lock();
+	if (engine->schedule)
+		engine->schedule(rq, prio);
+	rcu_read_unlock();
 }
 
 static void fence_set_priority(struct dma_fence *fence, int prio)
@@ -3222,8 +3223,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 */
 	for_each_engine(engine, i915, id) {
 		i915_gem_reset_prepare_engine(engine);
+
 		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
 	}
+	i915->caps.scheduler = 0;
 
 	/*
 	 * Make sure no one is running the old callback before we proceed with
@@ -3241,11 +3245,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		 * start to complete all requests.
 		 */
 		engine->submit_request = nop_complete_submit_request;
-		engine->schedule = NULL;
 	}
 
-	i915->caps.scheduler = 0;
-
 	/*
 	 * Make sure no request can slip through without getting completed by
 	 * either this call here to intel_engine_init_global_seqno, or the one
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 75c8826c8cae..2f62acd2dc3d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1081,8 +1081,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 * decide whether to preempt the entire chain so that it is ready to
 	 * run at the earliest possible convenience.
 	 */
+	rcu_read_lock();
 	if (engine->schedule)
 		engine->schedule(request, request->ctx->priority);
+	rcu_read_unlock();
 
 	local_bh_disable();
 	i915_sw_fence_commit(&request->submit);

From 68ad361285a9cc73b259f59adbaafde196c15987 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 7 Mar 2018 13:42:26 +0000
Subject: [PATCH 0011/1286] drm/i915: Only call tasklet_kill() on the first
 prepare_reset

tasklet_kill() will spin waiting for the current tasklet to be executed.
However, if tasklet_disable() has been called, then the tasklet is never
executed but permanently put back onto the runlist until
tasklet_enable() is called. Ergo, we cannot use tasklet_kill() inside a
disable/enable pair. This is the case when we call set-wedge from inside
i915_reset(), and another request was submitted to us concurrently with
the reset.
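
The resulting guard, in brief (as added below, relying on the tasklet
count being non-zero while tasklet_disable() is in effect):

  /*
   * A disabled tasklet can never run, so tasklet_kill() would spin
   * forever; only kill it if it has not already been disabled.
   */
  if (!atomic_read(&engine->execlists.tasklet.count))
          tasklet_kill(&engine->execlists.tasklet);
  tasklet_disable(&engine->execlists.tasklet);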

Fixes: 963ddd63c314 ("drm/i915: Suspend submission tasklets around wedging")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307134226.25492-6-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 50e165b5b60d..e58b741e2ec0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2948,8 +2948,16 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 	 * calling engine->init_hw() and also writing the ELSP.
 	 * Turning off the execlists->tasklet until the reset is over
 	 * prevents the race.
+	 *
+	 * Note that this needs to be a single atomic operation on the
+	 * tasklet (flush existing tasks, prevent new tasks) to prevent
+	 * a race between reset and set-wedged. It is not, so we do the best
+	 * we can atm and make sure we don't lock the machine up in the more
+	 * common case of recursively being called from set-wedged from inside
+	 * i915_reset.
 	 */
-	tasklet_kill(&engine->execlists.tasklet);
+	if (!atomic_read(&engine->execlists.tasklet.count))
+		tasklet_kill(&engine->execlists.tasklet);
 	tasklet_disable(&engine->execlists.tasklet);
 
 	/*

From df9471689685857c38d5e095652a1bc867ee11cf Mon Sep 17 00:00:00 2001
From: Lyude Paul <lyude@redhat.com>
Date: Thu, 8 Mar 2018 18:24:15 -0500
Subject: [PATCH 0012/1286] drm/i915: Remove unused DP_LINK_CHECK_TIMEOUT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Lyude Paul <lyude@redhat.com>
Cc: Manasi Navare <manasi.d.navare@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308232421.14049-2-lyude@redhat.com
---
 drivers/gpu/drm/i915/intel_dp.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9a4a51e79fa1..4dd1b2287dd6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -43,7 +43,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 #define DP_DPRX_ESI_LEN 14
 
 /* Compliance test status bits  */

From 86aa82476cffdfa2cb85c2dc8b198e1675773982 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Thu, 8 Mar 2018 16:46:53 +0100
Subject: [PATCH 0013/1286] drm/i915/guc: Tidy guc_log_control
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We plan to decouple log runtime (mapping + relay) from verbosity control.
Let's tidy the code now to reduce the churn in the following patches.

v2: Tidy macros, keep debug messages, use helper var for enable,
    correct typo (Michał)
    Fix incorrect input validation (Sagar)
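
For clarity, the log-level/verbosity mapping introduced below, shown as
a standalone snippet (macros copied from the patch, the assertions are
purely illustrative and rely on GCC extensions as the kernel does):

  #include <assert.h>

  #define LOG_LEVEL_TO_ENABLED(x)       ((x) > 0)
  #define LOG_LEVEL_TO_VERBOSITY(x) ({          \
          typeof(x) _x = (x);                   \
          LOG_LEVEL_TO_ENABLED(_x) ? _x - 1 : 0;\
  })
  #define VERBOSITY_TO_LOG_LEVEL(x)     ((x) + 1)

  int main(void)
  {
          assert(!LOG_LEVEL_TO_ENABLED(0));        /* level 0: logging disabled */
          assert(LOG_LEVEL_TO_VERBOSITY(1) == 0);  /* level 1 -> GuC verbosity 0 */
          assert(VERBOSITY_TO_LOG_LEVEL(3) == 4);  /* verbosity 3 -> level 4 */
          return 0;
  }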

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308154707.21716-1-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c  | 11 ++--
 drivers/gpu/drm/i915/intel_guc_log.c | 86 ++++++++++++++++------------
 drivers/gpu/drm/i915/intel_guc_log.h |  3 +-
 3 files changed, 56 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 34d12522a1da..c4cc8fef11a0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2499,13 +2499,10 @@ static int i915_guc_log_control_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
 
-	if (!HAS_GUC(dev_priv))
+	if (!USES_GUC(dev_priv))
 		return -ENODEV;
 
-	if (!dev_priv->guc.log.vma)
-		return -EINVAL;
-
-	*val = i915_modparams.guc_log_level;
+	*val = intel_guc_log_control_get(&dev_priv->guc);
 
 	return 0;
 }
@@ -2514,10 +2511,10 @@ static int i915_guc_log_control_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
 
-	if (!HAS_GUC(dev_priv))
+	if (!USES_GUC(dev_priv))
 		return -ENODEV;
 
-	return intel_guc_log_control(&dev_priv->guc, val);
+	return intel_guc_log_control_set(&dev_priv->guc, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index c0c2e7d1c7d7..7e59fb07b06b 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -659,51 +659,63 @@ void intel_guc_log_destroy(struct intel_guc *guc)
 	i915_vma_unpin_and_release(&guc->log.vma);
 }
 
-int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
+int intel_guc_log_control_get(struct intel_guc *guc)
+{
+	GEM_BUG_ON(!guc->log.vma);
+	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
+
+	return i915_modparams.guc_log_level;
+}
+
+#define GUC_LOG_LEVEL_DISABLED		0
+#define LOG_LEVEL_TO_ENABLED(x)		((x) > 0)
+#define LOG_LEVEL_TO_VERBOSITY(x) ({		\
+	typeof(x) _x = (x);			\
+	LOG_LEVEL_TO_ENABLED(_x) ? _x - 1 : 0;	\
+})
+#define VERBOSITY_TO_LOG_LEVEL(x)  ((x) + 1)
+int intel_guc_log_control_set(struct intel_guc *guc, u64 val)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	bool enable_logging = control_val > 0;
-	u32 verbosity;
+	bool enabled = LOG_LEVEL_TO_ENABLED(val);
 	int ret;
 
-	if (!guc->log.vma)
-		return -ENODEV;
+	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
+	GEM_BUG_ON(!guc->log.vma);
+	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
 
-	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN);
-	if (control_val > 1 + GUC_LOG_VERBOSITY_MAX)
+	/*
+	 * GuC is recognizing log levels starting from 0 to max, we're using 0
+	 * as indication that logging should be disabled.
+	 */
+	if (val < GUC_LOG_LEVEL_DISABLED ||
+	    val > VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX))
 		return -EINVAL;
 
-	/* This combination doesn't make sense & won't have any effect */
-	if (!enable_logging && !i915_modparams.guc_log_level)
-		return 0;
+	mutex_lock(&dev_priv->drm.struct_mutex);
 
-	verbosity = enable_logging ? control_val - 1 : 0;
-
-	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-	ret = guc_log_control(guc, enable_logging, verbosity);
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	if (ret < 0) {
-		DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
-		return ret;
+	if (i915_modparams.guc_log_level == val) {
+		ret = 0;
+		goto out_unlock;
 	}
 
-	if (enable_logging) {
-		i915_modparams.guc_log_level = 1 + verbosity;
+	intel_runtime_pm_get(dev_priv);
+	ret = guc_log_control(guc, enabled, LOG_LEVEL_TO_VERBOSITY(val));
+	intel_runtime_pm_put(dev_priv);
+	if (ret) {
+		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
+		goto out_unlock;
+	}
 
-		/*
-		 * If log was disabled at boot time, then the relay channel file
-		 * wouldn't have been created by now and interrupts also would
-		 * not have been enabled. Try again now, just in case.
-		 */
+	i915_modparams.guc_log_level = val;
+
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+	if (enabled && !guc_log_has_runtime(guc)) {
 		ret = guc_log_late_setup(guc);
-		if (ret < 0) {
+		if (ret) {
 			DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
-			return ret;
+			goto out;
 		}
 
 		/* GuC logging is currently the only user of Guc2Host interrupts */
@@ -712,7 +724,7 @@ int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
 		gen9_enable_guc_interrupts(dev_priv);
 		intel_runtime_pm_put(dev_priv);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
-	} else {
+	} else if (!enabled && guc_log_has_runtime(guc)) {
 		/*
 		 * Once logging is disabled, GuC won't generate logs & send an
 		 * interrupt. But there could be some data in the log buffer
@@ -720,11 +732,13 @@ int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
 		 * buffer state and then collect the left over logs.
 		 */
 		guc_flush_logs(guc);
-
-		/* As logging is disabled, update log level to reflect that */
-		i915_modparams.guc_log_level = 0;
 	}
 
+	return 0;
+
+out_unlock:
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+out:
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index dab0e949567a..141ce9ca22ce 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -64,7 +64,8 @@ void intel_guc_log_destroy(struct intel_guc *guc);
 void intel_guc_log_init_early(struct intel_guc *guc);
 int intel_guc_log_relay_create(struct intel_guc *guc);
 void intel_guc_log_relay_destroy(struct intel_guc *guc);
-int intel_guc_log_control(struct intel_guc *guc, u64 control_val);
+int intel_guc_log_control_get(struct intel_guc *guc);
+int intel_guc_log_control_set(struct intel_guc *guc, u64 control_val);
 void i915_guc_log_register(struct drm_i915_private *dev_priv);
 void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
 

From 950724ba88521195b4aefd092c8a0337487be352 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Thu, 8 Mar 2018 16:46:54 +0100
Subject: [PATCH 0014/1286] drm/i915/guc: Create common entry points for log
 register/unregister
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We have many functions responsible for allocating different parts of the
GuC log runtime, called from multiple places. Let's keep everything in
guc_log_register instead.

v2: Use more generic intel_uc_register name, keep using "misc" suffix (Michał)
    s/dev_priv/i915 (Sagar)
    Make guc_log_relay_* static (sparse)
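
The consolidated setup order in intel_guc_log_register(), in outline
(condensed from the hunks below, error unwinding elided):

  guc_log_relay_create(guc);          /* relay channel */
  guc_log_runtime_create(guc);        /* log buffer mapping, under struct_mutex */
  guc_log_relay_file_create(guc);     /* relay file; needs debugfs registered */
  gen9_enable_guc_interrupts(i915);   /* GuC-to-host flush interrupts */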

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308154707.21716-2-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c      |   6 +-
 drivers/gpu/drm/i915/intel_guc_log.c | 156 +++++++++++----------------
 drivers/gpu/drm/i915/intel_guc_log.h |   6 +-
 drivers/gpu/drm/i915/intel_uc.c      |  41 ++++---
 drivers/gpu/drm/i915/intel_uc.h      |   2 +
 5 files changed, 95 insertions(+), 116 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d7c4de45644d..987c6770d1a6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1238,9 +1238,11 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	/* Reveal our presence to userspace */
 	if (drm_dev_register(dev, 0) == 0) {
 		i915_debugfs_register(dev_priv);
-		i915_guc_log_register(dev_priv);
 		i915_setup_sysfs(dev_priv);
 
+		/* Depends on debugfs having been initialized */
+		intel_uc_register(dev_priv);
+
 		/* Depends on sysfs having been initialized */
 		i915_perf_register(dev_priv);
 	} else
@@ -1298,7 +1300,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	i915_pmu_unregister(dev_priv);
 
 	i915_teardown_sysfs(dev_priv);
-	i915_guc_log_unregister(dev_priv);
+	intel_uc_unregister(dev_priv);
 	drm_dev_unregister(&dev_priv->drm);
 
 	i915_gem_shrinker_unregister(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 7e59fb07b06b..90b395f34808 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -443,7 +443,7 @@ void intel_guc_log_init_early(struct intel_guc *guc)
 	INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
 }
 
-int intel_guc_log_relay_create(struct intel_guc *guc)
+static int guc_log_relay_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct rchan *guc_log_relay_chan;
@@ -496,7 +496,7 @@ err:
 	return ret;
 }
 
-void intel_guc_log_relay_destroy(struct intel_guc *guc)
+static void guc_log_relay_destroy(struct intel_guc *guc)
 {
 	mutex_lock(&guc->log.runtime.relay_lock);
 
@@ -514,49 +514,6 @@ out_unlock:
 	mutex_unlock(&guc->log.runtime.relay_lock);
 }
 
-static int guc_log_late_setup(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	int ret;
-
-	if (!guc_log_has_runtime(guc)) {
-		/*
-		 * If log was disabled at boot time, then setup needed to handle
-		 * log buffer flush interrupts would not have been done yet, so
-		 * do that now.
-		 */
-		ret = intel_guc_log_relay_create(guc);
-		if (ret)
-			goto err;
-
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		intel_runtime_pm_get(dev_priv);
-		ret = guc_log_runtime_create(guc);
-		intel_runtime_pm_put(dev_priv);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-
-		if (ret)
-			goto err_relay;
-	}
-
-	ret = guc_log_relay_file_create(guc);
-	if (ret)
-		goto err_runtime;
-
-	return 0;
-
-err_runtime:
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	guc_log_runtime_destroy(guc);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-err_relay:
-	intel_guc_log_relay_destroy(guc);
-err:
-	/* logging will remain off */
-	i915_modparams.guc_log_level = 0;
-	return ret;
-}
-
 static void guc_log_capture_logs(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -576,16 +533,6 @@ static void guc_flush_logs(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-	if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
-		return;
-
-	/* First disable the interrupts, will be renabled afterwards */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_runtime_pm_get(dev_priv);
-	gen9_disable_guc_interrupts(dev_priv);
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
 	/*
 	 * Before initiating the forceful flush, wait for any pending/ongoing
 	 * flush to complete otherwise forceful flush may not actually happen.
@@ -628,12 +575,6 @@ int intel_guc_log_create(struct intel_guc *guc)
 
 	guc->log.vma = vma;
 
-	if (i915_modparams.guc_log_level) {
-		ret = guc_log_runtime_create(guc);
-		if (ret < 0)
-			goto err_vma;
-	}
-
 	/* each allocated unit is a page */
 	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
 		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
@@ -645,8 +586,6 @@ int intel_guc_log_create(struct intel_guc *guc)
 
 	return 0;
 
-err_vma:
-	i915_vma_unpin_and_release(&guc->log.vma);
 err:
 	/* logging will be off */
 	i915_modparams.guc_log_level = 0;
@@ -712,26 +651,14 @@ int intel_guc_log_control_set(struct intel_guc *guc, u64 val)
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	if (enabled && !guc_log_has_runtime(guc)) {
-		ret = guc_log_late_setup(guc);
+		ret = intel_guc_log_register(guc);
 		if (ret) {
-			DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
+			/* logging will remain off */
+			i915_modparams.guc_log_level = 0;
 			goto out;
 		}
-
-		/* GuC logging is currently the only user of Guc2Host interrupts */
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		intel_runtime_pm_get(dev_priv);
-		gen9_enable_guc_interrupts(dev_priv);
-		intel_runtime_pm_put(dev_priv);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
 	} else if (!enabled && guc_log_has_runtime(guc)) {
-		/*
-		 * Once logging is disabled, GuC won't generate logs & send an
-		 * interrupt. But there could be some data in the log buffer
-		 * which is yet to be captured. So request GuC to update the log
-		 * buffer state and then collect the left over logs.
-		 */
-		guc_flush_logs(guc);
+		intel_guc_log_unregister(guc);
 	}
 
 	return 0;
@@ -742,29 +669,72 @@ out:
 	return ret;
 }
 
-void i915_guc_log_register(struct drm_i915_private *dev_priv)
+int intel_guc_log_register(struct intel_guc *guc)
 {
-	if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
-		return;
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+	int ret;
 
-	guc_log_late_setup(&dev_priv->guc);
+	GEM_BUG_ON(guc_log_has_runtime(guc));
+
+	/*
+	 * If log was disabled at boot time, then setup needed to handle
+	 * log buffer flush interrupts would not have been done yet, so
+	 * do that now.
+	 */
+	ret = guc_log_relay_create(guc);
+	if (ret)
+		goto err;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	ret = guc_log_runtime_create(guc);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	if (ret)
+		goto err_relay;
+
+	ret = guc_log_relay_file_create(guc);
+	if (ret)
+		goto err_runtime;
+
+	/* GuC logging is currently the only user of Guc2Host interrupts */
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_runtime_pm_get(i915);
+	gen9_enable_guc_interrupts(i915);
+	intel_runtime_pm_put(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return 0;
+
+err_runtime:
+	mutex_lock(&i915->drm.struct_mutex);
+	guc_log_runtime_destroy(guc);
+	mutex_unlock(&i915->drm.struct_mutex);
+err_relay:
+	guc_log_relay_destroy(guc);
+err:
+	return ret;
 }
 
-void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
+void intel_guc_log_unregister(struct intel_guc *guc)
 {
-	struct intel_guc *guc = &dev_priv->guc;
+	struct drm_i915_private *i915 = guc_to_i915(guc);
 
-	if (!USES_GUC_SUBMISSION(dev_priv))
-		return;
+	/*
+	 * Once logging is disabled, GuC won't generate logs & send an
+	 * interrupt. But there could be some data in the log buffer
+	 * which is yet to be captured. So request GuC to update the log
+	 * buffer state and then collect the left over logs.
+	 */
+	guc_flush_logs(guc);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);
 	/* GuC logging is currently the only user of Guc2Host interrupts */
-	intel_runtime_pm_get(dev_priv);
-	gen9_disable_guc_interrupts(dev_priv);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_get(i915);
+	gen9_disable_guc_interrupts(i915);
+	intel_runtime_pm_put(i915);
 
 	guc_log_runtime_destroy(guc);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
 
-	intel_guc_log_relay_destroy(guc);
+	guc_log_relay_destroy(guc);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index 141ce9ca22ce..09dd2ef1933d 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -62,11 +62,9 @@ struct intel_guc_log {
 int intel_guc_log_create(struct intel_guc *guc);
 void intel_guc_log_destroy(struct intel_guc *guc);
 void intel_guc_log_init_early(struct intel_guc *guc);
-int intel_guc_log_relay_create(struct intel_guc *guc);
-void intel_guc_log_relay_destroy(struct intel_guc *guc);
 int intel_guc_log_control_get(struct intel_guc *guc);
 int intel_guc_log_control_set(struct intel_guc *guc, u64 control_val);
-void i915_guc_log_register(struct drm_i915_private *dev_priv);
-void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
+int intel_guc_log_register(struct intel_guc *guc);
+void intel_guc_log_unregister(struct intel_guc *guc);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index e5bf0d37bf43..1c1a00df010b 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -219,6 +219,28 @@ static void guc_free_load_err_log(struct intel_guc *guc)
 		i915_gem_object_put(guc->load_err_log);
 }
 
+int intel_uc_register(struct drm_i915_private *i915)
+{
+	int ret = 0;
+
+	if (!USES_GUC(i915))
+		return 0;
+
+	if (i915_modparams.guc_log_level)
+		ret = intel_guc_log_register(&i915->guc);
+
+	return ret;
+}
+
+void intel_uc_unregister(struct drm_i915_private *i915)
+{
+	if (!USES_GUC(i915))
+		return;
+
+	if (i915_modparams.guc_log_level)
+		intel_guc_log_unregister(&i915->guc);
+}
+
 static int guc_enable_communication(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -249,23 +271,10 @@ int intel_uc_init_misc(struct drm_i915_private *dev_priv)
 		return 0;
 
 	ret = intel_guc_init_wq(guc);
-	if (ret) {
-		DRM_ERROR("Couldn't allocate workqueues for GuC\n");
-		goto err;
-	}
-
-	ret = intel_guc_log_relay_create(guc);
-	if (ret) {
-		DRM_ERROR("Couldn't allocate relay for GuC log\n");
-		goto err_relay;
-	}
+	if (ret)
+		return ret;
 
 	return 0;
-
-err_relay:
-	intel_guc_fini_wq(guc);
-err:
-	return ret;
 }
 
 void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
@@ -276,8 +285,6 @@ void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
 		return;
 
 	intel_guc_fini_wq(guc);
-
-	intel_guc_log_relay_destroy(guc);
 }
 
 int intel_uc_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index f76d51d1ce70..d6af984cd789 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -31,6 +31,8 @@
 void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
 void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
+int intel_uc_register(struct drm_i915_private *dev_priv);
+void intel_uc_unregister(struct drm_i915_private *dev_priv);
 void intel_uc_init_fw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
 int intel_uc_init_misc(struct drm_i915_private *dev_priv);
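
The registration path above acquires its resources in stages (relay
channel, runtime buffers, relay file, interrupts) and unwinds them in
reverse order on failure. A minimal, self-contained sketch of that
goto-unwind pattern, with purely illustrative step names rather than
the driver's actual helpers:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the staged setup steps. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -ENOMEM; }	/* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int example_setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err;

	ret = step_b();
	if (ret)
		goto err_a;

	ret = step_c();
	if (ret)
		goto err_b;

	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
err:
	return ret;
}

int main(void)
{
	printf("setup: %d\n", example_setup());
	return 0;
}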

From 93bf8096c7daa78f0dabc811dad62fa98fe01742 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Thu, 8 Mar 2018 16:46:55 +0100
Subject: [PATCH 0015/1286] drm/i915/guc: Move GuC notification handling to
 separate function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

To allow future code reuse. While here, fix comment style.

v2: Notifications are a separate thing - rename the handler (Sagar)

Suggested-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308154707.21716-3-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_irq.c  | 33 ++--------------------------
 drivers/gpu/drm/i915/intel_guc.c | 37 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_guc.h |  1 +
 3 files changed, 40 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c8c29d8ecbab..828f3104488c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1766,37 +1766,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
 {
-	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
-		/* Sample the log buffer flush related bits & clear them out now
-		 * itself from the message identity register to minimize the
-		 * probability of losing a flush interrupt, when there are back
-		 * to back flush interrupts.
-		 * There can be a new flush interrupt, for different log buffer
-		 * type (like for ISR), whilst Host is handling one (for DPC).
-		 * Since same bit is used in message register for ISR & DPC, it
-		 * could happen that GuC sets the bit for 2nd interrupt but Host
-		 * clears out the bit on handling the 1st interrupt.
-		 */
-		u32 msg, flush;
-
-		msg = I915_READ(SOFT_SCRATCH(15));
-		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
-			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
-		if (flush) {
-			/* Clear the message bits that are handled */
-			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
-
-			/* Handle flush interrupt in bottom half */
-			queue_work(dev_priv->guc.log.runtime.flush_wq,
-				   &dev_priv->guc.log.runtime.flush_work);
-
-			dev_priv->guc.log.flush_interrupt_count++;
-		} else {
-			/* Not clearing of unhandled event bits won't result in
-			 * re-triggering of the interrupt.
-			 */
-		}
-	}
+	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
+		intel_guc_to_host_event_handler(&dev_priv->guc);
 }
 
 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index ff08ea0ebf49..25f92291fd40 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -364,6 +364,43 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
 	return ret;
 }
 
+void intel_guc_to_host_event_handler(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	u32 msg, flush;
+
+	/*
+	 * Sample the log buffer flush related bits & clear them out now
+	 * itself from the message identity register to minimize the
+	 * probability of losing a flush interrupt, when there are back
+	 * to back flush interrupts.
+	 * There can be a new flush interrupt, for different log buffer
+	 * type (like for ISR), whilst Host is handling one (for DPC).
+	 * Since same bit is used in message register for ISR & DPC, it
+	 * could happen that GuC sets the bit for 2nd interrupt but Host
+	 * clears out the bit on handling the 1st interrupt.
+	 */
+
+	msg = I915_READ(SOFT_SCRATCH(15));
+	flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
+		       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
+	if (flush) {
+		/* Clear the message bits that are handled */
+		I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
+
+		/* Handle flush interrupt in bottom half */
+		queue_work(guc->log.runtime.flush_wq,
+			   &guc->log.runtime.flush_work);
+
+		guc->log.flush_interrupt_count++;
+	} else {
+		/*
+		 * Not clearing of unhandled event bits won't result in
+		 * re-triggering of the interrupt.
+		 */
+	}
+}
+
 int intel_guc_sample_forcewake(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index b9424ac644ac..6d5aebe55039 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -125,6 +125,7 @@ int intel_guc_init(struct intel_guc *guc);
 void intel_guc_fini(struct intel_guc *guc);
 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+void intel_guc_to_host_event_handler(struct intel_guc *guc);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct intel_guc *guc);
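
The handler moved above samples the message register, acks only the
bits it handles, and defers the heavy lifting to a bottom half. A small
userspace sketch of that shape, using made-up message bits and a fake
scratch variable rather than the real SOFT_SCRATCH layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative message bits; not the hardware's real layout. */
#define MSG_FLUSH_LOG	(1u << 0)
#define MSG_CRASH_DUMP	(1u << 1)

static uint32_t scratch = MSG_FLUSH_LOG | (1u << 7);	/* fake register */

static void queue_bottom_half(void)
{
	puts("flush work queued");	/* stands in for queue_work() */
}

/* Sample the handled bits and clear only those, leaving the rest intact. */
static void to_host_event_handler(void)
{
	uint32_t msg = scratch;
	uint32_t handled = msg & (MSG_FLUSH_LOG | MSG_CRASH_DUMP);

	if (handled) {
		scratch = msg & ~handled;	/* ack only what we handle */
		queue_bottom_half();
	}
}

int main(void)
{
	to_host_event_handler();
	printf("scratch now 0x%x\n", (unsigned)scratch);
	return 0;
}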

From ff491603ffec80d79b970d540f066535c8743796 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Thu, 8 Mar 2018 09:50:34 +0000
Subject: [PATCH 0016/1286] drm/i915: Include i915_reg.h in intel_ringbuffer.h

Header intel_ringbuffer.h uses definitions from i915_reg.h
but forgets to include it. Remove this hidden dependency by
explicitly including the missing header.

v2: add reminder (Chris)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308095037.18264-2-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_ringbuffer.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0320c2c4cfba..c31258d27e20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -7,6 +7,7 @@
 #include "i915_gem_batch_pool.h"
 #include "i915_gem_timeline.h"
 
+#include "i915_reg.h" /* FIXME split out i915_gpu_commands.h */
 #include "i915_pmu.h"
 #include "i915_request.h"
 #include "i915_selftest.h"
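
The rule being applied here is that a header should compile on its own:
if it uses a definition, it includes the header that provides it rather
than relying on every .c file to have pulled that in first. A tiny
illustrative header showing the idea (hypothetical names, not the
driver's):

#ifndef EXAMPLE_RING_H
#define EXAMPLE_RING_H

#include <stdint.h>	/* used below, so included here explicitly */

struct example_ring {
	uint32_t head;
	uint32_t tail;
};

static inline uint32_t example_ring_space(const struct example_ring *ring)
{
	return ring->tail - ring->head;	/* unsigned wrap is intentional */
}

#endif /* EXAMPLE_RING_H */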

From c5781351450db8ff8374657a8c568772967f3795 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Thu, 8 Mar 2018 09:50:35 +0000
Subject: [PATCH 0017/1286] drm/i915: Change parameters order in
 i915_gem_batch_pool_init

Function i915_gem_batch_pool_init() failed to follow the obj-verb
naming scheme. Fix that by swapping the function parameters.
While here, change the license text to SPDX format.

v2: use intel_engine_init_batch_pool (Chris) as proxy (Michal)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308095037.18264-3-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/i915_gem_batch_pool.c | 30 +++++-----------------
 drivers/gpu/drm/i915/i915_gem_batch_pool.h | 29 ++++-----------------
 drivers/gpu/drm/i915/intel_engine_cs.c     |  9 ++++---
 3 files changed, 17 insertions(+), 51 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index d3cbe8432f48..f3890b664e3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -1,29 +1,11 @@
 /*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2014-2018 Intel Corporation
  */
 
-#include "i915_drv.h"
 #include "i915_gem_batch_pool.h"
+#include "i915_drv.h"
 
 /**
  * DOC: batch pool
@@ -41,11 +23,11 @@
 
 /**
  * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @engine: the associated request submission engine
  * @pool: the batch buffer pool
+ * @engine: the associated request submission engine
  */
-void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
-			      struct i915_gem_batch_pool *pool)
+void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
+			      struct intel_engine_cs *engine)
 {
 	int n;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 10d5ac4c00d3..56947daaaf65 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -1,31 +1,13 @@
 /*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2014-2018 Intel Corporation
  */
 
 #ifndef I915_GEM_BATCH_POOL_H
 #define I915_GEM_BATCH_POOL_H
 
-#include "i915_drv.h"
+#include <linux/types.h>
 
 struct intel_engine_cs;
 
@@ -34,9 +16,8 @@ struct i915_gem_batch_pool {
 	struct list_head cache_list[4];
 };
 
-/* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
-			      struct i915_gem_batch_pool *pool);
+void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
+			      struct intel_engine_cs *engine);
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
 struct drm_i915_gem_object*
 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 048cd011484c..a2b1e9e2c008 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -441,6 +441,11 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
 	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
 }
 
+static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
+{
+	i915_gem_batch_pool_init(&engine->batch_pool, engine);
+}
+
 static bool csb_force_mmio(struct drm_i915_private *i915)
 {
 	/*
@@ -485,11 +490,9 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
 	intel_engine_init_execlist(engine);
-
 	intel_engine_init_timeline(engine);
 	intel_engine_init_hangcheck(engine);
-	i915_gem_batch_pool_init(engine, &engine->batch_pool);
-
+	intel_engine_init_batch_pool(engine);
 	intel_engine_init_cmd_parser(engine);
 }
 
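
The obj-verb convention referenced in the commit message means the
object named by the function ("pool" in i915_gem_batch_pool_init) is
also its first parameter. A standalone sketch of that convention with
illustrative types only:

#include <stddef.h>
#include <stdio.h>

struct engine;				/* illustrative, pointer-only */

struct batch_pool {
	struct engine *engine;
	size_t size;
};

/*
 * obj-verb order: pool_init(pool, ...) - the object being acted on
 * comes first, matching the "object_verb" name of the function.
 */
static void pool_init(struct batch_pool *pool, struct engine *engine)
{
	pool->engine = engine;
	pool->size = 0;
}

int main(void)
{
	struct batch_pool pool;

	pool_init(&pool, NULL);
	printf("pool size %zu\n", pool.size);
	return 0;
}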

From 058a9b43a37a2406a574752707c5346e7b6444f4 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Thu, 8 Mar 2018 09:50:36 +0000
Subject: [PATCH 0018/1286] drm/i915: Make header i915_pmu.h more robust

Definitions in the i915_pmu.h header depend on other types and
declarations that were not explicitly included. Fix that by
adding the related headers and forward declarations.
While here, change the license text to SPDX format.

v2: don't drop "intel_ringbuffer.h" (Tvrtko)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308095037.18264-4-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/i915_pmu.c | 27 +++------------------------
 drivers/gpu/drm/i915/i915_pmu.h | 30 ++++++++++--------------------
 2 files changed, 13 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 964467b03e4d..4bc7aefa9541 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1,33 +1,12 @@
 /*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2017-2018 Intel Corporation
  */
 
-#include <linux/perf_event.h>
-#include <linux/pm_runtime.h>
-
-#include "i915_drv.h"
 #include "i915_pmu.h"
 #include "intel_ringbuffer.h"
+#include "i915_drv.h"
 
 /* Frequency for the sampling timer for events which need it. */
 #define FREQUENCY 200
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index aa1b1a987ea1..2ba735299f7c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -1,29 +1,19 @@
 /*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2017-2018 Intel Corporation
  */
+
 #ifndef __I915_PMU_H__
 #define __I915_PMU_H__
 
+#include <linux/hrtimer.h>
+#include <linux/perf_event.h>
+#include <linux/spinlock_types.h>
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+
 enum {
 	__I915_SAMPLE_FREQ_ACT = 0,
 	__I915_SAMPLE_FREQ_REQ,
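
The same header-robustness idea can be shown standalone: include the
headers whose types the header embeds by value, and forward-declare the
types it only touches through pointers, so include order elsewhere
stops mattering. A small sketch with hypothetical names:

#ifndef EXAMPLE_PMU_H
#define EXAMPLE_PMU_H

#include <stdint.h>		/* embedded by value: uint64_t below */

struct example_device;		/* pointer only, so a forward declaration
				 * avoids dragging in the big header */

struct example_pmu {
	struct example_device *dev;
	uint64_t sample[4];
};

void example_pmu_register(struct example_pmu *pmu,
			  struct example_device *dev);

#endif /* EXAMPLE_PMU_H */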

From d897a111940fe2d644172466914d7c97791bda05 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Thu, 8 Mar 2018 09:50:37 +0000
Subject: [PATCH 0019/1286] drm/i915: Move i915_gpu_error into its own header

Error state management code was moved into a separate .c unit,
but we didn't move the related definitions into their own header.

v2: move also intel_display_error_state forward decl
    fix ("Prefer 'unsigned int' to bare use of 'unsigned'")
    warnings detected by checkpatch in moved code (Michal)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308095037.18264-5-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h       | 332 +-----------------------
 drivers/gpu/drm/i915/i915_gpu_error.c |   1 +
 drivers/gpu/drm/i915/i915_gpu_error.h | 356 ++++++++++++++++++++++++++
 3 files changed, 358 insertions(+), 331 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_gpu_error.h

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e740f6fe33f..d35f805cb177 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,7 +72,7 @@
 #include "i915_gem_object.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_timeline.h"
-
+#include "i915_gpu_error.h"
 #include "i915_request.h"
 #include "i915_vma.h"
 
@@ -453,172 +453,6 @@ struct intel_csr {
 	uint32_t allowed_dc_mask;
 };
 
-struct intel_display_error_state;
-
-struct i915_gpu_state {
-	struct kref ref;
-	ktime_t time;
-	ktime_t boottime;
-	ktime_t uptime;
-
-	struct drm_i915_private *i915;
-
-	char error_msg[128];
-	bool simulated;
-	bool awake;
-	bool wakelock;
-	bool suspended;
-	int iommu;
-	u32 reset_count;
-	u32 suspend_count;
-	struct intel_device_info device_info;
-	struct intel_driver_caps driver_caps;
-	struct i915_params params;
-
-	struct i915_error_uc {
-		struct intel_uc_fw guc_fw;
-		struct intel_uc_fw huc_fw;
-		struct drm_i915_error_object *guc_log;
-	} uc;
-
-	/* Generic register state */
-	u32 eir;
-	u32 pgtbl_er;
-	u32 ier;
-	u32 gtier[4], ngtier;
-	u32 ccid;
-	u32 derrmr;
-	u32 forcewake;
-	u32 error; /* gen6+ */
-	u32 err_int; /* gen7 */
-	u32 fault_data0; /* gen8, gen9 */
-	u32 fault_data1; /* gen8, gen9 */
-	u32 done_reg;
-	u32 gac_eco;
-	u32 gam_ecochk;
-	u32 gab_ctl;
-	u32 gfx_mode;
-
-	u32 nfence;
-	u64 fence[I915_MAX_NUM_FENCES];
-	struct intel_overlay_error_state *overlay;
-	struct intel_display_error_state *display;
-
-	struct drm_i915_error_engine {
-		int engine_id;
-		/* Software tracked state */
-		bool idle;
-		bool waiting;
-		int num_waiters;
-		unsigned long hangcheck_timestamp;
-		bool hangcheck_stalled;
-		enum intel_engine_hangcheck_action hangcheck_action;
-		struct i915_address_space *vm;
-		int num_requests;
-		u32 reset_count;
-
-		/* position of active request inside the ring */
-		u32 rq_head, rq_post, rq_tail;
-
-		/* our own tracking of ring head and tail */
-		u32 cpu_ring_head;
-		u32 cpu_ring_tail;
-
-		u32 last_seqno;
-
-		/* Register state */
-		u32 start;
-		u32 tail;
-		u32 head;
-		u32 ctl;
-		u32 mode;
-		u32 hws;
-		u32 ipeir;
-		u32 ipehr;
-		u32 bbstate;
-		u32 instpm;
-		u32 instps;
-		u32 seqno;
-		u64 bbaddr;
-		u64 acthd;
-		u32 fault_reg;
-		u64 faddr;
-		u32 rc_psmi; /* sleep state */
-		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
-		struct intel_instdone instdone;
-
-		struct drm_i915_error_context {
-			char comm[TASK_COMM_LEN];
-			pid_t pid;
-			u32 handle;
-			u32 hw_id;
-			int priority;
-			int ban_score;
-			int active;
-			int guilty;
-			bool bannable;
-		} context;
-
-		struct drm_i915_error_object {
-			u64 gtt_offset;
-			u64 gtt_size;
-			int page_count;
-			int unused;
-			u32 *pages[0];
-		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
-		struct drm_i915_error_object **user_bo;
-		long user_bo_count;
-
-		struct drm_i915_error_object *wa_ctx;
-		struct drm_i915_error_object *default_state;
-
-		struct drm_i915_error_request {
-			long jiffies;
-			pid_t pid;
-			u32 context;
-			int priority;
-			int ban_score;
-			u32 seqno;
-			u32 head;
-			u32 tail;
-		} *requests, execlist[EXECLIST_MAX_PORTS];
-		unsigned int num_ports;
-
-		struct drm_i915_error_waiter {
-			char comm[TASK_COMM_LEN];
-			pid_t pid;
-			u32 seqno;
-		} *waiters;
-
-		struct {
-			u32 gfx_mode;
-			union {
-				u64 pdp[4];
-				u32 pp_dir_base;
-			};
-		} vm_info;
-	} engine[I915_NUM_ENGINES];
-
-	struct drm_i915_error_buffer {
-		u32 size;
-		u32 name;
-		u32 rseqno[I915_NUM_ENGINES], wseqno;
-		u64 gtt_offset;
-		u32 read_domains;
-		u32 write_domain;
-		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
-		u32 tiling:2;
-		u32 dirty:1;
-		u32 purgeable:1;
-		u32 userptr:1;
-		s32 engine:4;
-		u32 cache_level:3;
-	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
-	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
-	struct i915_address_space *active_vm[I915_NUM_ENGINES];
-};
-
 enum i915_cache_level {
 	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
@@ -1146,16 +980,6 @@ struct i915_gem_mm {
 	u32 object_count;
 };
 
-struct drm_i915_error_state_buf {
-	struct drm_i915_private *i915;
-	unsigned bytes;
-	unsigned size;
-	int err;
-	u8 *buf;
-	loff_t start;
-	loff_t pos;
-};
-
 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
 
 #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
@@ -1164,102 +988,6 @@ struct drm_i915_error_state_buf {
 #define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
 #define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
 
-struct i915_gpu_error {
-	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
-	struct delayed_work hangcheck_work;
-
-	/* For reset and error_state handling. */
-	spinlock_t lock;
-	/* Protected by the above dev->gpu_error.lock. */
-	struct i915_gpu_state *first_error;
-
-	atomic_t pending_fb_pin;
-
-	unsigned long missed_irq_rings;
-
-	/**
-	 * State variable controlling the reset flow and count
-	 *
-	 * This is a counter which gets incremented when reset is triggered,
-	 *
-	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
-	 * meaning that any waiters holding onto the struct_mutex should
-	 * relinquish the lock immediately in order for the reset to start.
-	 *
-	 * If reset is not completed succesfully, the I915_WEDGE bit is
-	 * set meaning that hardware is terminally sour and there is no
-	 * recovery. All waiters on the reset_queue will be woken when
-	 * that happens.
-	 *
-	 * This counter is used by the wait_seqno code to notice that reset
-	 * event happened and it needs to restart the entire ioctl (since most
-	 * likely the seqno it waited for won't ever signal anytime soon).
-	 *
-	 * This is important for lock-free wait paths, where no contended lock
-	 * naturally enforces the correct ordering between the bail-out of the
-	 * waiter and the gpu reset work code.
-	 */
-	unsigned long reset_count;
-
-	/**
-	 * flags: Control various stages of the GPU reset
-	 *
-	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
-	 * other users acquiring the struct_mutex. To do this we set the
-	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
-	 * and then check for that bit before acquiring the struct_mutex (in
-	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
-	 * secondary role in preventing two concurrent global reset attempts.
-	 *
-	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
-	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
-	 * but it may be held by some long running waiter (that we cannot
-	 * interrupt without causing trouble). Once we are ready to do the GPU
-	 * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
-	 * they already hold the struct_mutex and want to participate they can
-	 * inspect the bit and do the reset directly, otherwise the worker
-	 * waits for the struct_mutex.
-	 *
-	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
-	 * acquire the struct_mutex to reset an engine, we need an explicit
-	 * flag to prevent two concurrent reset attempts in the same engine.
-	 * As the number of engines continues to grow, allocate the flags from
-	 * the most significant bits.
-	 *
-	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
-	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
-	 * i915_request_alloc(), this bit is checked and the sequence
-	 * aborted (with -EIO reported to userspace) if set.
-	 */
-	unsigned long flags;
-#define I915_RESET_BACKOFF	0
-#define I915_RESET_HANDOFF	1
-#define I915_RESET_MODESET	2
-#define I915_WEDGED		(BITS_PER_LONG - 1)
-#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)
-
-	/** Number of times an engine has been reset */
-	u32 reset_engine_count[I915_NUM_ENGINES];
-
-	/**
-	 * Waitqueue to signal when a hang is detected. Used to for waiters
-	 * to release the struct_mutex for the reset to procede.
-	 */
-	wait_queue_head_t wait_queue;
-
-	/**
-	 * Waitqueue to signal when the reset has completed. Used by clients
-	 * that wait for dev_priv->mm.wedged to settle.
-	 */
-	wait_queue_head_t reset_queue;
-
-	/* For missed irq/seqno simulation. */
-	unsigned long test_irq_rings;
-};
-
 enum modeset_restore {
 	MODESET_ON_LID_OPEN,
 	MODESET_DONE,
@@ -3589,64 +3317,6 @@ static inline int i915_debugfs_connector_add(struct drm_connector *connector)
 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
 #endif
 
-/* i915_gpu_error.c */
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-__printf(2, 3)
-void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
-			    const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
-			      struct drm_i915_private *i915,
-			      size_t count, loff_t pos);
-static inline void i915_error_state_buf_release(
-	struct drm_i915_error_state_buf *eb)
-{
-	kfree(eb->buf);
-}
-
-struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
-			      u32 engine_mask,
-			      const char *error_msg);
-
-static inline struct i915_gpu_state *
-i915_gpu_state_get(struct i915_gpu_state *gpu)
-{
-	kref_get(&gpu->ref);
-	return gpu;
-}
-
-void __i915_gpu_state_free(struct kref *kref);
-static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
-{
-	if (gpu)
-		kref_put(&gpu->ref, __i915_gpu_state_free);
-}
-
-struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
-void i915_reset_error_state(struct drm_i915_private *i915);
-
-#else
-
-static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
-					    u32 engine_mask,
-					    const char *error_msg)
-{
-}
-
-static inline struct i915_gpu_state *
-i915_first_error_state(struct drm_i915_private *i915)
-{
-	return NULL;
-}
-
-static inline void i915_reset_error_state(struct drm_i915_private *i915)
-{
-}
-
-#endif
-
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f89ac7a8f95f..effaf982b19b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -32,6 +32,7 @@
 #include <linux/zlib.h>
 #include <drm/drm_print.h>
 
+#include "i915_gpu_error.h"
 #include "i915_drv.h"
 
 static inline const struct intel_engine_cs *
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
new file mode 100644
index 000000000000..ebbdf37e2879
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -0,0 +1,356 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#ifndef _I915_GPU_ERROR_H_
+#define _I915_GPU_ERROR_H_
+
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+
+#include <drm/drm_mm.h>
+
+#include "intel_device_info.h"
+#include "intel_ringbuffer.h"
+#include "intel_uc_fw.h"
+
+#include "i915_gem.h"
+#include "i915_gem_gtt.h"
+#include "i915_params.h"
+
+struct drm_i915_private;
+struct intel_overlay_error_state;
+struct intel_display_error_state;
+
+struct i915_gpu_state {
+	struct kref ref;
+	ktime_t time;
+	ktime_t boottime;
+	ktime_t uptime;
+
+	struct drm_i915_private *i915;
+
+	char error_msg[128];
+	bool simulated;
+	bool awake;
+	bool wakelock;
+	bool suspended;
+	int iommu;
+	u32 reset_count;
+	u32 suspend_count;
+	struct intel_device_info device_info;
+	struct intel_driver_caps driver_caps;
+	struct i915_params params;
+
+	struct i915_error_uc {
+		struct intel_uc_fw guc_fw;
+		struct intel_uc_fw huc_fw;
+		struct drm_i915_error_object *guc_log;
+	} uc;
+
+	/* Generic register state */
+	u32 eir;
+	u32 pgtbl_er;
+	u32 ier;
+	u32 gtier[4], ngtier;
+	u32 ccid;
+	u32 derrmr;
+	u32 forcewake;
+	u32 error; /* gen6+ */
+	u32 err_int; /* gen7 */
+	u32 fault_data0; /* gen8, gen9 */
+	u32 fault_data1; /* gen8, gen9 */
+	u32 done_reg;
+	u32 gac_eco;
+	u32 gam_ecochk;
+	u32 gab_ctl;
+	u32 gfx_mode;
+
+	u32 nfence;
+	u64 fence[I915_MAX_NUM_FENCES];
+	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
+
+	struct drm_i915_error_engine {
+		int engine_id;
+		/* Software tracked state */
+		bool idle;
+		bool waiting;
+		int num_waiters;
+		unsigned long hangcheck_timestamp;
+		bool hangcheck_stalled;
+		enum intel_engine_hangcheck_action hangcheck_action;
+		struct i915_address_space *vm;
+		int num_requests;
+		u32 reset_count;
+
+		/* position of active request inside the ring */
+		u32 rq_head, rq_post, rq_tail;
+
+		/* our own tracking of ring head and tail */
+		u32 cpu_ring_head;
+		u32 cpu_ring_tail;
+
+		u32 last_seqno;
+
+		/* Register state */
+		u32 start;
+		u32 tail;
+		u32 head;
+		u32 ctl;
+		u32 mode;
+		u32 hws;
+		u32 ipeir;
+		u32 ipehr;
+		u32 bbstate;
+		u32 instpm;
+		u32 instps;
+		u32 seqno;
+		u64 bbaddr;
+		u64 acthd;
+		u32 fault_reg;
+		u64 faddr;
+		u32 rc_psmi; /* sleep state */
+		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+		struct intel_instdone instdone;
+
+		struct drm_i915_error_context {
+			char comm[TASK_COMM_LEN];
+			pid_t pid;
+			u32 handle;
+			u32 hw_id;
+			int priority;
+			int ban_score;
+			int active;
+			int guilty;
+			bool bannable;
+		} context;
+
+		struct drm_i915_error_object {
+			u64 gtt_offset;
+			u64 gtt_size;
+			int page_count;
+			int unused;
+			u32 *pages[0];
+		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
+		struct drm_i915_error_object **user_bo;
+		long user_bo_count;
+
+		struct drm_i915_error_object *wa_ctx;
+		struct drm_i915_error_object *default_state;
+
+		struct drm_i915_error_request {
+			long jiffies;
+			pid_t pid;
+			u32 context;
+			int priority;
+			int ban_score;
+			u32 seqno;
+			u32 head;
+			u32 tail;
+		} *requests, execlist[EXECLIST_MAX_PORTS];
+		unsigned int num_ports;
+
+		struct drm_i915_error_waiter {
+			char comm[TASK_COMM_LEN];
+			pid_t pid;
+			u32 seqno;
+		} *waiters;
+
+		struct {
+			u32 gfx_mode;
+			union {
+				u64 pdp[4];
+				u32 pp_dir_base;
+			};
+		} vm_info;
+	} engine[I915_NUM_ENGINES];
+
+	struct drm_i915_error_buffer {
+		u32 size;
+		u32 name;
+		u32 rseqno[I915_NUM_ENGINES], wseqno;
+		u64 gtt_offset;
+		u32 read_domains;
+		u32 write_domain;
+		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+		u32 tiling:2;
+		u32 dirty:1;
+		u32 purgeable:1;
+		u32 userptr:1;
+		s32 engine:4;
+		u32 cache_level:3;
+	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
+	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
+	struct i915_address_space *active_vm[I915_NUM_ENGINES];
+};
+
+struct i915_gpu_error {
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+
+	struct delayed_work hangcheck_work;
+
+	/* For reset and error_state handling. */
+	spinlock_t lock;
+	/* Protected by the above dev->gpu_error.lock. */
+	struct i915_gpu_state *first_error;
+
+	atomic_t pending_fb_pin;
+
+	unsigned long missed_irq_rings;
+
+	/**
+	 * State variable controlling the reset flow and count
+	 *
+	 * This is a counter which gets incremented when reset is triggered,
+	 *
+	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
+	 * meaning that any waiters holding onto the struct_mutex should
+	 * relinquish the lock immediately in order for the reset to start.
+	 *
+	 * If reset is not completed successfully, the I915_WEDGE bit is
+	 * set meaning that hardware is terminally sour and there is no
+	 * recovery. All waiters on the reset_queue will be woken when
+	 * that happens.
+	 *
+	 * This counter is used by the wait_seqno code to notice that reset
+	 * event happened and it needs to restart the entire ioctl (since most
+	 * likely the seqno it waited for won't ever signal anytime soon).
+	 *
+	 * This is important for lock-free wait paths, where no contended lock
+	 * naturally enforces the correct ordering between the bail-out of the
+	 * waiter and the gpu reset work code.
+	 */
+	unsigned long reset_count;
+
+	/**
+	 * flags: Control various stages of the GPU reset
+	 *
+	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
+	 * other users acquiring the struct_mutex. To do this we set the
+	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
+	 * and then check for that bit before acquiring the struct_mutex (in
+	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
+	 * secondary role in preventing two concurrent global reset attempts.
+	 *
+	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
+	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
+	 * but it may be held by some long running waiter (that we cannot
+	 * interrupt without causing trouble). Once we are ready to do the GPU
+	 * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
+	 * they already hold the struct_mutex and want to participate they can
+	 * inspect the bit and do the reset directly, otherwise the worker
+	 * waits for the struct_mutex.
+	 *
+	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+	 * acquire the struct_mutex to reset an engine, we need an explicit
+	 * flag to prevent two concurrent reset attempts in the same engine.
+	 * As the number of engines continues to grow, allocate the flags from
+	 * the most significant bits.
+	 *
+	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
+	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
+	 * i915_request_alloc(), this bit is checked and the sequence
+	 * aborted (with -EIO reported to userspace) if set.
+	 */
+	unsigned long flags;
+#define I915_RESET_BACKOFF	0
+#define I915_RESET_HANDOFF	1
+#define I915_RESET_MODESET	2
+#define I915_WEDGED		(BITS_PER_LONG - 1)
+#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)
+
+	/** Number of times an engine has been reset */
+	u32 reset_engine_count[I915_NUM_ENGINES];
+
+	/**
+	 * Waitqueue to signal when a hang is detected. Used to for waiters
+	 * to release the struct_mutex for the reset to procede.
+	 */
+	wait_queue_head_t wait_queue;
+
+	/**
+	 * Waitqueue to signal when the reset has completed. Used by clients
+	 * that wait for dev_priv->mm.wedged to settle.
+	 */
+	wait_queue_head_t reset_queue;
+
+	/* For missed irq/seqno simulation. */
+	unsigned long test_irq_rings;
+};
+
+struct drm_i915_error_state_buf {
+	struct drm_i915_private *i915;
+	unsigned int bytes;
+	unsigned int size;
+	int err;
+	u8 *buf;
+	loff_t start;
+	loff_t pos;
+};
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+__printf(2, 3)
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+			    const struct i915_gpu_state *gpu);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+			      struct drm_i915_private *i915,
+			      size_t count, loff_t pos);
+
+static inline void
+i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
+{
+	kfree(eb->buf);
+}
+
+struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+			      u32 engine_mask,
+			      const char *error_msg);
+
+static inline struct i915_gpu_state *
+i915_gpu_state_get(struct i915_gpu_state *gpu)
+{
+	kref_get(&gpu->ref);
+	return gpu;
+}
+
+void __i915_gpu_state_free(struct kref *kref);
+static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+{
+	if (gpu)
+		kref_put(&gpu->ref, __i915_gpu_state_free);
+}
+
+struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+void i915_reset_error_state(struct drm_i915_private *i915);
+
+#else
+
+static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+					    u32 engine_mask,
+					    const char *error_msg)
+{
+}
+
+static inline struct i915_gpu_state *
+i915_first_error_state(struct drm_i915_private *i915)
+{
+	return NULL;
+}
+
+static inline void i915_reset_error_state(struct drm_i915_private *i915)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
+
+#endif /* _I915_GPU_ERROR_H_ */
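
The tail of the new header keeps the familiar pattern of pairing real
declarations with static inline no-op stubs when the capture support is
compiled out, so callers never need their own #ifdefs. A compact sketch
of that pattern under an assumed CAPTURE_ERROR switch (not the driver's
real Kconfig symbol):

#include <stdio.h>

/* Toggle at build time, e.g. -DCAPTURE_ERROR=1 for the real hooks. */
#ifndef CAPTURE_ERROR
#define CAPTURE_ERROR 0
#endif

#if CAPTURE_ERROR

void capture_error_state(const char *msg);	/* real implementation
						 * lives in its own .c */

#else

/* Stub so callers compile unchanged when the feature is disabled. */
static inline void capture_error_state(const char *msg)
{
	(void)msg;
}

#endif

int main(void)
{
	capture_error_state("hang on engine 0");
	puts("done");
	return 0;
}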

From caa1fd660e6f6701255544a3586a498676527fa4 Mon Sep 17 00:00:00 2001
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Date: Thu, 8 Mar 2018 16:52:18 -0800
Subject: [PATCH 0020/1286] drm/i915/psr: Display WA 0884 applied broadly for
 more HW tracking.

WA 0884:bxt:all,cnl:*:A - "When FBC is enabled with eDP PSR,
the CPU host modify writes may not get updated on the Display
as expected.
WA: Write 0x00000000 to CUR_SURFLIVE_A with every CPU
host modify write to trigger PSR exit."

We can also find other cases in the spec that describe
bogus writes to cursor registers to force PSR exit with
HW tracking. And HW engineers confirmed that this WA can
be safely applied for any frontbuffer activity.

So let's use this more broadly here instead of forcibly
disabling and re-enabling PSR every time we have a simple
reliable flush case.

Other commits improve the fbcon/fbdev use a lot, but this
approach is the only one where we can get a fully reliable
console with no slowness or missed frames and PSR still
enabled and active.

v2: - Rebase on drm-tip
    - (DK) Add a comment to explain that WA
    tells about writing 0 to CUR_SURFLIVE_A but we write to
    CUR_SURFLIVE(pipe).
v3: Wa doesn't work on PSR2.

Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180309005218.26772-1-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h  |  3 +++
 drivers/gpu/drm/i915/intel_psr.c | 19 +++++++++++++++++--
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9e765462ca44..60febfb33154 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6032,6 +6032,9 @@ enum {
 #define IVB_CURSOR_B_OFFSET 0x71080
 #define IVB_CURSOR_C_OFFSET 0x72080
 
+#define _CUR_SURLIVE		0x700AC
+#define CUR_SURLIVE(pipe)	_CURSOR2(pipe, _CUR_SURLIVE)
+
 /* Display A control */
 #define _DSPACNTR				0x70180
 #define   DISPLAY_PLANE_ENABLE			(1<<31)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 23175c5c4a50..975ebb51c7af 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -1027,8 +1027,23 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
 	/* By definition flush = invalidate + flush */
-	if (frontbuffer_bits)
-		intel_psr_exit(dev_priv);
+	if (frontbuffer_bits) {
+		if (dev_priv->psr.psr2_support ||
+		    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+			intel_psr_exit(dev_priv);
+		} else {
+			/*
+			 * Display WA #0884: all
+			 * This documented WA for bxt can be safely applied
+			 * broadly so we can force HW tracking to exit PSR
+			 * instead of disabling and re-enabling.
+			 * Workaround tells us to write 0 to CUR_SURLIVE_A,
+			 * but it makes more sense write to the current active
+			 * pipe.
+			 */
+			I915_WRITE(CUR_SURLIVE(pipe), 0);
+		}
+	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		if (!work_busy(&dev_priv->psr.work.work))

From 6f9ec414ec47eea3f3e2c5ad4c67b4265bbff2a3 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 8 Mar 2018 14:07:32 +0000
Subject: [PATCH 0021/1286] drm/i915: Remove the impedance mismatch around
 intel_engine_enable_signaling

There is some redundancy between dma_fence->ops->enable_signaling (via
i915_fence_enable_signaling) and our backend,
intel_engine_enable_signaling(), in that both levels recheck the fence
status multiple times. If we convert intel_engine_enable_signaling() to
return the information desired by dma_fence->ops->enable_signaling, we
can reduce i915_fence_enable_signaling to a simple stub and avoid
trying to reinterpret the same information.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180308140732.25090-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.c      |  6 +-----
 drivers/gpu/drm/i915/intel_breadcrumbs.c | 21 +++++++++++++--------
 drivers/gpu/drm/i915/intel_ringbuffer.h  |  2 +-
 3 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 2f62acd2dc3d..1810fa1b81cb 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -59,11 +59,7 @@ static bool i915_fence_signaled(struct dma_fence *fence)
 
 static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
-	if (i915_fence_signaled(fence))
-		return false;
-
-	intel_engine_enable_signaling(to_request(fence), true);
-	return !i915_fence_signaled(fence);
+	return intel_engine_enable_signaling(to_request(fence), true);
 }
 
 static signed long i915_fence_wait(struct dma_fence *fence,
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 1f79e7a47433..671a6d61e29d 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -730,10 +730,11 @@ static void insert_signal(struct intel_breadcrumbs *b,
 	list_add(&request->signaling.link, &iter->signaling.link);
 }
 
-void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
+bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	struct intel_wait *wait = &request->signaling.wait;
 	u32 seqno;
 
 	/*
@@ -750,12 +751,12 @@ void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
 
 	seqno = i915_request_global_seqno(request);
 	if (!seqno) /* will be enabled later upon execution */
-		return;
+		return true;
 
-	GEM_BUG_ON(request->signaling.wait.seqno);
-	request->signaling.wait.tsk = b->signaler;
-	request->signaling.wait.request = request;
-	request->signaling.wait.seqno = seqno;
+	GEM_BUG_ON(wait->seqno);
+	wait->tsk = b->signaler;
+	wait->request = request;
+	wait->seqno = seqno;
 
 	/*
 	 * Add ourselves into the list of waiters, but registering our
@@ -768,11 +769,15 @@ void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
 	 */
 	spin_lock(&b->rb_lock);
 	insert_signal(b, request, seqno);
-	wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
+	wakeup &= __intel_engine_add_wait(engine, wait);
 	spin_unlock(&b->rb_lock);
 
-	if (wakeup)
+	if (wakeup) {
 		wake_up_process(b->signaler);
+		return !intel_wait_complete(wait);
+	}
+
+	return true;
 }
 
 void intel_engine_cancel_signaling(struct i915_request *request)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c31258d27e20..81cdbbf257ec 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -940,7 +940,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
 			   struct intel_wait *wait);
 void intel_engine_remove_wait(struct intel_engine_cs *engine,
 			      struct intel_wait *wait);
-void intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
+bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
 void intel_engine_cancel_signaling(struct i915_request *request);
 
 static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
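
The contract the patch relies on is that an enable_signaling hook
returns false when the fence has already signaled and true when a
wakeup will arrive later, so the caller does not have to re-check the
fence itself. A self-contained sketch of that contract with a toy
fence type (not the dma_fence API):

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for a fence with an "arm the interrupt" backend. */
struct fake_fence {
	bool signaled;
	bool irq_armed;
};

/*
 * Backend hook: returns false if the fence completed before (or while)
 * signaling was being enabled, true if a wakeup will arrive later.
 * Reporting this directly avoids the caller probing the fence state
 * again, which is the redundancy the patch removes.
 */
static bool backend_enable_signaling(struct fake_fence *f)
{
	if (f->signaled)
		return false;

	f->irq_armed = true;
	return true;
}

/* The fence-level callback becomes a thin forwarding stub. */
static bool fence_enable_signaling(struct fake_fence *f)
{
	return backend_enable_signaling(f);
}

int main(void)
{
	struct fake_fence f = { .signaled = false, .irq_armed = false };

	printf("armed: %d\n", fence_enable_signaling(&f));
	return 0;
}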

From 3c33fc7c1af9a3426eff9015e5bab08a21a5fa9d Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 12 Mar 2018 13:03:06 +0000
Subject: [PATCH 0022/1286] drm/i915/uc: Sanitize uC options early

We are sanitizing uC-related modparams together with the other driver
modparams in intel_sanitize_options, called from i915_driver_init_hw,
but this is too late for us as we will want to use the USES_GUC/USES_HUC
macros at an earlier stage. Since our sanitizing does not require any
MMIO access, we can do it in intel_uc_init_early, right after we resolve
the firmware names.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180312130308.22952-2-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c | 2 --
 drivers/gpu/drm/i915/intel_uc.c | 6 ++++--
 drivers/gpu/drm/i915/intel_uc.h | 1 -
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 987c6770d1a6..0126b222ab7f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1074,8 +1074,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 					    i915_modparams.enable_ppgtt);
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
 
-	intel_uc_sanitize_options(dev_priv);
-
 	intel_gvt_sanitize_options(dev_priv);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 1c1a00df010b..6dec6d67f4ad 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -83,7 +83,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
 }
 
 /**
- * intel_uc_sanitize_options - sanitize uC related modparam options
+ * sanitize_options_early - sanitize uC related modparam options
  * @dev_priv: device private
  *
  * In case of "enable_guc" option this function will attempt to modify
@@ -99,7 +99,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
  * unless GuC is enabled on given platform and the driver is compiled with
  * debug config when this modparam will default to "enable(1..4)".
  */
-void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+static void sanitize_options_early(struct drm_i915_private *dev_priv)
 {
 	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
 	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
@@ -163,6 +163,8 @@ void intel_uc_init_early(struct drm_i915_private *dev_priv)
 {
 	intel_guc_init_early(&dev_priv->guc);
 	intel_huc_init_early(&dev_priv->huc);
+
+	sanitize_options_early(dev_priv);
 }
 
 void intel_uc_init_fw(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index d6af984cd789..49b5b2f274bc 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -28,7 +28,6 @@
 #include "intel_huc.h"
 #include "i915_params.h"
 
-void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
 void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
 int intel_uc_register(struct drm_i915_private *dev_priv);
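
The point of moving the sanitization is ordering: resolve "auto" and
impossible module-parameter values once, before anything reads them,
and without touching hardware. A minimal sketch of such an
early-sanitize step, with invented parameter names:

#include <stdio.h>

/* Illustrative stand-ins for module parameters and platform caps. */
static int param_enable_feature = -1;	/* -1 means "auto" */
static int platform_has_feature = 1;

/*
 * Resolve "auto" and reject impossible values once, before anything
 * consults the parameter; needs no MMIO, so it can run this early.
 */
static void sanitize_options_early(void)
{
	if (param_enable_feature < 0)
		param_enable_feature = platform_has_feature;

	if (!platform_has_feature && param_enable_feature) {
		fprintf(stderr, "feature not supported, disabling\n");
		param_enable_feature = 0;
	}
}

static void init_early(void)
{
	sanitize_options_early();
}

int main(void)
{
	init_early();
	/* From here on, USES_FEATURE-style checks can trust the value. */
	printf("enable_feature = %d\n", param_enable_feature);
	return 0;
}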

From c37d57282033067edf60e044229a4b4f367cc81b Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 12 Mar 2018 13:03:07 +0000
Subject: [PATCH 0023/1286] drm/i915/uc: Sanitize uC together with GEM

Instead of dancing around uC in reset/suspend/resume scenarios,
explicitly sanitize uC when we sanitize GEM, to force a uC reload
and start from a known beginning.

v2: don't forget about reset path (Daniele)
    sanitize uc before gem initiated full reset (Daniele)
v3: drop redundant disable_communication in init_hw (Daniele)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180312130308.22952-3-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/i915_gem.c    |  2 ++
 drivers/gpu/drm/i915/intel_guc.h   |  6 ++++++
 drivers/gpu/drm/i915/intel_huc.h   |  6 ++++++
 drivers/gpu/drm/i915/intel_uc.c    | 19 ++++++++++++++++++-
 drivers/gpu/drm/i915/intel_uc.h    |  1 +
 drivers/gpu/drm/i915/intel_uc_fw.h |  6 ++++++
 6 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e58b741e2ec0..05b0724b60dc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2998,6 +2998,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
 	}
 
 	i915_gem_revoke_fences(dev_priv);
+	intel_uc_sanitize(dev_priv);
 
 	return err;
 }
@@ -4978,6 +4979,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 	 * machines is a good idea, we don't - just in case it leaves the
 	 * machine in an unusable condition.
 	 */
+	intel_uc_sanitize(dev_priv);
 	i915_gem_sanitize(dev_priv);
 
 	intel_runtime_pm_put(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 6d5aebe55039..d878160ee6e5 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -133,4 +133,10 @@ int intel_guc_resume(struct intel_guc *guc);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
 u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
 
+static inline int intel_guc_sanitize(struct intel_guc *guc)
+{
+	intel_uc_fw_sanitize(&guc->fw);
+	return 0;
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index 5d6e804f9771..b1858503c451 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -38,4 +38,10 @@ struct intel_huc {
 void intel_huc_init_early(struct intel_huc *huc);
 int intel_huc_auth(struct intel_huc *huc);
 
+static inline int intel_huc_sanitize(struct intel_huc *huc)
+{
+	intel_uc_fw_sanitize(&huc->fw);
+	return 0;
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 6dec6d67f4ad..9d5ffd74c16a 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -334,6 +334,24 @@ void intel_uc_fini(struct drm_i915_private *dev_priv)
 	intel_guc_fini(guc);
 }
 
+void intel_uc_sanitize(struct drm_i915_private *i915)
+{
+	struct intel_guc *guc = &i915->guc;
+	struct intel_huc *huc = &i915->huc;
+
+	if (!USES_GUC(i915))
+		return;
+
+	GEM_BUG_ON(!HAS_GUC(i915));
+
+	guc_disable_communication(guc);
+
+	intel_huc_sanitize(huc);
+	intel_guc_sanitize(guc);
+
+	__intel_uc_reset_hw(i915);
+}
+
 int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
@@ -345,7 +363,6 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 
 	GEM_BUG_ON(!HAS_GUC(dev_priv));
 
-	guc_disable_communication(guc);
 	gen9_reset_guc_interrupts(dev_priv);
 
 	/* init WOPCM */
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 49b5b2f274bc..0a2b413e9cd0 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -36,6 +36,7 @@ void intel_uc_init_fw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
 int intel_uc_init_misc(struct drm_i915_private *dev_priv);
 void intel_uc_fini_misc(struct drm_i915_private *dev_priv);
+void intel_uc_sanitize(struct drm_i915_private *dev_priv);
 int intel_uc_init_hw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
 int intel_uc_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index d5fd4609c785..2601521a4006 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -115,6 +115,12 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
 	return uc_fw->path != NULL;
 }
 
+static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
+{
+	if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS)
+		uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+}
+
 void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 		       struct intel_uc_fw *uc_fw);
 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,

From 7aa0b14ede643fb7c33aaa8e0041de04a0d6f278 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 13 Mar 2018 00:40:54 +0000
Subject: [PATCH 0024/1286] drm/i915: Remove variable length arrays from sseu
 debugfs printers

In order to enable -Wvla, and so prevent new variable length arrays from
being used in i915.ko, we first must remove the existing VLAs. Inside
i915_print_sseu_info(), VLAs are used because the actual size of the sseu
depends on the platform. Replace the VLAs with the maximum size required.
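
As a rough illustration (a sketch, not the exact hunks below), the change
replaces a runtime-sized stack array with one sized by a local upper bound:

    /* before: VLA, sized at run time from the device info */
    u32 s_reg[info->sseu.max_slices];

    /* after: fixed upper bound, scoped with a local define */
    #define SS_MAX 6
    u32 s_reg[SS_MAX];
    /* ... only the first info->sseu.max_slices entries are used ... */
    #undef SS_MAX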

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180313004055.25411-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c4cc8fef11a0..0eac7dcdddbf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4312,9 +4312,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
 					  struct sseu_dev_info *sseu)
 {
-	int ss_max = 2;
+#define SS_MAX 2
+	const int ss_max = SS_MAX;
+	u32 sig1[SS_MAX], sig2[SS_MAX];
 	int ss;
-	u32 sig1[ss_max], sig2[ss_max];
 
 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
@@ -4338,15 +4339,15 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
 		sseu->eu_per_subslice = max_t(unsigned int,
 					      sseu->eu_per_subslice, eu_cnt);
 	}
+#undef SS_MAX
 }
 
 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
 				     struct sseu_dev_info *sseu)
 {
 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	u32 s_reg[6], eu_reg[2 * 4], eu_mask[2];
 	int s, ss;
-	u32 s_reg[info->sseu.max_slices];
-	u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
 
 	for (s = 0; s < info->sseu.max_slices; s++) {
 		/*
@@ -4399,9 +4400,8 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 				    struct sseu_dev_info *sseu)
 {
 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	u32 s_reg[3], eu_reg[2 * 4], eu_mask[2];
 	int s, ss;
-	u32 s_reg[info->sseu.max_slices];
-	u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
 
 	for (s = 0; s < info->sseu.max_slices; s++) {
 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));

From c5c2b11894f4f862cf243b955ac59bb1a5fe61b9 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 13 Mar 2018 00:40:55 +0000
Subject: [PATCH 0025/1286] drm/i915: Warn against variable length arrays

VLAs are strongly discouraged in the kernel due to the ambiguity they impose
on the limited stack space and the security concerns around manipulating the
stack frame. Add -Wvla to our compiler flags so that CI rejects any new ones.
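
For illustration, the kind of declaration the new flag warns about (and that
CI will therefore reject) looks like this sketch:

    void example(int n)
    {
        unsigned int regs[n];   /* run-time sized stack array: -Wvla warning */

        regs[0] = 0;
    }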

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180313004055.25411-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 4eee91a3a236..fcb8a7b27ae2 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -12,7 +12,7 @@
 # Note the danger in using -Wall -Wextra is that when CI updates gcc we
 # will most likely get a sudden build breakage... Hopefully we will fix
 # new warnings before CI updates!
-subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y := -Wall -Wextra -Wvla
 subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
 subdir-ccflags-y += $(call cc-disable-warning, type-limits)
 subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)

From c7fb3c6c1893fddbbd39e13066489050c29397c1 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 13 Mar 2018 11:31:49 +0000
Subject: [PATCH 0026/1286] drm/i915: Use sseu size for determining eu_regs[]

eu_reg[] is written 2*max_slices times (like s_reg[]) but oddly read
2*max_slices + max_subslices/2 times. Allocate the array large enough for
the writes to avoid overwriting our stack, and worry about the read logic
later.

Fixes: 7aa0b14ede64 ("drm/i915: Remove variable length arrays from sseu debugfs printers")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105479
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180313113149.1094-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0eac7dcdddbf..bc3f7d546d53 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4345,8 +4345,9 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
 				     struct sseu_dev_info *sseu)
 {
+#define SS_MAX 6
 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
-	u32 s_reg[6], eu_reg[2 * 4], eu_mask[2];
+	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
 	int s, ss;
 
 	for (s = 0; s < info->sseu.max_slices; s++) {
@@ -4394,13 +4395,15 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
 						      eu_cnt);
 		}
 	}
+#undef SS_MAX
 }
 
 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 				    struct sseu_dev_info *sseu)
 {
+#define SS_MAX 3
 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
-	u32 s_reg[3], eu_reg[2 * 4], eu_mask[2];
+	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
 	int s, ss;
 
 	for (s = 0; s < info->sseu.max_slices; s++) {
@@ -4448,6 +4451,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 						      eu_cnt);
 		}
 	}
+#undef SS_MAX
 }
 
 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,

From 07bcd99b80477cc4f1b878afb3dec26877fa0ed0 Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Tue, 6 Mar 2018 19:34:18 -0800
Subject: [PATCH 0027/1286] drm/i915/frontbuffer: Pull frontbuffer_flush out of
 gem_obj_pin_to_display
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

i915_gem_obj_pin_to_display() calls frontbuffer_flush with origin set to
DIRTYFB. The callers, however, are in a better position to decide whether
hardware frontbuffer tracking can do the flush for us. For example, legacy
cursor updates, like flips, write to MMIO registers, which then triggers a
PSR flush by the hardware. Moving frontbuffer_flush out enables us to skip a
software initiated flush by setting origin to FLIP. Thanks to Chris for the
idea.
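
In caller terms, the pattern after this change is roughly (a sketch, not the
exact hunks below):

    vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
    if (IS_ERR(vma))
        return PTR_ERR(vma);

    /* pinning no longer flushes; the caller now picks the origin itself */
    intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);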

v2:
Rebased due to Ville adding intel_plane_pin_fb().
Minor code reordering as fb_obj_flush doesn't need struct_mutex (Chris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307033420.3086-1-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_gem.c      | 9 ++++-----
 drivers/gpu/drm/i915/intel_display.c | 9 +++++++--
 drivers/gpu/drm/i915/intel_fbdev.c   | 5 +++--
 drivers/gpu/drm/i915/intel_overlay.c | 1 +
 4 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 05b0724b60dc..58f8cf7d3b40 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4094,9 +4094,10 @@ out:
 }
 
 /*
- * Prepare buffer for display plane (scanout, cursors, etc).
- * Can be called from an uninterruptible phase (modesetting) and allows
- * any flushes to be pipelined (for pageflips).
+ * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
+ * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
+ * (for pageflips). We only flush the caches while preparing the buffer for
+ * display, the callers are responsible for frontbuffer flush.
  */
 struct i915_vma *
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -4152,9 +4153,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
-	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
 	__i915_gem_object_flush_for_display(obj);
-	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f424fff477f6..1b2a402e32fa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2858,6 +2858,9 @@ valid_fb:
 		return;
 	}
 
+	obj = intel_fb_obj(fb);
+	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+
 	plane_state->src_x = 0;
 	plane_state->src_y = 0;
 	plane_state->src_w = fb->width << 16;
@@ -2871,7 +2874,6 @@ valid_fb:
 	intel_state->base.src = drm_plane_state_src(plane_state);
 	intel_state->base.dst = drm_plane_state_dest(plane_state);
 
-	obj = intel_fb_obj(fb);
 	if (i915_gem_object_is_tiled(obj))
 		dev_priv->preserve_bios_swizzle = true;
 
@@ -12793,6 +12795,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	if (ret)
 		return ret;
 
+	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+
 	if (!new_state->fence) { /* implicit fencing */
 		struct dma_fence *fence;
 
@@ -13186,8 +13190,9 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	if (ret)
 		goto out_unlock;
 
-	old_fb = old_plane_state->fb;
+	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
 
+	old_fb = old_plane_state->fb;
 	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
 			  intel_plane->frontbuffer_bit);
 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6f12adc06365..65a3313723c9 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -221,6 +221,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		goto out_unlock;
 	}
 
+	fb = &ifbdev->fb->base;
+	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+
 	info = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(info)) {
 		DRM_ERROR("Failed to allocate fb_info\n");
@@ -230,8 +233,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
 	info->par = helper;
 
-	fb = &ifbdev->fb->base;
-
 	ifbdev->helper.fb = fb;
 
 	strcpy(info->fix.id, "inteldrmfb");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 36671a937fa4..c2f10d899329 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -807,6 +807,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		ret = PTR_ERR(vma);
 		goto out_pin_section;
 	}
+	intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
 
 	ret = i915_vma_put_fence(vma);
 	if (ret)

From a694e226fbaefbf3101982e54ca2f014292c540f Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Tue, 6 Mar 2018 19:34:19 -0800
Subject: [PATCH 0028/1286] drm/i915/frontbuffer: HW tracking for cursor moves
 to fix PSR lags.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

DRM_IOCTL_MODE_CURSOR results in a frontbuffer flush before the cursor
plane MMIOs are written to. But this flush should not be necessary for PSR,
as hardware tracking triggers a PSR exit when the MMIOs are written. As for
FBC, the spec says "Flips or changes to plane size and panning" cause FBC to
be nuked. Use origin == ORIGIN_FLIP so that features can ignore cursor
updates in their frontbuffer_flush implementations.

 /sys/kernel/debug/dri/0/i915_fbc_status shows
"Compressing: yes" when I move the cursor around.

v3: Use ORIGIN_FLIP now that pin_to_display does not flush frontbuffer.
v2: Update comment in i915_gem_object_pin_to_display_plane. (Chris)

Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307033420.3086-2-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b2a402e32fa..a7bfa238054c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13190,7 +13190,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
 	if (ret)
 		goto out_unlock;
 
-	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
 
 	old_fb = old_plane_state->fb;
 	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),

From 5baf63cc4d7879474221c5a32e4c2adc7ed33add Mon Sep 17 00:00:00 2001
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Date: Tue, 6 Mar 2018 19:34:20 -0800
Subject: [PATCH 0029/1286] drm/i915/psr: Use more PSR HW tracking.

So far we have been using frontbuffer tracking for everything, ignoring
that PSR has capable HW tracking for many modern GPU usages on Core
platforms and newer Atom ones.

One reason for that is that we were trying to keep the same infrastructure
in place for VLV/CHV as for the rest of the platforms. Another is that, when
this infrastructure was created, the frontbuffer-tracking origin wasn't as
good and stable as it is today, after Paulo reworked it to handle the FBC
cases.

However, this PSR implementation without HW tracking died on gen8LP, and
newer platforms are starting to demand more HW tracking, especially with
the PSR2 cases in mind.

By completely disabling and re-enabling PSR every time we believe someone is
going to change the frontbuffer content, we don't allow the PSR HW tracking
to do its job, and in particular we compromise the whole idea of the PSR2
case, where the HW tracking detects only the damaged area and does a partial
screen update.

So, from now on, on the platforms that have hw_tracking, let's rely more on
the HW tracking.

This is also the approach used by other drivers and the one better validated
by SV teams, so I hope it will lead us to fewer mysterious bugs.
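
In essence, both intel_psr_invalidate() and intel_psr_flush() now bail out
early for flip-originated updates on those platforms (condensed from the
hunks below):

    if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
        return; /* let the PSR HW tracking handle the exit and re-entry */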

v2: Only do this for platforms that actually have hw tracking.

v3 from DK
Do this only for flips, small gradual changes are better.

Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Cc: Jim Bride <jim.bride@linux.intel.com>
Cc: Vathsala Nagaraju <vathsala.nagaraju@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Jose Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180307033420.3086-3-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h          |  1 +
 drivers/gpu/drm/i915/intel_drv.h         |  3 ++-
 drivers/gpu/drm/i915/intel_frontbuffer.c |  2 +-
 drivers/gpu/drm/i915/intel_psr.c         | 10 +++++++++-
 4 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d35f805cb177..74b0e9d8ff62 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -606,6 +606,7 @@ struct i915_psr {
 	bool y_cord_support;
 	bool colorimetry_support;
 	bool alpm;
+	bool has_hw_tracking;
 
 	void (*enable_source)(struct intel_dp *,
 			      const struct intel_crtc_state *);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 83e5ca889d9c..de6db9196638 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1876,7 +1876,8 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 void intel_psr_disable(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *old_crtc_state);
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
-			  unsigned frontbuffer_bits);
+			  unsigned frontbuffer_bits,
+			  enum fb_op_origin origin);
 void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits,
 		     enum fb_op_origin origin);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 3a8d3d06c26a..7fff0a0eceb4 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -80,7 +80,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 	}
 
 	might_sleep();
-	intel_psr_invalidate(dev_priv, frontbuffer_bits);
+	intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
 	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
 }
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 975ebb51c7af..a079b62a148b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -957,6 +957,7 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
  * intel_psr_invalidate - Invalidade PSR
  * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the invalidate
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
  * with the software frontbuffer tracking. This function gets called every
@@ -966,7 +967,7 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
-			  unsigned frontbuffer_bits)
+			  unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
 	struct drm_crtc *crtc;
 	enum pipe pipe;
@@ -974,6 +975,9 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 	if (!CAN_PSR(dev_priv))
 		return;
 
+	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+		return;
+
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
 		mutex_unlock(&dev_priv->psr.lock);
@@ -1014,6 +1018,9 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 	if (!CAN_PSR(dev_priv))
 		return;
 
+	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+		return;
+
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
 		mutex_unlock(&dev_priv->psr.lock);
@@ -1105,6 +1112,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 		dev_priv->psr.activate = vlv_psr_activate;
 		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
 	} else {
+		dev_priv->psr.has_hw_tracking = true;
 		dev_priv->psr.enable_source = hsw_psr_enable_source;
 		dev_priv->psr.disable_source = hsw_psr_disable;
 		dev_priv->psr.enable_sink = hsw_psr_enable_sink;

From be74229bd5456729a9e81dabc8aac2fb58a69492 Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Mon, 12 Mar 2018 21:42:11 -0700
Subject: [PATCH 0030/1286] drm/i915/psr: Remove PSR active flag from debugfs

The flag becomes misleading now that flips and cursor moves do not modify
its state, as HW takes care of exiting PSR (when HW tracking is enabled).

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180313044211.27105-1-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bc3f7d546d53..972014b2497d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2565,7 +2565,6 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 	mutex_lock(&dev_priv->psr.lock);
 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
-	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
 		   dev_priv->psr.busy_frontbuffer_bits);
 	seq_printf(m, "Re-enable work scheduled: %s\n",

From a8ada068a5025d738c870851d023b80cf6be0c95 Mon Sep 17 00:00:00 2001
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Date: Mon, 12 Mar 2018 14:05:28 -0700
Subject: [PATCH 0031/1286] drm/i915: Move CUR SURFLIVE definition to a better
 place.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

No functional change, but let's keep the definitions clean and keep the
cursor related register definitions together.

v2: Fix mixed capitalization on the same register. Change the name to match
    the original register name. (by Ville).
    Also fix the name in the code, s/surlive/surflive, and in the subject,
    s/cur_surlife/cur surflive/.

Suggested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180312210528.7905-1-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h  | 5 ++---
 drivers/gpu/drm/i915/intel_psr.c | 4 ++--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 60febfb33154..38d4be46462f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6009,6 +6009,7 @@ enum {
 #define CURSIZE			_MMIO(0x700a0) /* 845/865 */
 #define _CUR_FBC_CTL_A		0x700a0 /* ivb+ */
 #define   CUR_FBC_CTL_EN	(1 << 31)
+#define _CURASURFLIVE		0x700ac /* g4x+ */
 #define _CURBCNTR		0x700c0
 #define _CURBBASE		0x700c4
 #define _CURBPOS		0x700c8
@@ -6025,6 +6026,7 @@ enum {
 #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
 #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
 #define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A)
+#define CURSURFLIVE(pipe) _CURSOR2(pipe, _CURASURFLIVE)
 
 #define CURSOR_A_OFFSET 0x70080
 #define CURSOR_B_OFFSET 0x700c0
@@ -6032,9 +6034,6 @@ enum {
 #define IVB_CURSOR_B_OFFSET 0x71080
 #define IVB_CURSOR_C_OFFSET 0x72080
 
-#define _CUR_SURLIVE		0x700AC
-#define CUR_SURLIVE(pipe)	_CURSOR2(pipe, _CUR_SURLIVE)
-
 /* Display A control */
 #define _DSPACNTR				0x70180
 #define   DISPLAY_PLANE_ENABLE			(1<<31)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a079b62a148b..317cb4a12693 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -1044,11 +1044,11 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 			 * This documented WA for bxt can be safely applied
 			 * broadly so we can force HW tracking to exit PSR
 			 * instead of disabling and re-enabling.
-			 * Workaround tells us to write 0 to CUR_SURLIVE_A,
+			 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
 			 * but it makes more sense write to the current active
 			 * pipe.
 			 */
-			I915_WRITE(CUR_SURLIVE(pipe), 0);
+			I915_WRITE(CURSURFLIVE(pipe), 0);
 		}
 	}
 

From 629820fcd0ddbb7955a37c075e82756da69ea908 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 9 Mar 2018 10:11:14 +0000
Subject: [PATCH 0032/1286] drm/i915: Show GEM_TRACE when detecting a failed
 GPU idle

If we time out waiting for the GPU to idle, something went seriously wrong.
We currently dump the engine state, but we can also dump the ftrace buffer
showing our last operations (when available).

In passing, note that since commit 559e040f1f08 ("drm/i915: Show the GPU
state when declaring wedged"), we now show the engine state twice: once on
detecting the failed idle and then again on declaring wedged.

v2: ftrace_dump() takes a parameter specifying whether to dump all cpu
buffers or the local cpu's.
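
The resulting call site is then simply (condensed from the hunks below):

    if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
        dev_err(i915->drm.dev,
                "Failed to idle engines, declaring wedged!\n");
        GEM_TRACE_DUMP(); /* no-op unless CONFIG_DRM_I915_TRACE_GEM is set */
        i915_gem_set_wedged(i915);
        return -EIO;
    }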

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180309101114.1138-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c | 11 +----------
 drivers/gpu/drm/i915/i915_gem.h |  2 ++
 2 files changed, 3 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58f8cf7d3b40..d0624c57d9a6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3672,16 +3672,7 @@ static int wait_for_engines(struct drm_i915_private *i915)
 	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
 		dev_err(i915->drm.dev,
 			"Failed to idle engines, declaring wedged!\n");
-		if (drm_debug & DRM_UT_DRIVER) {
-			struct drm_printer p = drm_debug_printer(__func__);
-			struct intel_engine_cs *engine;
-			enum intel_engine_id id;
-
-			for_each_engine(engine, i915, id)
-				intel_engine_dump(engine, &p,
-						  "%s\n", engine->name);
-		}
-
+		GEM_TRACE_DUMP();
 		i915_gem_set_wedged(i915);
 		return -EIO;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index f54c4ff74ded..8922344fc21b 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -53,8 +53,10 @@
 
 #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
 #define GEM_TRACE(...) trace_printk(__VA_ARGS__)
+#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
 #else
 #define GEM_TRACE(...) do { } while (0)
+#define GEM_TRACE_DUMP() do { } while (0)
 #endif
 
 #define I915_NUM_ENGINES 8

From 62801bf615679293957ecdf37cc093a18158a201 Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Mon, 12 Mar 2018 21:09:54 -0700
Subject: [PATCH 0033/1286] drm/i915/psr: Comment to clarify SRD_DEBUG is
 called PSR_MASK SKL+

What was called SRD_DEBUG (0x6F860) on HSW and BDW was renamed to PSR_MASK
from SKL onwards; add a note next to the macro definition. There is also a
different PSR_DEBUG on SKL+ to add to the confusion.

Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180313040954.6289-1-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 38d4be46462f..761bd3a4c5c1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4180,13 +4180,13 @@ enum {
 #define EDP_PSR_PERF_CNT		_MMIO(dev_priv->psr_mmio_base + 0x44)
 #define   EDP_PSR_PERF_CNT_MASK		0xffffff
 
-#define EDP_PSR_DEBUG				_MMIO(dev_priv->psr_mmio_base + 0x60)
+#define EDP_PSR_DEBUG				_MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
 #define   EDP_PSR_DEBUG_MASK_MAX_SLEEP         (1<<28)
 #define   EDP_PSR_DEBUG_MASK_LPSP              (1<<27)
 #define   EDP_PSR_DEBUG_MASK_MEMUP             (1<<26)
 #define   EDP_PSR_DEBUG_MASK_HPD               (1<<25)
 #define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1<<16)
-#define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15)
+#define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
 
 #define EDP_PSR2_CTL			_MMIO(0x6f900)
 #define   EDP_PSR2_ENABLE		(1<<31)

From 3c009e3c468a9c095343febca7de6397c0e78b7d Mon Sep 17 00:00:00 2001
From: Jackie Li <yaodong.li@intel.com>
Date: Tue, 13 Mar 2018 17:32:49 -0700
Subject: [PATCH 0034/1286] drm/i915/guc: Rename guc_ggtt_offset to
 intel_guc_ggtt_offset

GuC related exported functions should start with the "intel_guc_" prefix and
take intel_guc as the first parameter, since they are GuC related. The
current guc_ggtt_offset() fails to follow this code convention, which is a
problem for future patches that need to access intel_guc data to verify the
GGTT offset against the GuC WOPCM top.

This patch renames guc_ggtt_offset to intel_guc_ggtt_offset and updates the
related code to pass an intel_guc pointer to this function call, so that we
have a unified coding style for GuC code and also enable future patches to
get GuC related data from intel_guc to do the offset verification. Meanwhile,
this patch also moves GUC_GGTT_TOP from intel_guc_reg.h to intel_guc.h since
it is not a GuC register related definition.
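
At the call sites the change is mechanical (sketch):

    /* before: the helper cannot see any GuC state */
    offset = guc_ggtt_offset(vma);

    /* after: intel_guc is explicit, so the helper can validate the offset
     * against GuC specific limits such as the WOPCM top
     */
    offset = intel_guc_ggtt_offset(guc, vma);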

v8:
 - Fixed coding style issues and moved GUC_GGTT_TOP to intel_guc.h (Sagar)
 - Updated commit message to explain the reason and motivation for adding
   intel_guc as the first parameter of intel_guc_ggtt_offset (Chris)

v9:
 - Fixed code alignment issue due to line break (Chris)

v10:
 - Removed unnecessary comments and redundant code, and avoided reusing a
   variable, to avoid potential issues (Joonas)

v13:
 - Updated the ordering of s-o-b/cc/r-b tags (Sagar)

Signed-off-by: Jackie Li <yaodong.li@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com> (v8)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v9)
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com> (v11)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v12)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1520987574-19351-1-git-send-email-yaodong.li@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c            | 11 ++++++-----
 drivers/gpu/drm/i915/intel_guc.h            | 14 ++++++++++++--
 drivers/gpu/drm/i915/intel_guc_ads.c        |  8 ++++----
 drivers/gpu/drm/i915/intel_guc_ct.c         |  5 +++--
 drivers/gpu/drm/i915/intel_guc_fw.c         |  2 +-
 drivers/gpu/drm/i915/intel_guc_log.c        |  2 +-
 drivers/gpu/drm/i915/intel_guc_reg.h        |  3 ---
 drivers/gpu/drm/i915/intel_guc_submission.c | 10 +++++-----
 drivers/gpu/drm/i915/intel_huc.c            |  3 ++-
 drivers/gpu/drm/i915/intel_huc_fw.c         |  3 ++-
 10 files changed, 36 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 25f92291fd40..78463842ea7b 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -269,8 +269,9 @@ void intel_guc_init_params(struct intel_guc *guc)
 
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (USES_GUC_SUBMISSION(dev_priv)) {
-		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
-		u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+		u32 ads = intel_guc_ggtt_offset(guc,
+						guc->ads_vma) >> PAGE_SHIFT;
+		u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
 		u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
 
 		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
@@ -447,7 +448,7 @@ int intel_guc_suspend(struct intel_guc *guc)
 	u32 data[] = {
 		INTEL_GUC_ACTION_ENTER_S_STATE,
 		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
-		guc_ggtt_offset(guc->shared_data)
+		intel_guc_ggtt_offset(guc, guc->shared_data)
 	};
 
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
@@ -471,7 +472,7 @@ int intel_guc_reset_engine(struct intel_guc *guc,
 	data[3] = 0;
 	data[4] = 0;
 	data[5] = guc->execbuf_client->stage_id;
-	data[6] = guc_ggtt_offset(guc->shared_data);
+	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
 
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
@@ -485,7 +486,7 @@ int intel_guc_resume(struct intel_guc *guc)
 	u32 data[] = {
 		INTEL_GUC_ACTION_EXIT_S_STATE,
 		GUC_POWER_D0,
-		guc_ggtt_offset(guc->shared_data)
+		intel_guc_ggtt_offset(guc, guc->shared_data)
 	};
 
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index d878160ee6e5..a1be04eaafda 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -100,13 +100,23 @@ static inline void intel_guc_notify(struct intel_guc *guc)
 	guc->notify(guc);
 }
 
-/*
+/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
+#define GUC_GGTT_TOP	0xFEE00000
+
+/**
+ * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
+ * @guc: intel_guc structure.
+ * @vma: i915 graphics virtual memory area.
+ *
  * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
  * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
  * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
  * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
+ *
+ * Return: GGTT offset that meets the GuC gfx address requirement.
  */
-static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
+					struct i915_vma *vma)
 {
 	u32 offset = i915_ggtt_offset(vma);
 
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index ac627534667d..334cb5202e1c 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -75,7 +75,7 @@ static void guc_policies_init(struct guc_policies *policies)
 int intel_guc_ads_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct i915_vma *vma;
+	struct i915_vma *vma, *kernel_ctx_vma;
 	struct page *page;
 	/* The ads obj includes the struct itself and buffers passed to GuC */
 	struct {
@@ -121,9 +121,9 @@ int intel_guc_ads_create(struct intel_guc *guc)
 	 * to find it. Note that we have to skip our header (1 page),
 	 * because our GuC shared data is there.
 	 */
+	kernel_ctx_vma = dev_priv->kernel_context->engine[RCS].state;
 	blob->ads.golden_context_lrca =
-		guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) +
-		skipped_offset;
+		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
 
 	/*
 	 * The GuC expects us to exclude the portion of the context image that
@@ -135,7 +135,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
 		blob->ads.eng_state_size[engine->guc_id] =
 			engine->context_size - skipped_size;
 
-	base = guc_ggtt_offset(vma);
+	base = intel_guc_ggtt_offset(guc, vma);
 	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
 	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
 	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 24ad55752396..0a0d3d523c23 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -156,7 +156,8 @@ static int ctch_init(struct intel_guc *guc,
 		err = PTR_ERR(blob);
 		goto err_vma;
 	}
-	DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma));
+	DRM_DEBUG_DRIVER("CT: vma base=%#x\n",
+			 intel_guc_ggtt_offset(guc, ctch->vma));
 
 	/* store pointers to desc and cmds */
 	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
@@ -202,7 +203,7 @@ static int ctch_open(struct intel_guc *guc,
 	}
 
 	/* vma should be already allocated and map'ed */
-	base = guc_ggtt_offset(ctch->vma);
+	base = intel_guc_ggtt_offset(guc, ctch->vma);
 
 	/* (re)initialize descriptors
 	 * cmds buffers are in the second half of the blob page
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index d07f2b985f1c..978668cf82cc 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -165,7 +165,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
 	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
 	/* Set the source address for the new blob */
-	offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
+	offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
 	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
 	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 90b395f34808..b9c7bd745565 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -581,7 +581,7 @@ int intel_guc_log_create(struct intel_guc *guc)
 		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
 		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
-	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+	offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT;
 	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index 19a9247c5664..711e9e974b7c 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -80,9 +80,6 @@
 #define   GUC_WOPCM_TOP			  (0x80 << 12)	/* 512KB */
 #define   BXT_GUC_WOPCM_RC6_RESERVED	  (0x10 << 12)	/* 64KB  */
 
-/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
-#define GUC_GGTT_TOP			0xFEE00000
-
 #define GEN8_GT_PM_CONFIG		_MMIO(0x138140)
 #define GEN9LP_GT_PM_CONFIG		_MMIO(0x138140)
 #define GEN9_GT_PM_CONFIG		_MMIO(0x13816c)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 8a8ad2fe158d..33af2930fc79 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -386,8 +386,8 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 		lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
 		/* The state page is after PPHWSP */
-		lrc->ring_lrca =
-			guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+		lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) +
+				 LRC_STATE_PN * PAGE_SIZE;
 
 		/* XXX: In direct submission, the GuC wants the HW context id
 		 * here. In proxy submission, it wants the stage id
@@ -395,7 +395,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 		lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
 				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
 
-		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
+		lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
 		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
@@ -411,7 +411,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 	 * The doorbell, process descriptor, and workqueue are all parts
 	 * of the client object, which the GuC will reference via the GGTT
 	 */
-	gfx_addr = guc_ggtt_offset(client->vma);
+	gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
 	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
 				client->doorbell_offset;
 	desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
@@ -584,7 +584,7 @@ static void inject_preempt_context(struct work_struct *work)
 	data[3] = engine->guc_id;
 	data[4] = guc->execbuf_client->priority;
 	data[5] = guc->execbuf_client->stage_id;
-	data[6] = guc_ggtt_offset(guc->shared_data);
+	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
 
 	if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
 		execlists_clear_active(&engine->execlists,
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 65e2afb9b955..858c9543630d 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -63,7 +63,8 @@ int intel_huc_auth(struct intel_huc *huc)
 	}
 
 	ret = intel_guc_auth_huc(guc,
-				 guc_ggtt_offset(vma) + huc->fw.rsa_offset);
+				 intel_guc_ggtt_offset(guc, vma) +
+				 huc->fw.rsa_offset);
 	if (ret) {
 		DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
 		goto fail_unpin;
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index c66afa9b989a..bb0f8b7a8d2b 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -118,7 +118,8 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 	/* Set the source address for the uCode */
-	offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
+	offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
+		 huc_fw->header_offset;
 	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
 	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 

From 6b0478fb722ae638ad747e17251e90bbf1b7969b Mon Sep 17 00:00:00 2001
From: Jackie Li <yaodong.li@intel.com>
Date: Tue, 13 Mar 2018 17:32:50 -0700
Subject: [PATCH 0035/1286] drm/i915: Implement dynamic GuC WOPCM offset and
 size calculation

Hardware may have specific restrictions on the GuC WOPCM offset and size. On
Gen9, the value of the GuC WOPCM size register needs to be larger than the
value of the GuC WOPCM offset register plus a Gen9 specific offset (144KB)
for reserved GuC WOPCM. Failing to enforce such a restriction on the GuC
WOPCM size will lead to GuC firmware execution failures. On the other hand,
with the current static GuC WOPCM offset and size values (512KB for both
offset and size), the GuC WOPCM size verification will fail on Gen9, even
though it can be fixed by lowering the GuC WOPCM offset, calculating its
value based on the HuC firmware size (which is likely less than 200KB on
Gen9), so that we end up with a GuC WOPCM size value large enough to pass
the GuC WOPCM size check.

This patch updates the reserved GuC WOPCM size for the RC6 context on Gen9
to 24KB to strictly align with the Gen9 GuC WOPCM layout. It also adds
support to verify the GuC WOPCM size against the Gen9 hardware restrictions.
To meet all of the above requirements, let's provide dynamic partitioning of
the WOPCM based on platform specific HuC/GuC firmware sizes.
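
A minimal sketch of the Gen9 restriction being enforced (the names below are
illustrative only, not the actual intel_wopcm.c interface):

    #define GEN9_GUC_WOPCM_OFFSET_RESERVED (144 << 10) /* 144KB, see above */

    static int check_gen9_guc_wopcm(u32 guc_wopcm_base, u32 guc_wopcm_size)
    {
        /* size register value must exceed offset register value + 144KB */
        if (guc_wopcm_size <= guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET_RESERVED)
            return -E2BIG;

        return 0;
    }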

v2:
 - Removed intel_wopcm_init (Ville/Sagar/Joonas)
 - Renamed and Moved the intel_wopcm_partition into intel_guc (Sagar)
 - Removed unnecessary function calls (Joonas)
 - Init GuC WOPCM partition as soon as firmware fetching is completed

v3:
 - Fixed indentation issues (Chris)
 - Removed layering violation code (Chris/Michal)
 - Created separate files for GuC wopcm code (Michal)
 - Used inline function to avoid code duplication (Michal)

v4:
 - Preset the GuC WOPCM top during early GuC init (Chris)
 - Fail intel_uc_init_hw() as soon as GuC WOPCM partitioning failed

v5:
 - Moved GuC DMA WOPCM register updating code into intel_wopcm.c
 - Took care of the locking status before writing to GuC DMA
   Write-Once registers. (Joonas)

v6:
 - Made sure the GuC WOPCM size is a multiple of 4K (4K aligned)

v8:
 - Updated comments and fixed naming issues (Sagar/Joonas)
 - Updated commit message to include more description about the hardware
   restriction on GuC WOPCM size (Sagar)

v9:
 - Minor changes to variable names and code comments (Sagar)
 - Added detailed GuC WOPCM layout drawing (Sagar/Michal)
 - Refined macro definitions to be reader friendly (Michal)
 - Removed redundant check of the valid flag (Michal)
 - Unified first parameter for exported GuC WOPCM functions (Michal)
 - Refined the name and parameter list of hardware restriction checking
   functions (Michal)

v10:
 - Used shorter function name for internal functions (Joonas)
 - Moved init-early function into c file (Joonas)
 - Consolidated and removed redundant size checks (Joonas/Michal)
 - Removed unnecessary unlikely() from code which is only called once
   during boot (Joonas)
 - More fixes to kernel-doc format and content (Michal)
 - Avoided the use of PAGE_MASK for 4K pages (Michal)
 - Added error log messages to error paths (Michal)

v11:
 - Replaced intel_guc_wopcm with more generic intel_wopcm and attached
   intel_wopcm to drm_i915_private instead intel_guc (Michal)
 - dynamic calculation of GuC non-wopcm memory start (a.k.a WOPCM Top
   offset from GuC WOPCM base) (Michal)
 - Moved WOPCM marco definitions into .c source file (Michal)
 - Exported WOPCM layout diagram as kernel-doc (Michal)

v12:
 - Updated naming, function kernel-doc to align with new changes (Michal)

v13:
 - Updated the ordering of s-o-b/cc/r-b tags (Sagar)
 - Corrected one tense error in comment (Sagar)
 - Corrected typos and removed spurious comments (Joonas)

Bspec: 12690

Signed-off-by: Jackie Li <yaodong.li@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Sujaritha Sundaresan <sujaritha.sundaresan@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: John Spotswood <john.a.spotswood@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com> (v8)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v9)
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com> (v11)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v12)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1520987574-19351-2-git-send-email-yaodong.li@intel.com
---
 drivers/gpu/drm/i915/Makefile           |   3 +-
 drivers/gpu/drm/i915/i915_drv.c         |   1 +
 drivers/gpu/drm/i915/i915_drv.h         |   8 ++
 drivers/gpu/drm/i915/i915_gem.c         |   4 +
 drivers/gpu/drm/i915/i915_gem_context.c |   5 +-
 drivers/gpu/drm/i915/intel_guc.c        |  66 +++++++--
 drivers/gpu/drm/i915/intel_guc.h        |  18 ++-
 drivers/gpu/drm/i915/intel_guc_reg.h    |   8 +-
 drivers/gpu/drm/i915/intel_huc.c        |   2 +-
 drivers/gpu/drm/i915/intel_uc.c         |   6 +-
 drivers/gpu/drm/i915/intel_uc_fw.c      |  13 +-
 drivers/gpu/drm/i915/intel_uc_fw.h      |  16 +++
 drivers/gpu/drm/i915/intel_wopcm.c      | 182 ++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_wopcm.h      |  30 ++++
 14 files changed, 321 insertions(+), 41 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/intel_wopcm.c
 create mode 100644 drivers/gpu/drm/i915/intel_wopcm.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fcb8a7b27ae2..552e43e9663f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -79,7 +79,8 @@ i915-y += i915_cmd_parser.o \
 	  intel_lrc.o \
 	  intel_mocs.o \
 	  intel_ringbuffer.o \
-	  intel_uncore.o
+	  intel_uncore.o \
+	  intel_wopcm.o
 
 # general-purpose microcontroller (GuC) support
 i915-y += intel_uc.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0126b222ab7f..f03555efc520 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -919,6 +919,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	mutex_init(&dev_priv->wm.wm_mutex);
 	mutex_init(&dev_priv->pps_mutex);
 
+	intel_wopcm_init_early(&dev_priv->wopcm);
 	intel_uc_init_early(dev_priv);
 	i915_memcpy_init_early(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 74b0e9d8ff62..e27ba8fb64e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -64,6 +64,7 @@
 #include "intel_opregion.h"
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
+#include "intel_wopcm.h"
 #include "intel_uc.h"
 
 #include "i915_gem.h"
@@ -1589,6 +1590,8 @@ struct drm_i915_private {
 
 	struct intel_gvt *gvt;
 
+	struct intel_wopcm wopcm;
+
 	struct intel_huc huc;
 	struct intel_guc guc;
 
@@ -2121,6 +2124,11 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
 	return to_i915(dev_get_drvdata(kdev));
 }
 
+static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
+{
+	return container_of(wopcm, struct drm_i915_private, wopcm);
+}
+
 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 {
 	return container_of(guc, struct drm_i915_private, guc);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0624c57d9a6..51faa6506739 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5294,6 +5294,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	if (ret)
 		return ret;
 
+	ret = intel_wopcm_init(&dev_priv->wopcm);
+	if (ret)
+		return ret;
+
 	ret = intel_uc_init_misc(dev_priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f2cbea7cf940..5cfac0255758 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -318,12 +318,13 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->desc_template =
 		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
 
-	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
+	/*
+	 * GuC requires the ring to be placed in Non-WOPCM memory. If GuC is not
 	 * present or not in use we still need a small bias as ring wraparound
 	 * at offset 0 sometimes hangs. No idea why.
 	 */
 	if (USES_GUC(dev_priv))
-		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
+		ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias;
 	else
 		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
 
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 78463842ea7b..3eb516e7c225 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -492,6 +492,57 @@ int intel_guc_resume(struct intel_guc *guc)
 	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
 
+/**
+ * DOC: GuC Address Space
+ *
+ * The layout of GuC address space is shown as below:
+ *
+ *    +==============> +====================+ <== GUC_GGTT_TOP
+ *    ^                |                    |
+ *    |                |                    |
+ *    |                |        DRAM        |
+ *    |                |       Memory       |
+ *    |                |                    |
+ *   GuC               |                    |
+ * Address  +========> +====================+ <== WOPCM Top
+ *  Space   ^          |   HW contexts RSVD |
+ *    |     |          |        WOPCM       |
+ *    |     |     +==> +--------------------+ <== GuC WOPCM Top
+ *    |    GuC    ^    |                    |
+ *    |    GGTT   |    |                    |
+ *    |    Pin   GuC   |        GuC         |
+ *    |    Bias WOPCM  |       WOPCM        |
+ *    |     |    Size  |                    |
+ *    |     |     |    |                    |
+ *    v     v     v    |                    |
+ *    +=====+=====+==> +====================+ <== GuC WOPCM Base
+ *                     |   Non-GuC WOPCM    |
+ *                     |   (HuC/Reserved)   |
+ *                     +====================+ <== WOPCM Base
+ *
+ * The lower part [0, GuC ggtt_pin_bias) is mapped to WOPCM which consists of
+ * GuC WOPCM and WOPCM reserved for other usage (e.g.RC6 context). The value of
+ * the GuC ggtt_pin_bias is determined by the actually GuC WOPCM size which is
+ * set in GUC_WOPCM_SIZE register.
+ */
+
+/**
+ * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
+ * @guc: intel_guc structure.
+ *
+ * This function will calculate and initialize the ggtt_pin_bias value based on
+ * overall WOPCM size and GuC WOPCM size.
+ */
+void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc)
+{
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+
+	GEM_BUG_ON(!i915->wopcm.size);
+	GEM_BUG_ON(i915->wopcm.size < i915->wopcm.guc.base);
+
+	guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base;
+}
+
 /**
  * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
  * @guc:	the guc
@@ -500,7 +551,7 @@ int intel_guc_resume(struct intel_guc *guc)
  * This is a wrapper to create an object for use with the GuC. In order to
  * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
  * both some backing storage and a range inside the Global GTT. We must pin
- * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that
+ * it in the GGTT somewhere other than than [0, GUC ggtt_pin_bias) because that
  * range is reserved inside GuC.
  *
  * Return:	A i915_vma if successful, otherwise an ERR_PTR.
@@ -521,7 +572,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 		goto err;
 
 	ret = i915_vma_pin(vma, 0, PAGE_SIZE,
-			   PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+			   PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
 	if (ret) {
 		vma = ERR_PTR(ret);
 		goto err;
@@ -533,14 +584,3 @@ err:
 	i915_gem_object_put(obj);
 	return vma;
 }
-
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
-{
-	u32 wopcm_size = GUC_WOPCM_TOP;
-
-	/* On BXT, the top of WOPCM is reserved for RC6 context */
-	if (IS_GEN9_LP(dev_priv))
-		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
-
-	return wopcm_size;
-}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index a1be04eaafda..cdb649a9a4cf 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -49,6 +49,9 @@ struct intel_guc {
 	struct intel_guc_log log;
 	struct intel_guc_ct ct;
 
+	/* Offset where Non-WOPCM memory starts. */
+	u32 ggtt_pin_bias;
+
 	/* Log snapshot if GuC errors during load */
 	struct drm_i915_gem_object *load_err_log;
 
@@ -108,19 +111,20 @@ static inline void intel_guc_notify(struct intel_guc *guc)
  * @guc: intel_guc structure.
  * @vma: i915 graphics virtual memory area.
  *
- * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
- * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
- * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
- * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
+ * GuC does not allow any gfx GGTT address that falls into range
+ * [0, GuC ggtt_pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
+ * Currently, in order to exclude [0, GuC ggtt_pin_bias) address space from
+ * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
+ * and pinned with PIN_OFFSET_BIAS along with the value of GuC ggtt_pin_bias.
  *
- * Return: GGTT offset that meets the GuC gfx address requirement.
+ * Return: GGTT offset of the @vma.
  */
 static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
 					struct i915_vma *vma)
 {
 	u32 offset = i915_ggtt_offset(vma);
 
-	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+	GEM_BUG_ON(offset < guc->ggtt_pin_bias);
 	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
 
 	return offset;
@@ -129,6 +133,7 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
 void intel_guc_init_early(struct intel_guc *guc);
 void intel_guc_init_send_regs(struct intel_guc *guc);
 void intel_guc_init_params(struct intel_guc *guc);
+void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc);
 int intel_guc_init_wq(struct intel_guc *guc);
 void intel_guc_fini_wq(struct intel_guc *guc);
 int intel_guc_init(struct intel_guc *guc);
@@ -141,7 +146,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct intel_guc *guc);
 int intel_guc_resume(struct intel_guc *guc);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
 
 static inline int intel_guc_sanitize(struct intel_guc *guc)
 {
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index 711e9e974b7c..01963d085ed6 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -68,17 +68,15 @@
 #define DMA_GUC_WOPCM_OFFSET		_MMIO(0xc340)
 #define   HUC_LOADING_AGENT_VCR		  (0<<1)
 #define   HUC_LOADING_AGENT_GUC		  (1<<1)
-#define   GUC_WOPCM_OFFSET_VALUE	  0x80000	/* 512KB */
+#define   GUC_WOPCM_OFFSET_SHIFT	14
 #define GUC_MAX_IDLE_COUNT		_MMIO(0xC3E4)
 
 #define HUC_STATUS2             _MMIO(0xD3B0)
 #define   HUC_FW_VERIFIED       (1<<7)
 
-/* Defines WOPCM space available to GuC firmware */
 #define GUC_WOPCM_SIZE			_MMIO(0xc050)
-/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
-#define   GUC_WOPCM_TOP			  (0x80 << 12)	/* 512KB */
-#define   BXT_GUC_WOPCM_RC6_RESERVED	  (0x10 << 12)	/* 64KB  */
+#define   GUC_WOPCM_SIZE_SHIFT		12
+#define   GUC_WOPCM_SIZE_MASK		  (0xfffff << GUC_WOPCM_SIZE_SHIFT)
 
 #define GEN8_GT_PM_CONFIG		_MMIO(0x138140)
 #define GEN9LP_GT_PM_CONFIG		_MMIO(0x138140)
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 858c9543630d..1d6c47b17935 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -55,7 +55,7 @@ int intel_huc_auth(struct intel_huc *huc)
 		return -ENOEXEC;
 
 	vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
-				PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+				       PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 9d5ffd74c16a..ed5a6fcc8557 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -272,6 +272,8 @@ int intel_uc_init_misc(struct drm_i915_private *dev_priv)
 	if (!USES_GUC(dev_priv))
 		return 0;
 
+	intel_guc_init_ggtt_pin_bias(guc);
+
 	ret = intel_guc_init_wq(guc);
 	if (ret)
 		return ret;
@@ -366,9 +368,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	gen9_reset_guc_interrupts(dev_priv);
 
 	/* init WOPCM */
-	I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
+	I915_WRITE(GUC_WOPCM_SIZE, dev_priv->wopcm.guc.size);
 	I915_WRITE(DMA_GUC_WOPCM_OFFSET,
-		   GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC);
+		   dev_priv->wopcm.guc.base | HUC_LOADING_AGENT_GUC);
 
 	/* WaEnableuKernelHeaderValidFix:skl */
 	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 3ec0ce505b76..30c73243f54d 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -95,15 +95,6 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 	uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
 	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
 
-	/* Header and uCode will be loaded to WOPCM */
-	size = uc_fw->header_size + uc_fw->ucode_size;
-	if (size > intel_guc_wopcm_size(dev_priv)) {
-		DRM_WARN("%s: Firmware is too large to fit in WOPCM\n",
-			 intel_uc_fw_type_repr(uc_fw->type));
-		err = -E2BIG;
-		goto fail;
-	}
-
 	/* now RSA */
 	if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
 		DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
@@ -208,6 +199,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 		       int (*xfer)(struct intel_uc_fw *uc_fw,
 				   struct i915_vma *vma))
 {
+	struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
 	struct i915_vma *vma;
 	int err;
 
@@ -231,7 +223,8 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 	}
 
 	vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
-				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+				       PIN_OFFSET_BIAS |
+				       i915->guc.ggtt_pin_bias);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 2601521a4006..dc33b12394de 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -121,6 +121,22 @@ static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
 		uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
 }
 
+/**
+ * intel_uc_fw_get_upload_size() - Get size of firmware needed to be uploaded.
+ * @uc_fw: uC firmware.
+ *
+ * Get the size of the firmware and header that will be uploaded to WOPCM.
+ *
+ * Return: Upload firmware size, or zero on firmware fetch failure.
+ */
+static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+	if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+		return 0;
+
+	return uc_fw->header_size + uc_fw->ucode_size;
+}
+
 void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 		       struct intel_uc_fw *uc_fw);
 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
new file mode 100644
index 000000000000..7b150d580d4a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -0,0 +1,182 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#include "intel_wopcm.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: WOPCM Layout
+ *
+ * The layout of the WOPCM will be fixed after writing to the GuC WOPCM size
+ * and offset registers, whose values are determined by the HuC/GuC firmware
+ * sizes and a set of hw requirements/restrictions as shown below:
+ *
+ *   +=========> +====================+ <== WOPCM Top
+ *   ^           |  HW contexts RSVD  |
+ *   |     +===> +====================+ <== GuC WOPCM Top
+ *   |     ^     |                    |
+ *   |     |     |                    |
+ *   |     |     |                    |
+ *   |    GuC    |                    |
+ *   |   WOPCM   |                    |
+ *   |    Size   +--------------------+
+ * WOPCM   |     |    GuC FW RSVD     |
+ *   |     |     +--------------------+
+ *   |     |     |   GuC Stack RSVD   |
+ *   |     |     +--------------------+
+ *   |     v     |   GuC WOPCM RSVD   |
+ *   |     +===> +====================+ <== GuC WOPCM base
+ *   |           |     WOPCM RSVD     |
+ *   |           +--------------------+ <== HuC Firmware Top
+ *   v           |      HuC FW        |
+ *   +=========> +====================+ <== WOPCM Base
+ *
+ * GuC accessible WOPCM starts at GuC WOPCM base and ends at GuC WOPCM top.
+ * The top part of the WOPCM is reserved for hardware contexts (e.g. RC6
+ * context).
+ */
+
+/* Default WOPCM size 1MB. */
+#define GEN9_WOPCM_SIZE			(1024 * 1024)
+/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
+#define WOPCM_RESERVED_SIZE		(16 * 1024)
+
+/* 16KB reserved at the beginning of GuC WOPCM. */
+#define GUC_WOPCM_RESERVED		(16 * 1024)
+/* 8KB from GUC_WOPCM_RESERVED is reserved for GuC stack. */
+#define GUC_WOPCM_STACK_RESERVED	(8 * 1024)
+
+/* GuC WOPCM Offset value needs to be aligned to 16KB. */
+#define GUC_WOPCM_OFFSET_ALIGNMENT	(1UL << GUC_WOPCM_OFFSET_SHIFT)
+
+/* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */
+#define BXT_WOPCM_RC6_CTX_RESERVED	(24 * 1024)
+
+/* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */
+#define GEN9_GUC_FW_RESERVED	(128 * 1024)
+#define GEN9_GUC_WOPCM_OFFSET	(GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
+
+/**
+ * intel_wopcm_init_early() - Early initialization of the WOPCM.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * Set up the size of the WOPCM, which will be used later for WOPCM partitioning.
+ */
+void intel_wopcm_init_early(struct intel_wopcm *wopcm)
+{
+	wopcm->size = GEN9_WOPCM_SIZE;
+
+	DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024);
+}
+
+static inline u32 context_reserved_size(struct drm_i915_private *i915)
+{
+	if (IS_GEN9_LP(i915))
+		return BXT_WOPCM_RC6_CTX_RESERVED;
+	else
+		return 0;
+}
+
+static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
+{
+	u32 offset;
+
+	/*
+	 * GuC WOPCM size shall be at least a dword larger than the offset from
+	 * WOPCM base (GuC WOPCM offset from WOPCM base + GEN9_GUC_WOPCM_OFFSET)
+	 * due to hardware limitation on Gen9.
+	 */
+	offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
+	if (offset > guc_wopcm_size ||
+	    (guc_wopcm_size - offset) < sizeof(u32)) {
+		DRM_ERROR("GuC WOPCM size %uKiB is too small. %uKiB needed.\n",
+			  guc_wopcm_size / 1024,
+			  (u32)(offset + sizeof(u32)) / 1024);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+
+static inline int check_hw_restriction(struct drm_i915_private *i915,
+				       u32 guc_wopcm_base, u32 guc_wopcm_size)
+{
+	int err = 0;
+
+	if (IS_GEN9(i915))
+		err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
+
+	return err;
+}
+
+/**
+ * intel_wopcm_init() - Initialize the WOPCM structure.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * This function will partition WOPCM space based on GuC and HuC firmware sizes
+ * and will allocate the maximum remaining space for use by GuC. This function
+ * will also enforce platform dependent hardware restrictions on GuC WOPCM
+ * offset and size. It will fail the WOPCM init if any of these checks fail,
+ * so that the subsequent GuC firmware upload is aborted.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_wopcm_init(struct intel_wopcm *wopcm)
+{
+	struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
+	u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw);
+	u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw);
+	u32 ctx_rsvd = context_reserved_size(i915);
+	u32 guc_wopcm_base;
+	u32 guc_wopcm_size;
+	u32 guc_wopcm_rsvd;
+	int err;
+
+	GEM_BUG_ON(!wopcm->size);
+
+	if (guc_fw_size >= wopcm->size) {
+		DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.",
+			  guc_fw_size / 1024);
+		return -E2BIG;
+	}
+
+	if (huc_fw_size >= wopcm->size) {
+		DRM_ERROR("HuC FW (%uKiB) is too big to fit in WOPCM.",
+			  huc_fw_size / 1024);
+		return -E2BIG;
+	}
+
+	guc_wopcm_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
+			       GUC_WOPCM_OFFSET_ALIGNMENT);
+	if ((guc_wopcm_base + ctx_rsvd) >= wopcm->size) {
+		DRM_ERROR("GuC WOPCM base (%uKiB) is too big.\n",
+			  guc_wopcm_base / 1024);
+		return -E2BIG;
+	}
+
+	guc_wopcm_size = wopcm->size - guc_wopcm_base - ctx_rsvd;
+	guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
+
+	DRM_DEBUG_DRIVER("Calculated GuC WOPCM Region: [%uKiB, %uKiB)\n",
+			 guc_wopcm_base / 1024, guc_wopcm_size / 1024);
+
+	guc_wopcm_rsvd = GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
+	if ((guc_fw_size + guc_wopcm_rsvd) > guc_wopcm_size) {
+		DRM_ERROR("Need %uKiB WOPCM for GuC, %uKiB available.\n",
+			  (guc_fw_size + guc_wopcm_rsvd) / 1024,
+			  guc_wopcm_size / 1024);
+		return -E2BIG;
+	}
+
+	err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size);
+	if (err)
+		return err;
+
+	wopcm->guc.base = guc_wopcm_base;
+	wopcm->guc.size = guc_wopcm_size;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
new file mode 100644
index 000000000000..93c402ca7489
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -0,0 +1,30 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_WOPCM_H_
+#define _INTEL_WOPCM_H_
+
+#include <linux/types.h>
+
+/**
+ * struct intel_wopcm - Overall WOPCM info and WOPCM regions.
+ * @size: Size of overall WOPCM.
+ * @guc: GuC WOPCM Region info.
+ * @guc.base: GuC WOPCM base which is offset from WOPCM base.
+ * @guc.size: Size of the GuC WOPCM region.
+ */
+struct intel_wopcm {
+	u32 size;
+	struct {
+		u32 base;
+		u32 size;
+	} guc;
+};
+
+void intel_wopcm_init_early(struct intel_wopcm *wopcm);
+int intel_wopcm_init(struct intel_wopcm *wopcm);
+
+#endif

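As an aside, the WOPCM partitioning arithmetic introduced above in intel_wopcm_init() can be exercised on its own. The following is a minimal user-space sketch, not the driver code itself; the firmware and reservation sizes are hypothetical example values, and only the align/subtract structure mirrors the function:

  #include <stdio.h>
  #include <stdint.h>

  #define WOPCM_SIZE               (1024 * 1024) /* 1MB overall WOPCM (Gen9 default) */
  #define WOPCM_RESERVED_SIZE      (16 * 1024)   /* gap reserved above the HuC firmware */
  #define GUC_WOPCM_OFFSET_ALIGN   (16 * 1024)   /* GuC WOPCM offset must be 16KB aligned */

  #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
          uint32_t huc_fw_size = 200 * 1024;     /* hypothetical HuC firmware size */
          uint32_t ctx_rsvd = 24 * 1024;         /* e.g. BXT RC6 context reservation */

          /* GuC WOPCM starts above the HuC firmware plus the reserved gap... */
          uint32_t guc_base = ALIGN_UP(huc_fw_size + WOPCM_RESERVED_SIZE,
                                       GUC_WOPCM_OFFSET_ALIGN);
          /* ...and extends up to the hardware context reservation at the top. */
          uint32_t guc_size = WOPCM_SIZE - guc_base - ctx_rsvd;

          printf("GuC WOPCM: base %uKiB, size %uKiB\n",
                 guc_base / 1024, guc_size / 1024);
          return 0;
  }
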
From 5cbc1e2f48086ea08a37b3b75aa481ae9842af39 Mon Sep 17 00:00:00 2001
From: Jackie Li <yaodong.li@intel.com>
Date: Tue, 13 Mar 2018 17:32:51 -0700
Subject: [PATCH 0036/1286] drm/i915: Add support to return CNL specific
 reserved WOPCM size

CNL has its own specific reserved GuC WOPCM size for RC6 and other hardware
contexts.

This patch updates the code to return the CNL-specific reserved GuC WOPCM size
for RC6 and other hardware contexts so that the GuC WOPCM size can be
calculated correctly for CNL.
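
For illustration, the per-platform selection amounts to a simple if-else
ladder. A minimal stand-alone sketch of the idea follows; the platform
descriptor and its fields are hypothetical stand-ins for drm_i915_private,
and only the two reserved sizes come from this series:

  #include <stdint.h>
  #include <stdio.h>

  #define BXT_WOPCM_RC6_CTX_RESERVED  (24 * 1024)
  #define CNL_WOPCM_HW_CTX_RESERVED   (36 * 1024)

  /* Hypothetical platform descriptor standing in for drm_i915_private. */
  struct platform {
          int gen;
          int is_gen9_lp;
  };

  static uint32_t context_reserved_size(const struct platform *p)
  {
          if (p->is_gen9_lp)
                  return BXT_WOPCM_RC6_CTX_RESERVED;  /* BXT: RC6 context only */
          else if (p->gen >= 10)
                  return CNL_WOPCM_HW_CTX_RESERVED;   /* CNL: RC6 and other HW contexts */
          else
                  return 0;                           /* no reservation elsewhere */
  }

  int main(void)
  {
          struct platform cnl = { .gen = 10, .is_gen9_lp = 0 };

          printf("reserved: %uKiB\n", context_reserved_size(&cnl) / 1024);
          return 0;
  }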

v9:
 - Created a new patch for these changes originally made in v8 4/6 patch of
   this series (Sagar/Michal)

v10:
 - Used if-else ladder to the returning of context sizes (Joonas)

v11:
 - Removed GUC_ prefix from context size macro (Michal)

v13:
  - Updated the ordering of s-o-b/cc/r-b tags (Sagar)

Bspec: 12690

Signed-off-by: Jackie Li <yaodong.li@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v9)
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com> (v11)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v12)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1520987574-19351-3-git-send-email-yaodong.li@intel.com
---
 drivers/gpu/drm/i915/intel_wopcm.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 7b150d580d4a..a29e0c9e60a7 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -54,6 +54,8 @@
 
 /* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */
 #define BXT_WOPCM_RC6_CTX_RESERVED	(24 * 1024)
+/* 36KB WOPCM reserved at the end of WOPCM on CNL. */
+#define CNL_WOPCM_HW_CTX_RESERVED	(36 * 1024)
 
 /* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */
 #define GEN9_GUC_FW_RESERVED	(128 * 1024)
@@ -76,6 +78,8 @@ static inline u32 context_reserved_size(struct drm_i915_private *i915)
 {
 	if (IS_GEN9_LP(i915))
 		return BXT_WOPCM_RC6_CTX_RESERVED;
+	else if (INTEL_GEN(i915) >= 10)
+		return CNL_WOPCM_HW_CTX_RESERVED;
 	else
 		return 0;
 }

From 96c83d35a26f7ba06a1623adaf872b56d54fe093 Mon Sep 17 00:00:00 2001
From: Jackie Li <yaodong.li@intel.com>
Date: Tue, 13 Mar 2018 17:32:52 -0700
Subject: [PATCH 0037/1286] drm/i915: Add HuC firmware size related restriction
 for Gen9 and CNL A0

On CNL A0 and Gen9, there's a hardware restriction that requires the
available GuC WOPCM size to be larger than or equal to HuC firmware size.

This patch adds new verification code to ensure that the available GuC WOPCM
size is larger than or equal to the HuC firmware size on both Gen9 and CNL
A0.
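
Standing alone, the check boils down to comparing the HuC firmware size
against the GuC WOPCM space left after the reserved area. A minimal sketch,
assuming a 16KB reserved region as in the existing GUC_WOPCM_RESERVED
definition; the sizes in main() are made-up examples:

  #include <stdint.h>
  #include <stdio.h>

  #define GUC_WOPCM_RESERVED  (16 * 1024)  /* reserved at the start of GuC WOPCM */

  /* Returns 0 if the HuC firmware fits, -1 otherwise (stand-in for -E2BIG). */
  static int check_huc_fw_fits(uint32_t guc_wopcm_size, uint32_t huc_fw_size)
  {
          if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
                  fprintf(stderr, "HuC FW (%uKiB) won't fit in GuC WOPCM (%uKiB)\n",
                          huc_fw_size / 1024,
                          (guc_wopcm_size - GUC_WOPCM_RESERVED) / 1024);
                  return -1;
          }
          return 0;
  }

  int main(void)
  {
          /* 776KiB of GuC WOPCM minus 16KiB reserved leaves room for a 600KiB HuC FW. */
          return check_huc_fw_fits(776 * 1024, 600 * 1024) ? 1 : 0;
  }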

v6:
 - Extended HuC FW size check against GuC WOPCM size to all
   Gen9 and CNL A0 platforms

v7:
 - Fixed patch format issues

v8:
 - Renamed variables and functions to avoid ambiguity (Joonas)
 - Updated commit message and comments to be more comprehensive (Sagar)

v9:
 - Moved code that is not related to restriction check into a separate
   patch and updated the commit message accordingly (Sagar/Michal)
 - Avoided to call uc_get_fw_size for better layer isolation (Michal)

v10:
 - Shorten function names and reorganized size_check code to have clear
   isolation (Joonas)
 - Removed unnecessary comments (Joonas)

v11:
 - Fixed logic error in size check (Michal)

v12:
 - Add space between "HuC FW" and "(%uKiB)" in error message (Michal)

v13:
 - Updated the ordering of s-o-b/cc/r-b tags (Sagar)

BSpec: 10875

Signed-off-by: Jackie Li <yaodong.li@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: John Spotswood <john.a.spotswood@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com> (v8)
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com> (v11)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v12)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1520987574-19351-4-git-send-email-yaodong.li@intel.com
---
 drivers/gpu/drm/i915/intel_wopcm.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index a29e0c9e60a7..1fd1125f464b 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -105,14 +105,36 @@ static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
 	return 0;
 }
 
+static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
+{
+	/*
+	 * On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
+	 * size to be larger than or equal to HuC firmware size. Otherwise,
+	 * firmware uploading would fail.
+	 */
+	if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
+		DRM_ERROR("HuC FW (%uKiB) won't fit in GuC WOPCM (%uKiB).\n",
+			  huc_fw_size / 1024,
+			  (guc_wopcm_size - GUC_WOPCM_RESERVED) / 1024);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+
 static inline int check_hw_restriction(struct drm_i915_private *i915,
-				       u32 guc_wopcm_base, u32 guc_wopcm_size)
+				       u32 guc_wopcm_base, u32 guc_wopcm_size,
+				       u32 huc_fw_size)
 {
 	int err = 0;
 
 	if (IS_GEN9(i915))
 		err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
 
+	if (!err &&
+	    (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
+		err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
+
 	return err;
 }
 
@@ -175,7 +197,8 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
 		return -E2BIG;
 	}
 
-	err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size);
+	err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size,
+				   huc_fw_size);
 	if (err)
 		return err;
 

From f08e2035cc089caf52ce57e855d96ba6ba90c71a Mon Sep 17 00:00:00 2001
From: Jackie Li <yaodong.li@intel.com>
Date: Tue, 13 Mar 2018 17:32:53 -0700
Subject: [PATCH 0038/1286] drm/i915/guc: Check the locking status of GuC WOPCM
 registers

GuC WOPCM registers are write-once registers. The current driver code accesses
these registers without checking whether they are still writable, which will
lead to unpredictable driver behavior if these registers were touched by
other components (such as faulty BIOS code).

This patch moves the GuC WOPCM register update code into intel_wopcm.c and
adds checks before and after the update of the GuC WOPCM registers so that
we can make sure the driver is in a known state after writing to these
write-once registers.
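
The write-then-verify idea can be sketched without any MMIO at all. Below is
a small stand-alone illustration; the fake register that latches its value
and sets a lock bit on the first write is purely hypothetical and only models
the behavior the driver has to cope with:

  #include <stdint.h>

  #define LOCK_BIT (1u << 0)

  static uint32_t fake_reg;

  /* Model of a write-once register: the first write latches the value and locks it. */
  static void reg_write(uint32_t val)
  {
          if (!(fake_reg & LOCK_BIT))
                  fake_reg = val | LOCK_BIT;
  }

  static uint32_t reg_read(void)
  {
          return fake_reg;
  }

  /*
   * Write a value and verify the read-back matches what we wrote plus the
   * expected lock/valid bit.  Returns 0 on success, -1 (stand-in for -EIO)
   * if the register was already locked with a different value.
   */
  static int write_and_verify(uint32_t val, uint32_t mask, uint32_t locked_bit)
  {
          reg_write(val);
          return (reg_read() & mask) != (val | locked_bit) ? -1 : 0;
  }

  int main(void)
  {
          /* The first write sticks; a later, different write is ignored and detected. */
          if (write_and_verify(0x1000, 0xffff, LOCK_BIT))
                  return 1;
          return write_and_verify(0x2000, 0xffff, LOCK_BIT) ? 0 : 1;
  }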

v6:
 - Made sure module reloading won't bug the kernel while doing
   locking status checking

v7:
 - Fixed patch format issues

v8:
 - Fixed coding style issue on register lock bit macro definition (Sagar)

v9:
 - Avoided to use redundant !! to cast uint to bool (Chris)
 - Return error code instead of GEM_BUG_ON for locked with invalid register
   values case (Sagar)
 - Updated guc_wopcm_hw_init to use guc_wopcm as first parameter (Michal)
 - Added code to set and validate the HuC_LOADING_AGENT_GUC bit in GuC
   WOPCM offset register based on the presence of HuC firmware (Michal)
 - Use bit fields instead of macros for GuC WOPCM flags (Michal)

v10:
 - Refined variable names, removed redundant comments (Joonas)
 - Introduced lockable_reg to handle the write once register write and
   propagate the write error to caller (Joonas)
 - Used lockable_reg abstraction to avoid locking bit check on generic
   i915_reg_t (Michal)
 - Added log message for error paths (Michal)
 - Removed hw_updated flag and only relies on real hardware status

v11:
 - Replaced lockable_reg with simplified function (Michal)
 - Used new macros for locking bits of WOPCM size/offset registers instead
   of using BIT(0) directly (Michal)
 - use intel_wopcm_init_hw() called from intel_gem_init_hw() to do GuC
   WOPCM register setup instead of calling from intel_uc_init_hw() (Michal)

v12:
 - Updated function kernel-doc to align with code changes (Michal)
 - Updated code to use wopcm pointer directly (Michal)

v13:
 - Updated the ordering of s-o-b/cc/r-b tags (Sagar)

BSpec: 10875, 10833

Signed-off-by: Jackie Li <yaodong.li@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com> (v11)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v12)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1520987574-19351-5-git-send-email-yaodong.li@intel.com
---
 drivers/gpu/drm/i915/i915_gem.c      |  6 +++
 drivers/gpu/drm/i915/intel_guc_reg.h |  3 ++
 drivers/gpu/drm/i915/intel_uc.c      |  5 ---
 drivers/gpu/drm/i915/intel_wopcm.c   | 64 ++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_wopcm.h   |  1 +
 5 files changed, 74 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 51faa6506739..13d4b0e74641 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5137,6 +5137,12 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		goto out;
 	}
 
+	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
+	if (ret) {
+		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
+		goto out;
+	}
+
 	/* We can't enable contexts until all firmware is loaded */
 	ret = intel_uc_init_hw(dev_priv);
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index 01963d085ed6..d86084742a4a 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -66,15 +66,18 @@
 #define   UOS_MOVE			  (1<<4)
 #define   START_DMA			  (1<<0)
 #define DMA_GUC_WOPCM_OFFSET		_MMIO(0xc340)
+#define   GUC_WOPCM_OFFSET_VALID	  (1<<0)
 #define   HUC_LOADING_AGENT_VCR		  (0<<1)
 #define   HUC_LOADING_AGENT_GUC		  (1<<1)
 #define   GUC_WOPCM_OFFSET_SHIFT	14
+#define   GUC_WOPCM_OFFSET_MASK		  (0x3ffff << GUC_WOPCM_OFFSET_SHIFT)
 #define GUC_MAX_IDLE_COUNT		_MMIO(0xC3E4)
 
 #define HUC_STATUS2             _MMIO(0xD3B0)
 #define   HUC_FW_VERIFIED       (1<<7)
 
 #define GUC_WOPCM_SIZE			_MMIO(0xc050)
+#define   GUC_WOPCM_SIZE_LOCKED		  (1<<0)
 #define   GUC_WOPCM_SIZE_SHIFT		12
 #define   GUC_WOPCM_SIZE_MASK		  (0xfffff << GUC_WOPCM_SIZE_SHIFT)
 
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index ed5a6fcc8557..6316548a1c78 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -367,11 +367,6 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 
 	gen9_reset_guc_interrupts(dev_priv);
 
-	/* init WOPCM */
-	I915_WRITE(GUC_WOPCM_SIZE, dev_priv->wopcm.guc.size);
-	I915_WRITE(DMA_GUC_WOPCM_OFFSET,
-		   dev_priv->wopcm.guc.base | HUC_LOADING_AGENT_GUC);
-
 	/* WaEnableuKernelHeaderValidFix:skl */
 	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
 	if (IS_GEN9(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 1fd1125f464b..4117886bfb05 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -207,3 +207,67 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
 
 	return 0;
 }
+
+static inline int write_and_verify(struct drm_i915_private *dev_priv,
+				   i915_reg_t reg, u32 val, u32 mask,
+				   u32 locked_bit)
+{
+	u32 reg_val;
+
+	GEM_BUG_ON(val & ~mask);
+
+	I915_WRITE(reg, val);
+
+	reg_val = I915_READ(reg);
+
+	return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
+}
+
+/**
+ * intel_wopcm_init_hw() - Setup GuC WOPCM registers.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * Setup the GuC WOPCM size and offset registers with the calculated values. It
+ * will verify the register values to make sure the registers are locked with
+ * correct values.
+ *
+ * Return: 0 on success. -EIO if registers were locked with incorrect values.
+ */
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
+{
+	struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm);
+	u32 huc_agent;
+	u32 mask;
+	int err;
+
+	if (!USES_GUC(dev_priv))
+		return 0;
+
+	GEM_BUG_ON(!HAS_GUC(dev_priv));
+	GEM_BUG_ON(!wopcm->guc.size);
+	GEM_BUG_ON(!wopcm->guc.base);
+
+	err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size,
+			       GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
+			       GUC_WOPCM_SIZE_LOCKED);
+	if (err)
+		goto err_out;
+
+	huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0;
+	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
+	err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET,
+			       wopcm->guc.base | huc_agent, mask,
+			       GUC_WOPCM_OFFSET_VALID);
+	if (err)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	DRM_ERROR("Failed to init WOPCM registers:\n");
+	DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n",
+		  I915_READ(DMA_GUC_WOPCM_OFFSET));
+	DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE));
+
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
index 93c402ca7489..6298910a384c 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -26,5 +26,6 @@ struct intel_wopcm {
 
 void intel_wopcm_init_early(struct intel_wopcm *wopcm);
 int intel_wopcm_init(struct intel_wopcm *wopcm);
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm);
 
 #endif

From ab2681512b4c10d65a0fc412de0ce09bc4166edd Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 14 Mar 2018 10:16:30 +0000
Subject: [PATCH 0039/1286] drm/i915: Check rq->timeline before dereference

Not only is the context liable to disappear, but so is its
timeline. Under a lockless inspection of the requests for
debugging from intel_engine_dump(), the context may already have been
freed, and we have to check before chasing the dangling pointer.

[28033.681755] Modules linked in: vgem snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic x86_pkg_temp_thermal intel_powerclamp coretemp snd_hda_intel crct10dif_pclmul crc32_pclmul snd_hda_codec snd_hwdep snd_hda_core ghash_clmulni_intel snd_pcm mei_me mei i915 r8169 mii prime_numbers i2c_hid
[28033.681796] CPU: 3 PID: 3058 Comm: gem_exec_schedu Tainted: G     U           4.16.0-rc5+ #9
[28033.681804] Hardware name: Acer Aspire E5-575G/Ironman_SK  , BIOS V1.12 08/02/2016
[28033.681834] RIP: 0010:print_request+0x2b/0xb0 [i915]
[28033.681840] RSP: 0018:ffffc90004afbc18 EFLAGS: 00010202
[28033.681847] RAX: 6b6b6b6b6b6b6b6b RBX: ffff8801921b5a40 RCX: 0000000000000006
[28033.681854] RDX: ffffc90004afbc60 RSI: ffff8801921b5a40 RDI: 0000000000000004
[28033.681861] RBP: ffffc90004afbd80 R08: 0000000000000000 R09: 0000000000000001
[28033.681868] R10: ffffc90004afbbd0 R11: ffffc90004afbc73 R12: ffffc90004afbc60
[28033.681875] R13: ffffc90004afbd80 R14: ffff8801d40ec670 R15: ffff8801921b5a40
[28033.681883] FS:  00007fbba5f6c8c0(0000) GS:ffff8801e8400000(0000) knlGS:0000000000000000
[28033.681891] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[28033.681897] CR2: 00007fbba5f8f000 CR3: 00000001b2efa002 CR4: 00000000003606e0
[28033.681904] Call Trace:
[28033.681932]  intel_engine_print_registers+0x6a7/0x930 [i915]
[28033.681962]  intel_engine_dump+0x30d/0x740 [i915]
[28033.681971]  ? seq_printf+0x3a/0x50
[28033.681995]  i915_engine_info+0xb8/0xe0 [i915]
[28033.682003]  ? drm_get_color_range_name+0x20/0x20
[28033.682010]  seq_read+0xe1/0x440
[28033.682018]  full_proxy_read+0x51/0x80
[28033.682025]  __vfs_read+0x21/0x130
[28033.682031]  ? do_sys_open+0x134/0x220
[28033.682037]  ? kmem_cache_free+0x177/0x2b0
[28033.682043]  vfs_read+0xa1/0x150
[28033.682049]  SyS_read+0x40/0xa0
[28033.682055]  do_syscall_64+0x6b/0x1b0
[28033.682063]  entry_SYSCALL_64_after_hwframe+0x42/0xb7
[28033.682069] RIP: 0033:0x7fbba4655d11
[28033.682074] RSP: 002b:00007ffd8c49da58 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
[28033.682082] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbba4655d11
[28033.682089] RDX: 000000000000003f RSI: 00005647bfbfc260 RDI: 0000000000000006
[28033.682096] RBP: 000000000000003f R08: 00000000ffffffff R09: 0000000000000000
[28033.682104] R10: 0000000000000000 R11: 0000000000000246 R12: 00005647bfbfc260
[28033.682111] R13: 0000000000000006 R14: 0000000000000000 R15: 00005647bfbfc260
[28033.682119] Code: 41 55 41 54 49 89 d4 55 53 48 89 fd 48 8b 86 c8 00 00 00 48 8b 3d d6 1e 14 e2 48 89 f3 48 2b be a8 02 00 00 48 8b 80 b0 00 00 00 <4c> 8b 68 18 e8 bc 80 02 e1 8b 8b 70 02 00 00 8b b3 28 02 00 00
[28033.682206] RIP: print_request+0x2b/0xb0 [i915] RSP: ffffc90004afbc18

Reported-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314101630.8933-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a2b1e9e2c008..f22c5f72df8d 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1716,13 +1716,15 @@ static void print_request(struct drm_printer *m,
 			  struct i915_request *rq,
 			  const char *prefix)
 {
+	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
+
 	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
 		   rq->fence.context, rq->fence.seqno,
 		   rq->priotree.priority,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
-		   rq->timeline->common->name);
+		   name);
 }
 
 static void hexdump(struct drm_printer *m, const void *buf, size_t len)

From ad055fb8e010e4ff37f66aeed1d380329bddce67 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Wed, 14 Mar 2018 08:05:35 +0000
Subject: [PATCH 0040/1286] drm/i915/pmu: Work around compiler warnings on some
 kernel configs

Arnd Bergman reports:
"""
The conditional spinlock confuses gcc into thinking the 'flags' value
might contain uninitialized data:

drivers/gpu/drm/i915/i915_pmu.c: In function '__i915_pmu_event_read':
arch/x86/include/asm/paravirt_types.h:573:3: error: 'flags' may be used uninitialized in this function [-Werror=maybe-uninitialized]

The code is correct, but it's easy to see how the compiler gets confused
here. This avoids the problem by pulling the lock outside of the function
into its only caller.
"""

On a deeper look, it seems this is caused by the paravirt spinlock
implementation when CONFIG_PARAVIRT_DEBUG is set, which, by being
complicated, manages to convince gcc that the locked parameter can be
changed externally (which is impossible).

Work around it by removing the conditional locking parameters altogether.
(It was never the most elegant code anyway.)

The slight penalty we now pay is an additional irqsave spin lock/unlock cycle
on the event enable path. But since enable is not a fast path, that is
preferable to the alternative solution, which was doing MMIO under an irqsave
spinlock.
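
The shape of the resulting code is easy to see in miniature: the reader takes
the lock unconditionally, and the enable path drops the lock before taking
its initial snapshot. A simplified pthread-based sketch (not the driver code;
names are illustrative):

  #include <pthread.h>
  #include <stdint.h>

  static pthread_mutex_t pmu_lock = PTHREAD_MUTEX_INITIALIZER;
  static uint64_t rc6_sample;

  /* The reader always takes the lock itself; no "already locked" parameter. */
  static uint64_t read_sample(void)
  {
          uint64_t val;

          pthread_mutex_lock(&pmu_lock);
          val = rc6_sample;
          pthread_mutex_unlock(&pmu_lock);

          return val;
  }

  static void event_enable(void)
  {
          pthread_mutex_lock(&pmu_lock);
          /* ... update enable bookkeeping under the lock ... */
          pthread_mutex_unlock(&pmu_lock);

          /*
           * The initial counter snapshot is taken only after the lock has been
           * dropped, so read_sample() can lock unconditionally without deadlock.
           */
          (void)read_sample();
  }

  int main(void)
  {
          event_enable();
          return 0;
  }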

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Fixes: 1fe699e30113 ("drm/i915/pmu: Fix sleep under atomic in RC6 readout")
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: David Airlie <airlied@linux.ie>
Cc: intel-gfx@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314080535.17490-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/i915_pmu.c | 32 +++++++++++++-------------------
 1 file changed, 13 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 4bc7aefa9541..11fb76bd3860 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -412,7 +412,7 @@ static u64 __get_rc6(struct drm_i915_private *i915)
 	return val;
 }
 
-static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+static u64 get_rc6(struct drm_i915_private *i915)
 {
 #if IS_ENABLED(CONFIG_PM)
 	unsigned long flags;
@@ -428,8 +428,7 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 		 * previously.
 		 */
 
-		if (!locked)
-			spin_lock_irqsave(&i915->pmu.lock, flags);
+		spin_lock_irqsave(&i915->pmu.lock, flags);
 
 		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
 			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
@@ -438,12 +437,10 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
 		}
 
-		if (!locked)
-			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	} else {
 		struct pci_dev *pdev = i915->drm.pdev;
 		struct device *kdev = &pdev->dev;
-		unsigned long flags2;
 
 		/*
 		 * We are runtime suspended.
@@ -452,10 +449,8 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 		 * on top of the last known real value, as the approximated RC6
 		 * counter value.
 		 */
-		if (!locked)
-			spin_lock_irqsave(&i915->pmu.lock, flags);
-
-		spin_lock_irqsave(&kdev->power.lock, flags2);
+		spin_lock_irqsave(&i915->pmu.lock, flags);
+		spin_lock(&kdev->power.lock);
 
 		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
 			i915->pmu.suspended_jiffies_last =
@@ -465,14 +460,13 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 		      i915->pmu.suspended_jiffies_last;
 		val += jiffies - kdev->power.accounting_timestamp;
 
-		spin_unlock_irqrestore(&kdev->power.lock, flags2);
+		spin_unlock(&kdev->power.lock);
 
 		val = jiffies_to_nsecs(val);
 		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
 
-		if (!locked)
-			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	}
 
 	return val;
@@ -481,7 +475,7 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 #endif
 }
 
-static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
+static u64 __i915_pmu_event_read(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
@@ -519,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
 			val = count_interrupts(i915);
 			break;
 		case I915_PMU_RC6_RESIDENCY:
-			val = get_rc6(i915, locked);
+			val = get_rc6(i915);
 			break;
 		}
 	}
@@ -534,7 +528,7 @@ static void i915_pmu_event_read(struct perf_event *event)
 
 again:
 	prev = local64_read(&hwc->prev_count);
-	new = __i915_pmu_event_read(event, false);
+	new = __i915_pmu_event_read(event);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
 		goto again;
@@ -584,14 +578,14 @@ static void i915_pmu_enable(struct perf_event *event)
 		engine->pmu.enable_count[sample]++;
 	}
 
+	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+
 	/*
 	 * Store the current counter value so we can report the correct delta
 	 * for all listeners. Even when the event was already enabled and has
 	 * an existing non-zero value.
 	 */
-	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
-
-	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
 }
 
 static void i915_pmu_disable(struct perf_event *event)

From 4635b573634c7028043244dbc1141ef57341deb2 Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Wed, 14 Mar 2018 13:36:52 +0530
Subject: [PATCH 0041/1286] drm/i915/cnl: Add macro to get PORT_TX register

This patch creates a new macro to get the PORT_TX register for any given DW.
This removes the need to define a register address for each port & DW.
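
The observation behind the macro is that within each per-port block the DWx
register simply sits at base + 4 * dw, so a single table of per-port bases
suffices. A small stand-alone sketch of that arithmetic (the helper name is
illustrative; the two base addresses and the expected results are taken from
the register definitions in the diff below):

  #include <stdint.h>
  #include <stdio.h>
  #include <assert.h>

  /* Per-port group bases (subset, for illustration). */
  static const uint32_t port_tx_grp_base[] = {
          0x162340,  /* port AE */
          0x1623C0,  /* port B  */
  };

  /* Each dword register sits 4 bytes further into the per-port block. */
  static uint32_t cnl_port_tx_dw_grp(unsigned int port, unsigned int dw)
  {
          return port_tx_grp_base[port] + 4 * dw;
  }

  int main(void)
  {
          /* DW2 of port AE lands at the old _CNL_PORT_TX_DW2_GRP_AE address. */
          assert(cnl_port_tx_dw_grp(0, 2) == 0x162348);
          /* DW4 of port B lands at the old _CNL_PORT_TX_DW4_GRP_B address. */
          printf("DW4 GRP B = 0x%x\n", cnl_port_tx_dw_grp(1, 4));
          return 0;
  }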

Changes since V1:
 - Use underscore prefix, as the macro isn't returning an mmio reg (Lucas)
 - Merge patch 1 & 2 of the series
Changes since V2:
 - remove _MMIO_PORT6_LN macro (Rodrigo)

Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314080653.9444-2-mahesh1.kumar@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 137 +++++++++-----------------------
 1 file changed, 39 insertions(+), 98 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 761bd3a4c5c1..5a2a3d6d8c97 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -154,8 +154,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
 #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
 #define _MMIO_PORT6(port, a, b, c, d, e, f) _MMIO(_PICK(port, a, b, c, d, e, f))
-#define _MMIO_PORT6_LN(port, ln, a0, a1, b, c, d, e, f)			\
-	_MMIO(_PICK(port, a0, b, c, d, e, f) + (ln * (a1 - a0)))
 #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
 #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
 
@@ -1964,30 +1962,36 @@ enum i915_power_well_id {
 						    _CNL_PORT_PCS_DW1_LN0_F)
 #define   COMMON_KEEPER_EN		(1 << 26)
 
-#define _CNL_PORT_TX_DW2_GRP_AE		0x162348
-#define _CNL_PORT_TX_DW2_GRP_B		0x1623C8
-#define _CNL_PORT_TX_DW2_GRP_C		0x162B48
-#define _CNL_PORT_TX_DW2_GRP_D		0x162BC8
-#define _CNL_PORT_TX_DW2_GRP_F		0x162A48
-#define _CNL_PORT_TX_DW2_LN0_AE		0x162448
-#define _CNL_PORT_TX_DW2_LN0_B		0x162648
-#define _CNL_PORT_TX_DW2_LN0_C		0x162C48
-#define _CNL_PORT_TX_DW2_LN0_D		0x162E48
-#define _CNL_PORT_TX_DW2_LN0_F		0x162848
-#define CNL_PORT_TX_DW2_GRP(port)	_MMIO_PORT6(port, \
-						    _CNL_PORT_TX_DW2_GRP_AE, \
-						    _CNL_PORT_TX_DW2_GRP_B, \
-						    _CNL_PORT_TX_DW2_GRP_C, \
-						    _CNL_PORT_TX_DW2_GRP_D, \
-						    _CNL_PORT_TX_DW2_GRP_AE, \
-						    _CNL_PORT_TX_DW2_GRP_F)
-#define CNL_PORT_TX_DW2_LN0(port)	_MMIO_PORT6(port, \
-						    _CNL_PORT_TX_DW2_LN0_AE, \
-						    _CNL_PORT_TX_DW2_LN0_B, \
-						    _CNL_PORT_TX_DW2_LN0_C, \
-						    _CNL_PORT_TX_DW2_LN0_D, \
-						    _CNL_PORT_TX_DW2_LN0_AE, \
-						    _CNL_PORT_TX_DW2_LN0_F)
+/* CNL Port TX registers */
+#define _CNL_PORT_TX_AE_GRP_OFFSET		0x162340
+#define _CNL_PORT_TX_B_GRP_OFFSET		0x1623C0
+#define _CNL_PORT_TX_C_GRP_OFFSET		0x162B40
+#define _CNL_PORT_TX_D_GRP_OFFSET		0x162BC0
+#define _CNL_PORT_TX_F_GRP_OFFSET		0x162A40
+#define _CNL_PORT_TX_AE_LN0_OFFSET		0x162440
+#define _CNL_PORT_TX_B_LN0_OFFSET		0x162640
+#define _CNL_PORT_TX_C_LN0_OFFSET		0x162C40
+#define _CNL_PORT_TX_D_LN0_OFFSET		0x162E40
+#define _CNL_PORT_TX_F_LN0_OFFSET		0x162840
+#define _CNL_PORT_TX_DW_GRP(port, dw)	(_PICK((port), \
+					       _CNL_PORT_TX_AE_GRP_OFFSET, \
+					       _CNL_PORT_TX_B_GRP_OFFSET, \
+					       _CNL_PORT_TX_B_GRP_OFFSET, \
+					       _CNL_PORT_TX_D_GRP_OFFSET, \
+					       _CNL_PORT_TX_AE_GRP_OFFSET, \
+					       _CNL_PORT_TX_F_GRP_OFFSET) + \
+					       4*(dw))
+#define _CNL_PORT_TX_DW_LN0(port, dw)	(_PICK((port), \
+					       _CNL_PORT_TX_AE_LN0_OFFSET, \
+					       _CNL_PORT_TX_B_LN0_OFFSET, \
+					       _CNL_PORT_TX_B_LN0_OFFSET, \
+					       _CNL_PORT_TX_D_LN0_OFFSET, \
+					       _CNL_PORT_TX_AE_LN0_OFFSET, \
+					       _CNL_PORT_TX_F_LN0_OFFSET) + \
+					       4*(dw))
+
+#define CNL_PORT_TX_DW2_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
+#define CNL_PORT_TX_DW2_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
 #define   SWING_SEL_UPPER(x)		((x >> 3) << 15)
 #define   SWING_SEL_UPPER_MASK		(1 << 15)
 #define   SWING_SEL_LOWER(x)		((x & 0x7) << 11)
@@ -1995,32 +1999,13 @@ enum i915_power_well_id {
 #define   RCOMP_SCALAR(x)		((x) << 0)
 #define   RCOMP_SCALAR_MASK		(0xFF << 0)
 
-#define _CNL_PORT_TX_DW4_GRP_AE		0x162350
-#define _CNL_PORT_TX_DW4_GRP_B		0x1623D0
-#define _CNL_PORT_TX_DW4_GRP_C		0x162B50
-#define _CNL_PORT_TX_DW4_GRP_D		0x162BD0
-#define _CNL_PORT_TX_DW4_GRP_F		0x162A50
 #define _CNL_PORT_TX_DW4_LN0_AE		0x162450
 #define _CNL_PORT_TX_DW4_LN1_AE		0x1624D0
-#define _CNL_PORT_TX_DW4_LN0_B		0x162650
-#define _CNL_PORT_TX_DW4_LN0_C		0x162C50
-#define _CNL_PORT_TX_DW4_LN0_D		0x162E50
-#define _CNL_PORT_TX_DW4_LN0_F		0x162850
-#define CNL_PORT_TX_DW4_GRP(port)       _MMIO_PORT6(port, \
-						    _CNL_PORT_TX_DW4_GRP_AE, \
-						    _CNL_PORT_TX_DW4_GRP_B, \
-						    _CNL_PORT_TX_DW4_GRP_C, \
-						    _CNL_PORT_TX_DW4_GRP_D, \
-						    _CNL_PORT_TX_DW4_GRP_AE, \
-						    _CNL_PORT_TX_DW4_GRP_F)
-#define CNL_PORT_TX_DW4_LN(port, ln)       _MMIO_PORT6_LN(port, ln,	\
-						    _CNL_PORT_TX_DW4_LN0_AE, \
-						    _CNL_PORT_TX_DW4_LN1_AE, \
-						    _CNL_PORT_TX_DW4_LN0_B, \
-						    _CNL_PORT_TX_DW4_LN0_C, \
-						    _CNL_PORT_TX_DW4_LN0_D, \
-						    _CNL_PORT_TX_DW4_LN0_AE, \
-						    _CNL_PORT_TX_DW4_LN0_F)
+#define CNL_PORT_TX_DW4_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
+#define CNL_PORT_TX_DW4_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
+#define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
+					     (ln * (_CNL_PORT_TX_DW4_LN1_AE - \
+						    _CNL_PORT_TX_DW4_LN0_AE)))
 #define   LOADGEN_SELECT		(1 << 31)
 #define   POST_CURSOR_1(x)		((x) << 12)
 #define   POST_CURSOR_1_MASK		(0x3F << 12)
@@ -2029,30 +2014,8 @@ enum i915_power_well_id {
 #define   CURSOR_COEFF(x)		((x) << 0)
 #define   CURSOR_COEFF_MASK		(0x3F << 0)
 
-#define _CNL_PORT_TX_DW5_GRP_AE		0x162354
-#define _CNL_PORT_TX_DW5_GRP_B		0x1623D4
-#define _CNL_PORT_TX_DW5_GRP_C		0x162B54
-#define _CNL_PORT_TX_DW5_GRP_D		0x162BD4
-#define _CNL_PORT_TX_DW5_GRP_F		0x162A54
-#define _CNL_PORT_TX_DW5_LN0_AE		0x162454
-#define _CNL_PORT_TX_DW5_LN0_B		0x162654
-#define _CNL_PORT_TX_DW5_LN0_C		0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D		0x162E54
-#define _CNL_PORT_TX_DW5_LN0_F		0x162854
-#define CNL_PORT_TX_DW5_GRP(port)	_MMIO_PORT6(port, \
-						    _CNL_PORT_TX_DW5_GRP_AE, \
-						    _CNL_PORT_TX_DW5_GRP_B, \
-						    _CNL_PORT_TX_DW5_GRP_C, \
-						    _CNL_PORT_TX_DW5_GRP_D, \
-						    _CNL_PORT_TX_DW5_GRP_AE, \
-						    _CNL_PORT_TX_DW5_GRP_F)
-#define CNL_PORT_TX_DW5_LN0(port)	_MMIO_PORT6(port, \
-						    _CNL_PORT_TX_DW5_LN0_AE, \
-						    _CNL_PORT_TX_DW5_LN0_B, \
-						    _CNL_PORT_TX_DW5_LN0_C, \
-						    _CNL_PORT_TX_DW5_LN0_D, \
-						    _CNL_PORT_TX_DW5_LN0_AE, \
-						    _CNL_PORT_TX_DW5_LN0_F)
+#define CNL_PORT_TX_DW5_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
+#define CNL_PORT_TX_DW5_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
 #define   TX_TRAINING_EN		(1 << 31)
 #define   TAP3_DISABLE			(1 << 29)
 #define   SCALING_MODE_SEL(x)		((x) << 18)
@@ -2060,30 +2023,8 @@ enum i915_power_well_id {
 #define   RTERM_SELECT(x)		((x) << 3)
 #define   RTERM_SELECT_MASK		(0x7 << 3)
 
-#define _CNL_PORT_TX_DW7_GRP_AE		0x16235C
-#define _CNL_PORT_TX_DW7_GRP_B		0x1623DC
-#define _CNL_PORT_TX_DW7_GRP_C		0x162B5C
-#define _CNL_PORT_TX_DW7_GRP_D		0x162BDC
-#define _CNL_PORT_TX_DW7_GRP_F		0x162A5C
-#define _CNL_PORT_TX_DW7_LN0_AE		0x16245C
-#define _CNL_PORT_TX_DW7_LN0_B		0x16265C
-#define _CNL_PORT_TX_DW7_LN0_C		0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D		0x162E5C
-#define _CNL_PORT_TX_DW7_LN0_F		0x16285C
-#define CNL_PORT_TX_DW7_GRP(port)	_MMIO_PORT6(port, \
-						    _CNL_PORT_TX_DW7_GRP_AE, \
-						    _CNL_PORT_TX_DW7_GRP_B, \
-						    _CNL_PORT_TX_DW7_GRP_C, \
-						    _CNL_PORT_TX_DW7_GRP_D, \
-						    _CNL_PORT_TX_DW7_GRP_AE, \
-						    _CNL_PORT_TX_DW7_GRP_F)
-#define CNL_PORT_TX_DW7_LN0(port)	_MMIO_PORT6(port, \
-						    _CNL_PORT_TX_DW7_LN0_AE, \
-						    _CNL_PORT_TX_DW7_LN0_B, \
-						    _CNL_PORT_TX_DW7_LN0_C, \
-						    _CNL_PORT_TX_DW7_LN0_D, \
-						    _CNL_PORT_TX_DW7_LN0_AE, \
-						    _CNL_PORT_TX_DW7_LN0_F)
+#define CNL_PORT_TX_DW7_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
+#define CNL_PORT_TX_DW7_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
 #define   N_SCALAR(x)			((x) << 24)
 #define   N_SCALAR_MASK			(0x7F << 24)
 

From da9cb11f76623b99f2d5e6aa68f43d6ef714a7de Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Wed, 14 Mar 2018 13:36:53 +0530
Subject: [PATCH 0042/1286] drm/i915/cnl: Kill _MMIO_PORT6 macro

This patch replaces the remaining uses of the _MMIO_PORT6 macro and removes
the macro.

Changes Since V1:
 - Rebase

Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314080653.9444-3-mahesh1.kumar@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a2a3d6d8c97..d965b4ab6120 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -153,7 +153,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
 #define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
 #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
-#define _MMIO_PORT6(port, a, b, c, d, e, f) _MMIO(_PICK(port, a, b, c, d, e, f))
 #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
 #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
 
@@ -1946,20 +1945,21 @@ enum i915_power_well_id {
 #define _CNL_PORT_PCS_DW1_LN0_C		0x162C04
 #define _CNL_PORT_PCS_DW1_LN0_D		0x162E04
 #define _CNL_PORT_PCS_DW1_LN0_F		0x162804
-#define CNL_PORT_PCS_DW1_GRP(port)	_MMIO_PORT6(port, \
+#define CNL_PORT_PCS_DW1_GRP(port)	_MMIO(_PICK(port, \
 						    _CNL_PORT_PCS_DW1_GRP_AE, \
 						    _CNL_PORT_PCS_DW1_GRP_B, \
 						    _CNL_PORT_PCS_DW1_GRP_C, \
 						    _CNL_PORT_PCS_DW1_GRP_D, \
 						    _CNL_PORT_PCS_DW1_GRP_AE, \
-						    _CNL_PORT_PCS_DW1_GRP_F)
-#define CNL_PORT_PCS_DW1_LN0(port)	_MMIO_PORT6(port, \
+						    _CNL_PORT_PCS_DW1_GRP_F))
+
+#define CNL_PORT_PCS_DW1_LN0(port)	_MMIO(_PICK(port, \
 						    _CNL_PORT_PCS_DW1_LN0_AE, \
 						    _CNL_PORT_PCS_DW1_LN0_B, \
 						    _CNL_PORT_PCS_DW1_LN0_C, \
 						    _CNL_PORT_PCS_DW1_LN0_D, \
 						    _CNL_PORT_PCS_DW1_LN0_AE, \
-						    _CNL_PORT_PCS_DW1_LN0_F)
+						    _CNL_PORT_PCS_DW1_LN0_F))
 #define   COMMON_KEEPER_EN		(1 << 26)
 
 /* CNL Port TX registers */

From 80b216b98b0cd4f10303863c062b7ab7d117ada7 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Wed, 14 Mar 2018 11:26:50 -0700
Subject: [PATCH 0043/1286] drm/i915: store all mmio bases in intel_engines

The mmio bases we're currently storing in the intel_engines array are
only valid for a subset of gens, so we need to ignore them and use
different values in some cases. Instead of doing that, we can have a
table of [starting gen, mmio base] pairs for each engine in
intel_engines and select the correct one based on the gen we're running
on in a consistent way.
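
Since the table is sorted by descending gen, selecting the base is just "take
the first entry whose gen is not newer than the one we are running on". A
stand-alone sketch of that lookup follows; the struct, helper name and base
values are illustrative approximations of the VCS entry, not the driver code:

  #include <stdint.h>
  #include <stdio.h>

  struct mmio_base {
          uint8_t gen;    /* first gen this base applies to */
          uint32_t base;
  };

  /* Must be sorted in reverse gen order; unused tail entries stay zeroed. */
  static const struct mmio_base vcs_bases[] = {
          { .gen = 11, .base = 0x1c0000 },
          { .gen = 6,  .base = 0x12000 },
          { .gen = 4,  .base = 0x04000 },
  };

  static uint32_t pick_mmio_base(int running_gen,
                                 const struct mmio_base *bases, int count)
  {
          for (int i = 0; i < count; i++)
                  if (running_gen >= bases[i].gen)
                          return bases[i].base;  /* first entry not newer than us */
          return 0;  /* no valid base for this gen */
  }

  int main(void)
  {
          printf("gen9 VCS base: 0x%x\n", pick_mmio_base(9, vcs_bases, 3));
          return 0;
  }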

v2: document that the list goes in reverse order, update starting gen
    for render (Chris)

v3: starting gen for render back to 1 to make our life easier with
    selftests (Chris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> #v2
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314182653.26981-1-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_engine_cs.c  | 78 ++++++++++++++++---------
 drivers/gpu/drm/i915/intel_ringbuffer.c |  1 -
 2 files changed, 50 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index f22c5f72df8d..71eac571e141 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -81,12 +81,17 @@ static const struct engine_class_info intel_engine_classes[] = {
 	},
 };
 
+#define MAX_MMIO_BASES 3
 struct engine_info {
 	unsigned int hw_id;
 	unsigned int uabi_id;
 	u8 class;
 	u8 instance;
-	u32 mmio_base;
+	/* mmio bases table *must* be sorted in reverse gen order */
+	struct engine_mmio_base {
+		u32 gen : 8;
+		u32 base : 24;
+	} mmio_bases[MAX_MMIO_BASES];
 	unsigned irq_shift;
 };
 
@@ -96,7 +101,9 @@ static const struct engine_info intel_engines[] = {
 		.uabi_id = I915_EXEC_RENDER,
 		.class = RENDER_CLASS,
 		.instance = 0,
-		.mmio_base = RENDER_RING_BASE,
+		.mmio_bases = {
+			{ .gen = 1, .base = RENDER_RING_BASE }
+		},
 		.irq_shift = GEN8_RCS_IRQ_SHIFT,
 	},
 	[BCS] = {
@@ -104,7 +111,9 @@ static const struct engine_info intel_engines[] = {
 		.uabi_id = I915_EXEC_BLT,
 		.class = COPY_ENGINE_CLASS,
 		.instance = 0,
-		.mmio_base = BLT_RING_BASE,
+		.mmio_bases = {
+			{ .gen = 6, .base = BLT_RING_BASE }
+		},
 		.irq_shift = GEN8_BCS_IRQ_SHIFT,
 	},
 	[VCS] = {
@@ -112,7 +121,11 @@ static const struct engine_info intel_engines[] = {
 		.uabi_id = I915_EXEC_BSD,
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 0,
-		.mmio_base = GEN6_BSD_RING_BASE,
+		.mmio_bases = {
+			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
+			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
+			{ .gen = 4, .base = BSD_RING_BASE }
+		},
 		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
 	},
 	[VCS2] = {
@@ -120,7 +133,10 @@ static const struct engine_info intel_engines[] = {
 		.uabi_id = I915_EXEC_BSD,
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 1,
-		.mmio_base = GEN8_BSD2_RING_BASE,
+		.mmio_bases = {
+			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
+			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
+		},
 		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
 	},
 	[VCS3] = {
@@ -128,7 +144,9 @@ static const struct engine_info intel_engines[] = {
 		.uabi_id = I915_EXEC_BSD,
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 2,
-		.mmio_base = GEN11_BSD3_RING_BASE,
+		.mmio_bases = {
+			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
+		},
 		.irq_shift = 0, /* not used */
 	},
 	[VCS4] = {
@@ -136,7 +154,9 @@ static const struct engine_info intel_engines[] = {
 		.uabi_id = I915_EXEC_BSD,
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 3,
-		.mmio_base = GEN11_BSD4_RING_BASE,
+		.mmio_bases = {
+			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
+		},
 		.irq_shift = 0, /* not used */
 	},
 	[VECS] = {
@@ -144,7 +164,10 @@ static const struct engine_info intel_engines[] = {
 		.uabi_id = I915_EXEC_VEBOX,
 		.class = VIDEO_ENHANCEMENT_CLASS,
 		.instance = 0,
-		.mmio_base = VEBOX_RING_BASE,
+		.mmio_bases = {
+			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
+			{ .gen = 7, .base = VEBOX_RING_BASE }
+		},
 		.irq_shift = GEN8_VECS_IRQ_SHIFT,
 	},
 	[VECS2] = {
@@ -152,7 +175,9 @@ static const struct engine_info intel_engines[] = {
 		.uabi_id = I915_EXEC_VEBOX,
 		.class = VIDEO_ENHANCEMENT_CLASS,
 		.instance = 1,
-		.mmio_base = GEN11_VEBOX2_RING_BASE,
+		.mmio_bases = {
+			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
+		},
 		.irq_shift = 0, /* not used */
 	},
 };
@@ -223,6 +248,21 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
 	}
 }
 
+static u32 __engine_mmio_base(struct drm_i915_private *i915,
+			      const struct engine_mmio_base *bases)
+{
+	int i;
+
+	for (i = 0; i < MAX_MMIO_BASES; i++)
+		if (INTEL_GEN(i915) >= bases[i].gen)
+			break;
+
+	GEM_BUG_ON(i == MAX_MMIO_BASES);
+	GEM_BUG_ON(!bases[i].base);
+
+	return bases[i].base;
+}
+
 static int
 intel_engine_setup(struct drm_i915_private *dev_priv,
 		   enum intel_engine_id id)
@@ -257,25 +297,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 			 class_info->name, info->instance) >=
 		sizeof(engine->name));
 	engine->hw_id = engine->guc_id = info->hw_id;
-	if (INTEL_GEN(dev_priv) >= 11) {
-		switch (engine->id) {
-		case VCS:
-			engine->mmio_base = GEN11_BSD_RING_BASE;
-			break;
-		case VCS2:
-			engine->mmio_base = GEN11_BSD2_RING_BASE;
-			break;
-		case VECS:
-			engine->mmio_base = GEN11_VEBOX_RING_BASE;
-			break;
-		default:
-			/* take the original value for all other engines  */
-			engine->mmio_base = info->mmio_base;
-			break;
-		}
-	} else {
-		engine->mmio_base = info->mmio_base;
-	}
+	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
 	engine->irq_shift = info->irq_shift;
 	engine->class = info->class;
 	engine->instance = info->instance;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 88eeb64041ae..3b478769a8c1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2080,7 +2080,6 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 		engine->emit_flush = gen6_bsd_ring_flush;
 		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 	} else {
-		engine->mmio_base = BSD_RING_BASE;
 		engine->emit_flush = bsd_ring_flush;
 		if (IS_GEN5(dev_priv))
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;

From 74419daaae6c1dafe9cc5d4d0c92c17982f4eebd Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Wed, 14 Mar 2018 11:26:51 -0700
Subject: [PATCH 0044/1286] drm/i915: add a selftest for the mmio_bases table

Check that the entries are in reverse gen order and that all entries
with gen > 0 have an mmio base set.

v2: loop forward, simplify logic, use i915_subtests (Chris)

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314182653.26981-2-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_engine_cs.c        | 16 +++--
 .../drm/i915/selftests/i915_mock_selftests.h  |  1 +
 .../gpu/drm/i915/selftests/intel_engine_cs.c  | 58 +++++++++++++++++++
 3 files changed, 69 insertions(+), 6 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_engine_cs.c

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 71eac571e141..8fda81126fbc 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -263,16 +263,21 @@ static u32 __engine_mmio_base(struct drm_i915_private *i915,
 	return bases[i].base;
 }
 
+static void __sprint_engine_name(char *name, const struct engine_info *info)
+{
+	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
+			 intel_engine_classes[info->class].name,
+			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
+}
+
 static int
 intel_engine_setup(struct drm_i915_private *dev_priv,
 		   enum intel_engine_id id)
 {
 	const struct engine_info *info = &intel_engines[id];
-	const struct engine_class_info *class_info;
 	struct intel_engine_cs *engine;
 
 	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
-	class_info = &intel_engine_classes[info->class];
 
 	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
 	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
@@ -293,9 +298,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 
 	engine->id = id;
 	engine->i915 = dev_priv;
-	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
-			 class_info->name, info->instance) >=
-		sizeof(engine->name));
+	__sprint_engine_name(engine->name, info);
 	engine->hw_id = engine->guc_id = info->hw_id;
 	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
 	engine->irq_shift = info->irq_shift;
@@ -303,7 +306,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	engine->instance = info->instance;
 
 	engine->uabi_id = info->uabi_id;
-	engine->uabi_class = class_info->uabi_class;
+	engine->uabi_class = intel_engine_classes[info->class].uabi_class;
 
 	engine->context_size = __intel_engine_context_size(dev_priv,
 							   engine->class);
@@ -2140,4 +2143,5 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
+#include "selftests/intel_engine_cs.c"
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 9a48aa441743..d16d74178e9d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -14,6 +14,7 @@ selftest(fence, i915_sw_fence_mock_selftests)
 selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
+selftest(engine, intel_engine_cs_mock_selftests)
 selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
 selftest(timelines, i915_gem_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c b/drivers/gpu/drm/i915/selftests/intel_engine_cs.c
new file mode 100644
index 000000000000..cfaa6b296835
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_engine_cs.c
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+static int intel_mmio_bases_check(void *arg)
+{
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
+		const struct engine_info *info = &intel_engines[i];
+		char name[INTEL_ENGINE_CS_MAX_NAME];
+		u8 prev = U8_MAX;
+
+		__sprint_engine_name(name, info);
+
+		for (j = 0; j < MAX_MMIO_BASES; j++) {
+			u8 gen = info->mmio_bases[j].gen;
+			u32 base = info->mmio_bases[j].base;
+
+			if (gen >= prev) {
+				pr_err("%s: %s: mmio base for gen %x "
+					"is before the one for gen %x\n",
+				       __func__, name, prev, gen);
+				return -EINVAL;
+			}
+
+			if (gen == 0)
+				break;
+
+			if (!base) {
+				pr_err("%s: %s: invalid mmio base (%x) "
+					"for gen %x at entry %u\n",
+				       __func__, name, base, gen, j);
+				return -EINVAL;
+			}
+
+			prev = gen;
+		}
+
+		pr_info("%s: min gen supported for %s = %d\n",
+			__func__, name, prev);
+	}
+
+	return 0;
+}
+
+int intel_engine_cs_mock_selftests(void)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(intel_mmio_bases_check),
+	};
+
+	return i915_subtests(tests, NULL);
+}

From 210060edc216ebd6330ee4fded5a01547d938642 Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Wed, 14 Mar 2018 11:26:52 -0700
Subject: [PATCH 0045/1286] drm/i915: use engine->irq_keep_mask when resetting
 irqs

The "reset" value and the "keep" value are the same.
While we are here, add a TODO for gen11 interrupt reset.
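
For context, the value reset_irq() open-codes is the same expression
logical_ring_default_irqs() already stores as the keep mask:

	/* logical_ring_default_irqs() */
	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;

	/* reset_irq(), before this patch */
	I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
		   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);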

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314182653.26981-3-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_lrc.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a69b367e565..5e8f6896d059 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1666,6 +1666,10 @@ static void reset_irq(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 	int i;
 
+	/* TODO: correctly reset irqs for gen11 */
+	if (WARN_ON_ONCE(INTEL_GEN(engine->i915) >= 11))
+		return;
+
 	GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
 
 	/*
@@ -1677,11 +1681,11 @@ static void reset_irq(struct intel_engine_cs *engine)
 	 */
 	for (i = 0; i < 2; i++) {
 		I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
-			   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+			   engine->irq_keep_mask);
 		POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
 	}
 	GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
-		   (GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift));
+		   engine->irq_keep_mask);
 
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 }

From fa6f071d54fb3658c7012634b8e4035c8d3a25bc Mon Sep 17 00:00:00 2001
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Date: Wed, 14 Mar 2018 11:26:53 -0700
Subject: [PATCH 0046/1286] drm/i915: move gen8 irq shifts to intel_lrc.c

The only usage outside the intel_lrc.c file is in the ringbuffer
init, but the irq mask calculated there is then overwritten for
all engines that have a non-zero shift, so we can drop it.
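
Concretely, the generic ring irq init used to compute

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

and with the shift gone the render ring now sets its mask directly
(per the reasoning above, the other legacy engines overwrite the mask
later in their own init paths):

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;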

This change is not aimed at saving code but at removing from
intel_engines information that does not apply to all gens that have
the engine. When measured without the temporary WARN_ON, the code
size is basically unchanged.

v2: make the irq_shifts array static const
v3: rebase, move irq_shifts array to logical_ring_default_irqs
v4: move array inside the if and use u8 for it (Chris)

Suggested-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314182653.26981-4-daniele.ceraolospurio@intel.com
---
 drivers/gpu/drm/i915/intel_engine_cs.c  | 10 ----------
 drivers/gpu/drm/i915/intel_lrc.c        | 15 ++++++++++++++-
 drivers/gpu/drm/i915/intel_ringbuffer.c |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.h |  1 -
 4 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 8fda81126fbc..337dfa56a738 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -92,7 +92,6 @@ struct engine_info {
 		u32 gen : 8;
 		u32 base : 24;
 	} mmio_bases[MAX_MMIO_BASES];
-	unsigned irq_shift;
 };
 
 static const struct engine_info intel_engines[] = {
@@ -104,7 +103,6 @@ static const struct engine_info intel_engines[] = {
 		.mmio_bases = {
 			{ .gen = 1, .base = RENDER_RING_BASE }
 		},
-		.irq_shift = GEN8_RCS_IRQ_SHIFT,
 	},
 	[BCS] = {
 		.hw_id = BCS_HW,
@@ -114,7 +112,6 @@ static const struct engine_info intel_engines[] = {
 		.mmio_bases = {
 			{ .gen = 6, .base = BLT_RING_BASE }
 		},
-		.irq_shift = GEN8_BCS_IRQ_SHIFT,
 	},
 	[VCS] = {
 		.hw_id = VCS_HW,
@@ -126,7 +123,6 @@ static const struct engine_info intel_engines[] = {
 			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
 			{ .gen = 4, .base = BSD_RING_BASE }
 		},
-		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
 	},
 	[VCS2] = {
 		.hw_id = VCS2_HW,
@@ -137,7 +133,6 @@ static const struct engine_info intel_engines[] = {
 			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
 			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
 		},
-		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
 	},
 	[VCS3] = {
 		.hw_id = VCS3_HW,
@@ -147,7 +142,6 @@ static const struct engine_info intel_engines[] = {
 		.mmio_bases = {
 			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
 		},
-		.irq_shift = 0, /* not used */
 	},
 	[VCS4] = {
 		.hw_id = VCS4_HW,
@@ -157,7 +151,6 @@ static const struct engine_info intel_engines[] = {
 		.mmio_bases = {
 			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
 		},
-		.irq_shift = 0, /* not used */
 	},
 	[VECS] = {
 		.hw_id = VECS_HW,
@@ -168,7 +161,6 @@ static const struct engine_info intel_engines[] = {
 			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
 			{ .gen = 7, .base = VEBOX_RING_BASE }
 		},
-		.irq_shift = GEN8_VECS_IRQ_SHIFT,
 	},
 	[VECS2] = {
 		.hw_id = VECS2_HW,
@@ -178,7 +170,6 @@ static const struct engine_info intel_engines[] = {
 		.mmio_bases = {
 			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
 		},
-		.irq_shift = 0, /* not used */
 	},
 };
 
@@ -301,7 +292,6 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	__sprint_engine_name(engine->name, info);
 	engine->hw_id = engine->guc_id = info->hw_id;
 	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
-	engine->irq_shift = info->irq_shift;
 	engine->class = info->class;
 	engine->instance = info->instance;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5e8f6896d059..53f1c009ed7b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2118,7 +2118,20 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 static inline void
 logical_ring_default_irqs(struct intel_engine_cs *engine)
 {
-	unsigned shift = engine->irq_shift;
+	unsigned int shift = 0;
+
+	if (INTEL_GEN(engine->i915) < 11) {
+		const u8 irq_shifts[] = {
+			[RCS]  = GEN8_RCS_IRQ_SHIFT,
+			[BCS]  = GEN8_BCS_IRQ_SHIFT,
+			[VCS]  = GEN8_VCS1_IRQ_SHIFT,
+			[VCS2] = GEN8_VCS2_IRQ_SHIFT,
+			[VECS] = GEN8_VECS_IRQ_SHIFT,
+		};
+
+		shift = irq_shifts[engine->id];
+	}
+
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3b478769a8c1..72d6167c519a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1944,8 +1944,6 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 				struct intel_engine_cs *engine)
 {
-	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
-
 	if (INTEL_GEN(dev_priv) >= 6) {
 		engine->irq_enable = gen6_irq_enable;
 		engine->irq_disable = gen6_irq_disable;
@@ -2030,6 +2028,8 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 	if (HAS_L3_DPF(dev_priv))
 		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
+	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+
 	if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
 		engine->emit_flush = gen7_render_ring_flush;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 81cdbbf257ec..80fae806aec9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -331,7 +331,6 @@ struct intel_engine_cs {
 	u8 instance;
 	u32 context_size;
 	u32 mmio_base;
-	unsigned int irq_shift;
 
 	struct intel_ring *buffer;
 	struct intel_timeline *timeline;

From c080363fcdc3015e4cb9b5582afe2cd3aa890630 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Tue, 13 Mar 2018 23:19:20 +0000
Subject: [PATCH 0047/1286] drm/i915: Split GPU commands definitions into
 separate header

We should not mix MMIO with MI_INSTR definitions.
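
To illustrate the distinction (both lines are taken from the hunks
below): an MMIO define names a register address on the device, while
an MI_INSTR define encodes a command dword to be emitted into a ring
or batch buffer.

	#define MI_PREDICATE_SRC0	_MMIO(0x2400)	  /* register, stays in i915_reg.h */
	#define MI_PREDICATE		MI_INSTR(0x0C, 0) /* command, moves to intel_gpu_commands.h */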

v2: sanitize comment, change include order (Chris)

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180313124109.39216-1-michal.wajdeczko@intel.com
Link: https://patchwork.freedesktop.org/patch/msgid/20180313231920.6932-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_reg.h           | 263 ---------------------
 drivers/gpu/drm/i915/intel_gpu_commands.h | 274 ++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h   |   3 +-
 3 files changed, 276 insertions(+), 264 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/intel_gpu_commands.h

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d965b4ab6120..dbcb8829faba 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -427,145 +427,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define VGA_CR_INDEX_CGA 0x3d4
 #define VGA_CR_DATA_CGA 0x3d5
 
-/*
- * Instruction field definitions used by the command parser
- */
-#define INSTR_CLIENT_SHIFT      29
-#define   INSTR_MI_CLIENT       0x0
-#define   INSTR_BC_CLIENT       0x2
-#define   INSTR_RC_CLIENT       0x3
-#define INSTR_SUBCLIENT_SHIFT   27
-#define INSTR_SUBCLIENT_MASK    0x18000000
-#define   INSTR_MEDIA_SUBCLIENT 0x2
-#define INSTR_26_TO_24_MASK	0x7000000
-#define   INSTR_26_TO_24_SHIFT	24
-
-/*
- * Memory interface instructions used by the kernel
- */
-#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
-/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
-#define  MI_GLOBAL_GTT    (1<<22)
-
-#define MI_NOOP			MI_INSTR(0, 0)
-#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
-#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
-#define   MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
-#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
-#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
-#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-#define MI_FLUSH		MI_INSTR(0x04, 0)
-#define   MI_READ_FLUSH		(1 << 0)
-#define   MI_EXE_FLUSH		(1 << 1)
-#define   MI_NO_WRITE_FLUSH	(1 << 2)
-#define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
-#define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
-#define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
-#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
-#define MI_ARB_ON_OFF		MI_INSTR(0x08, 0)
-#define   MI_ARB_ENABLE			(1<<0)
-#define   MI_ARB_DISABLE		(0<<0)
-#define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
-#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
-#define   MI_SUSPEND_FLUSH_EN	(1<<0)
-#define MI_SET_APPID		MI_INSTR(0x0e, 0)
-#define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
-#define   MI_OVERLAY_CONTINUE	(0x0<<21)
-#define   MI_OVERLAY_ON		(0x1<<21)
-#define   MI_OVERLAY_OFF	(0x2<<21)
-#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
-#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
-#define MI_DISPLAY_FLIP_I915	MI_INSTR(0x14, 1)
-#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
-/* IVB has funny definitions for which plane to flip. */
-#define   MI_DISPLAY_FLIP_IVB_PLANE_A  (0 << 19)
-#define   MI_DISPLAY_FLIP_IVB_PLANE_B  (1 << 19)
-#define   MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
-#define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
-#define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
-#define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-/* SKL ones */
-#define   MI_DISPLAY_FLIP_SKL_PLANE_1_A	(0 << 8)
-#define   MI_DISPLAY_FLIP_SKL_PLANE_1_B	(1 << 8)
-#define   MI_DISPLAY_FLIP_SKL_PLANE_1_C	(2 << 8)
-#define   MI_DISPLAY_FLIP_SKL_PLANE_2_A	(4 << 8)
-#define   MI_DISPLAY_FLIP_SKL_PLANE_2_B	(5 << 8)
-#define   MI_DISPLAY_FLIP_SKL_PLANE_2_C	(6 << 8)
-#define   MI_DISPLAY_FLIP_SKL_PLANE_3_A	(7 << 8)
-#define   MI_DISPLAY_FLIP_SKL_PLANE_3_B	(8 << 8)
-#define   MI_DISPLAY_FLIP_SKL_PLANE_3_C	(9 << 8)
-#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6, gen7 */
-#define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
-#define   MI_SEMAPHORE_UPDATE	    (1<<21)
-#define   MI_SEMAPHORE_COMPARE	    (1<<20)
-#define   MI_SEMAPHORE_REGISTER	    (1<<18)
-#define   MI_SEMAPHORE_SYNC_VR	    (0<<16) /* RCS  wait for VCS  (RVSYNC) */
-#define   MI_SEMAPHORE_SYNC_VER	    (1<<16) /* RCS  wait for VECS (RVESYNC) */
-#define   MI_SEMAPHORE_SYNC_BR	    (2<<16) /* RCS  wait for BCS  (RBSYNC) */
-#define   MI_SEMAPHORE_SYNC_BV	    (0<<16) /* VCS  wait for BCS  (VBSYNC) */
-#define   MI_SEMAPHORE_SYNC_VEV	    (1<<16) /* VCS  wait for VECS (VVESYNC) */
-#define   MI_SEMAPHORE_SYNC_RV	    (2<<16) /* VCS  wait for RCS  (VRSYNC) */
-#define   MI_SEMAPHORE_SYNC_RB	    (0<<16) /* BCS  wait for RCS  (BRSYNC) */
-#define   MI_SEMAPHORE_SYNC_VEB	    (1<<16) /* BCS  wait for VECS (BVESYNC) */
-#define   MI_SEMAPHORE_SYNC_VB	    (2<<16) /* BCS  wait for VCS  (BVSYNC) */
-#define   MI_SEMAPHORE_SYNC_BVE	    (0<<16) /* VECS wait for BCS  (VEBSYNC) */
-#define   MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
-#define   MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
-#define   MI_SEMAPHORE_SYNC_INVALID (3<<16)
-#define   MI_SEMAPHORE_SYNC_MASK    (3<<16)
-#define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
-#define   MI_MM_SPACE_GTT		(1<<8)
-#define   MI_MM_SPACE_PHYSICAL		(0<<8)
-#define   MI_SAVE_EXT_STATE_EN		(1<<3)
-#define   MI_RESTORE_EXT_STATE_EN	(1<<2)
-#define   MI_FORCE_RESTORE		(1<<1)
-#define   MI_RESTORE_INHIBIT		(1<<0)
-#define   HSW_MI_RS_SAVE_STATE_EN       (1<<3)
-#define   HSW_MI_RS_RESTORE_STATE_EN    (1<<2)
-#define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
-#define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
-#define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
-#define   MI_SEMAPHORE_POLL		(1<<15)
-#define   MI_SEMAPHORE_SAD_GTE_SDD	(1<<12)
-#define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
-#define MI_STORE_DWORD_IMM_GEN4	MI_INSTR(0x20, 2)
-#define   MI_MEM_VIRTUAL	(1 << 22) /* 945,g33,965 */
-#define   MI_USE_GGTT		(1 << 22) /* g4x+ */
-#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
-#define   MI_STORE_DWORD_INDEX_SHIFT 2
-/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
- * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
- *   simply ignores the register load under certain conditions.
- * - One can actually load arbitrary many arbitrary registers: Simply issue x
- *   address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
- */
-#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)
-#define   MI_LRI_FORCE_POSTED		(1<<12)
-#define MI_STORE_REGISTER_MEM        MI_INSTR(0x24, 1)
-#define MI_STORE_REGISTER_MEM_GEN8   MI_INSTR(0x24, 2)
-#define   MI_SRM_LRM_GLOBAL_GTT		(1<<22)
-#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
-#define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
-#define   MI_INVALIDATE_TLB		(1<<18)
-#define   MI_FLUSH_DW_OP_STOREDW	(1<<14)
-#define   MI_FLUSH_DW_OP_MASK		(3<<14)
-#define   MI_FLUSH_DW_NOTIFY		(1<<8)
-#define   MI_INVALIDATE_BSD		(1<<7)
-#define   MI_FLUSH_DW_USE_GTT		(1<<2)
-#define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
-#define MI_LOAD_REGISTER_MEM	   MI_INSTR(0x29, 1)
-#define MI_LOAD_REGISTER_MEM_GEN8  MI_INSTR(0x29, 2)
-#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
-#define   MI_BATCH_NON_SECURE		(1)
-/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
-#define   MI_BATCH_NON_SECURE_I965	(1<<8)
-#define   MI_BATCH_PPGTT_HSW		(1<<8)
-#define   MI_BATCH_NON_SECURE_HSW	(1<<13)
-#define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
-#define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
-#define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
-#define   MI_BATCH_RESOURCE_STREAMER (1<<10)
-
 #define MI_PREDICATE_SRC0	_MMIO(0x2400)
 #define MI_PREDICATE_SRC0_UDW	_MMIO(0x2400 + 4)
 #define MI_PREDICATE_SRC1	_MMIO(0x2408)
@@ -575,130 +436,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  LOWER_SLICE_ENABLED	(1<<0)
 #define  LOWER_SLICE_DISABLED	(0<<0)
 
-/*
- * 3D instructions used by the kernel
- */
-#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
-
-#define GEN9_MEDIA_POOL_STATE     ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
-#define   GEN9_MEDIA_POOL_ENABLE  (1 << 31)
-#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
-#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define   SC_UPDATE_SCISSOR       (0x1<<1)
-#define   SC_ENABLE_MASK          (0x1<<0)
-#define   SC_ENABLE               (0x1<<0)
-#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
-#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define   SCI_YMIN_MASK      (0xffff<<16)
-#define   SCI_XMIN_MASK      (0xffff<<0)
-#define   SCI_YMAX_MASK      (0xffff<<16)
-#define   SCI_XMAX_MASK      (0xffff<<0)
-#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
-
-#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
-#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
-#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
-#define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
-#define   BLT_WRITE_A			(2<<20)
-#define   BLT_WRITE_RGB			(1<<20)
-#define   BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
-#define   BLT_DEPTH_8			(0<<24)
-#define   BLT_DEPTH_16_565		(1<<24)
-#define   BLT_DEPTH_16_1555		(2<<24)
-#define   BLT_DEPTH_32			(3<<24)
-#define   BLT_ROP_SRC_COPY		(0xcc<<16)
-#define   BLT_ROP_COLOR_COPY		(0xf0<<16)
-#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
-#define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define   ASYNC_FLIP                (1<<22)
-#define   DISPLAY_PLANE_A           (0<<20)
-#define   DISPLAY_PLANE_B           (1<<20)
-#define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
-#define   PIPE_CONTROL_FLUSH_L3				(1<<27)
-#define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
-#define   PIPE_CONTROL_MMIO_WRITE			(1<<23)
-#define   PIPE_CONTROL_STORE_DATA_INDEX			(1<<21)
-#define   PIPE_CONTROL_CS_STALL				(1<<20)
-#define   PIPE_CONTROL_TLB_INVALIDATE			(1<<18)
-#define   PIPE_CONTROL_MEDIA_STATE_CLEAR		(1<<16)
-#define   PIPE_CONTROL_QW_WRITE				(1<<14)
-#define   PIPE_CONTROL_POST_SYNC_OP_MASK                (3<<14)
-#define   PIPE_CONTROL_DEPTH_STALL			(1<<13)
-#define   PIPE_CONTROL_WRITE_FLUSH			(1<<12)
-#define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH	(1<<12) /* gen6+ */
-#define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1<<11) /* MBZ on Ironlake */
-#define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE		(1<<10) /* GM45+ only */
-#define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
-#define   PIPE_CONTROL_NOTIFY				(1<<8)
-#define   PIPE_CONTROL_FLUSH_ENABLE			(1<<7) /* gen7+ */
-#define   PIPE_CONTROL_DC_FLUSH_ENABLE			(1<<5)
-#define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
-#define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
-#define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
-#define   PIPE_CONTROL_STALL_AT_SCOREBOARD		(1<<1)
-#define   PIPE_CONTROL_DEPTH_CACHE_FLUSH		(1<<0)
-#define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
-
-/*
- * Commands used only by the command parser
- */
-#define MI_SET_PREDICATE        MI_INSTR(0x01, 0)
-#define MI_ARB_CHECK            MI_INSTR(0x05, 0)
-#define MI_RS_CONTROL           MI_INSTR(0x06, 0)
-#define MI_URB_ATOMIC_ALLOC     MI_INSTR(0x09, 0)
-#define MI_PREDICATE            MI_INSTR(0x0C, 0)
-#define MI_RS_CONTEXT           MI_INSTR(0x0F, 0)
-#define MI_TOPOLOGY_FILTER      MI_INSTR(0x0D, 0)
-#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
-#define MI_URB_CLEAR            MI_INSTR(0x19, 0)
-#define MI_UPDATE_GTT           MI_INSTR(0x23, 0)
-#define MI_CLFLUSH              MI_INSTR(0x27, 0)
-#define MI_REPORT_PERF_COUNT    MI_INSTR(0x28, 0)
-#define   MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 0)
-#define MI_RS_STORE_DATA_IMM    MI_INSTR(0x2B, 0)
-#define MI_LOAD_URB_MEM         MI_INSTR(0x2C, 0)
-#define MI_STORE_URB_MEM        MI_INSTR(0x2D, 0)
-#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
-
-#define PIPELINE_SELECT                ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
-#define GFX_OP_3DSTATE_VF_STATISTICS   ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
-#define MEDIA_VFE_STATE                ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
-#define  MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
-#define GPGPU_OBJECT                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
-#define GPGPU_WALKER                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
-#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
-	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
-#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
-	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
-#define GFX_OP_3DSTATE_SO_DECL_LIST \
-	((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
-
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
-	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
-	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
-	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
-	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
-#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
-	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
-
-#define MFX_WAIT  ((0x3<<29)|(0x1<<27)|(0x0<<16))
-
-#define COLOR_BLT     ((0x2<<29)|(0x40<<22))
-#define SRC_COPY_BLT  ((0x2<<29)|(0x43<<22))
-
 /*
  * Registers used only by the command parser
  */
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h
new file mode 100644
index 000000000000..105e2a9e874a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gpu_commands.h
@@ -0,0 +1,274 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2003-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_GPU_COMMANDS_H_
+#define _INTEL_GPU_COMMANDS_H_
+
+/*
+ * Instruction field definitions used by the command parser
+ */
+#define INSTR_CLIENT_SHIFT      29
+#define   INSTR_MI_CLIENT       0x0
+#define   INSTR_BC_CLIENT       0x2
+#define   INSTR_RC_CLIENT       0x3
+#define INSTR_SUBCLIENT_SHIFT   27
+#define INSTR_SUBCLIENT_MASK    0x18000000
+#define   INSTR_MEDIA_SUBCLIENT 0x2
+#define INSTR_26_TO_24_MASK	0x7000000
+#define   INSTR_26_TO_24_SHIFT	24
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
+#define  MI_GLOBAL_GTT    (1<<22)
+
+#define MI_NOOP			MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+#define   MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
+#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH		MI_INSTR(0x04, 0)
+#define   MI_READ_FLUSH		(1 << 0)
+#define   MI_EXE_FLUSH		(1 << 1)
+#define   MI_NO_WRITE_FLUSH	(1 << 2)
+#define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
+#define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
+#define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
+#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
+#define MI_ARB_ON_OFF		MI_INSTR(0x08, 0)
+#define   MI_ARB_ENABLE			(1<<0)
+#define   MI_ARB_DISABLE		(0<<0)
+#define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
+#define   MI_SUSPEND_FLUSH_EN	(1<<0)
+#define MI_SET_APPID		MI_INSTR(0x0e, 0)
+#define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
+#define   MI_OVERLAY_CONTINUE	(0x0<<21)
+#define   MI_OVERLAY_ON		(0x1<<21)
+#define   MI_OVERLAY_OFF	(0x2<<21)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915	MI_INSTR(0x14, 1)
+#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define   MI_DISPLAY_FLIP_IVB_PLANE_A  (0 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_B  (1 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+/* SKL ones */
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_A	(0 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_B	(1 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_1_C	(2 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_A	(4 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_B	(5 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_2_C	(6 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_A	(7 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_B	(8 << 8)
+#define   MI_DISPLAY_FLIP_SKL_PLANE_3_C	(9 << 8)
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6, gen7 */
+#define   MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
+#define   MI_SEMAPHORE_UPDATE	    (1<<21)
+#define   MI_SEMAPHORE_COMPARE	    (1<<20)
+#define   MI_SEMAPHORE_REGISTER	    (1<<18)
+#define   MI_SEMAPHORE_SYNC_VR	    (0<<16) /* RCS  wait for VCS  (RVSYNC) */
+#define   MI_SEMAPHORE_SYNC_VER	    (1<<16) /* RCS  wait for VECS (RVESYNC) */
+#define   MI_SEMAPHORE_SYNC_BR	    (2<<16) /* RCS  wait for BCS  (RBSYNC) */
+#define   MI_SEMAPHORE_SYNC_BV	    (0<<16) /* VCS  wait for BCS  (VBSYNC) */
+#define   MI_SEMAPHORE_SYNC_VEV	    (1<<16) /* VCS  wait for VECS (VVESYNC) */
+#define   MI_SEMAPHORE_SYNC_RV	    (2<<16) /* VCS  wait for RCS  (VRSYNC) */
+#define   MI_SEMAPHORE_SYNC_RB	    (0<<16) /* BCS  wait for RCS  (BRSYNC) */
+#define   MI_SEMAPHORE_SYNC_VEB	    (1<<16) /* BCS  wait for VECS (BVESYNC) */
+#define   MI_SEMAPHORE_SYNC_VB	    (2<<16) /* BCS  wait for VCS  (BVSYNC) */
+#define   MI_SEMAPHORE_SYNC_BVE	    (0<<16) /* VECS wait for BCS  (VEBSYNC) */
+#define   MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
+#define   MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
+#define   MI_SEMAPHORE_SYNC_INVALID (3<<16)
+#define   MI_SEMAPHORE_SYNC_MASK    (3<<16)
+#define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
+#define   MI_MM_SPACE_GTT		(1<<8)
+#define   MI_MM_SPACE_PHYSICAL		(0<<8)
+#define   MI_SAVE_EXT_STATE_EN		(1<<3)
+#define   MI_RESTORE_EXT_STATE_EN	(1<<2)
+#define   MI_FORCE_RESTORE		(1<<1)
+#define   MI_RESTORE_INHIBIT		(1<<0)
+#define   HSW_MI_RS_SAVE_STATE_EN       (1<<3)
+#define   HSW_MI_RS_RESTORE_STATE_EN    (1<<2)
+#define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
+#define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
+#define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
+#define   MI_SEMAPHORE_POLL		(1<<15)
+#define   MI_SEMAPHORE_SAD_GTE_SDD	(1<<12)
+#define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN4	MI_INSTR(0x20, 2)
+#define   MI_MEM_VIRTUAL	(1 << 22) /* 945,g33,965 */
+#define   MI_USE_GGTT		(1 << 22) /* g4x+ */
+#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
+#define   MI_STORE_DWORD_INDEX_SHIFT 2
+/*
+ * Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ *   simply ignores the register load under certain conditions.
+ * - One can actually load arbitrary many arbitrary registers: Simply issue x
+ *   address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)
+#define   MI_LRI_FORCE_POSTED		(1<<12)
+#define MI_STORE_REGISTER_MEM        MI_INSTR(0x24, 1)
+#define MI_STORE_REGISTER_MEM_GEN8   MI_INSTR(0x24, 2)
+#define   MI_SRM_LRM_GLOBAL_GTT		(1<<22)
+#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
+#define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
+#define   MI_INVALIDATE_TLB		(1<<18)
+#define   MI_FLUSH_DW_OP_STOREDW	(1<<14)
+#define   MI_FLUSH_DW_OP_MASK		(3<<14)
+#define   MI_FLUSH_DW_NOTIFY		(1<<8)
+#define   MI_INVALIDATE_BSD		(1<<7)
+#define   MI_FLUSH_DW_USE_GTT		(1<<2)
+#define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
+#define MI_LOAD_REGISTER_MEM	   MI_INSTR(0x29, 1)
+#define MI_LOAD_REGISTER_MEM_GEN8  MI_INSTR(0x29, 2)
+#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
+#define   MI_BATCH_NON_SECURE		(1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define   MI_BATCH_NON_SECURE_I965	(1<<8)
+#define   MI_BATCH_PPGTT_HSW		(1<<8)
+#define   MI_BATCH_NON_SECURE_HSW	(1<<13)
+#define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+#define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
+#define   MI_BATCH_RESOURCE_STREAMER (1<<10)
+
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GEN9_MEDIA_POOL_STATE     ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
+#define   GEN9_MEDIA_POOL_ENABLE  (1 << 31)
+#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define   SC_UPDATE_SCISSOR       (0x1<<1)
+#define   SC_ENABLE_MASK          (0x1<<0)
+#define   SC_ENABLE               (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define   SCI_YMIN_MASK      (0xffff<<16)
+#define   SCI_XMIN_MASK      (0xffff<<0)
+#define   SCI_YMAX_MASK      (0xffff<<16)
+#define   SCI_XMAX_MASK      (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+
+#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
+#define   BLT_WRITE_A			(2<<20)
+#define   BLT_WRITE_RGB			(1<<20)
+#define   BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
+#define   BLT_DEPTH_8			(0<<24)
+#define   BLT_DEPTH_16_565		(1<<24)
+#define   BLT_DEPTH_16_1555		(2<<24)
+#define   BLT_DEPTH_32			(3<<24)
+#define   BLT_ROP_SRC_COPY		(0xcc<<16)
+#define   BLT_ROP_COLOR_COPY		(0xf0<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define   ASYNC_FLIP                (1<<22)
+#define   DISPLAY_PLANE_A           (0<<20)
+#define   DISPLAY_PLANE_B           (1<<20)
+#define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define   PIPE_CONTROL_FLUSH_L3				(1<<27)
+#define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
+#define   PIPE_CONTROL_MMIO_WRITE			(1<<23)
+#define   PIPE_CONTROL_STORE_DATA_INDEX			(1<<21)
+#define   PIPE_CONTROL_CS_STALL				(1<<20)
+#define   PIPE_CONTROL_TLB_INVALIDATE			(1<<18)
+#define   PIPE_CONTROL_MEDIA_STATE_CLEAR		(1<<16)
+#define   PIPE_CONTROL_QW_WRITE				(1<<14)
+#define   PIPE_CONTROL_POST_SYNC_OP_MASK                (3<<14)
+#define   PIPE_CONTROL_DEPTH_STALL			(1<<13)
+#define   PIPE_CONTROL_WRITE_FLUSH			(1<<12)
+#define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH	(1<<12) /* gen6+ */
+#define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1<<11) /* MBZ on ILK */
+#define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE		(1<<10) /* GM45+ only */
+#define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
+#define   PIPE_CONTROL_NOTIFY				(1<<8)
+#define   PIPE_CONTROL_FLUSH_ENABLE			(1<<7) /* gen7+ */
+#define   PIPE_CONTROL_DC_FLUSH_ENABLE			(1<<5)
+#define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
+#define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
+#define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
+#define   PIPE_CONTROL_STALL_AT_SCOREBOARD		(1<<1)
+#define   PIPE_CONTROL_DEPTH_CACHE_FLUSH		(1<<0)
+#define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+
+/*
+ * Commands used only by the command parser
+ */
+#define MI_SET_PREDICATE        MI_INSTR(0x01, 0)
+#define MI_ARB_CHECK            MI_INSTR(0x05, 0)
+#define MI_RS_CONTROL           MI_INSTR(0x06, 0)
+#define MI_URB_ATOMIC_ALLOC     MI_INSTR(0x09, 0)
+#define MI_PREDICATE            MI_INSTR(0x0C, 0)
+#define MI_RS_CONTEXT           MI_INSTR(0x0F, 0)
+#define MI_TOPOLOGY_FILTER      MI_INSTR(0x0D, 0)
+#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0)
+#define MI_URB_CLEAR            MI_INSTR(0x19, 0)
+#define MI_UPDATE_GTT           MI_INSTR(0x23, 0)
+#define MI_CLFLUSH              MI_INSTR(0x27, 0)
+#define MI_REPORT_PERF_COUNT    MI_INSTR(0x28, 0)
+#define   MI_REPORT_PERF_COUNT_GGTT (1<<0)
+#define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 0)
+#define MI_RS_STORE_DATA_IMM    MI_INSTR(0x2B, 0)
+#define MI_LOAD_URB_MEM         MI_INSTR(0x2C, 0)
+#define MI_STORE_URB_MEM        MI_INSTR(0x2D, 0)
+#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+
+#define PIPELINE_SELECT                ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
+#define GFX_OP_3DSTATE_VF_STATISTICS   ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
+#define MEDIA_VFE_STATE                ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define  MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define GPGPU_OBJECT                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
+#define GPGPU_WALKER                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16))
+#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16))
+#define GFX_OP_3DSTATE_SO_DECL_LIST \
+	((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16))
+
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16))
+#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
+	((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
+
+#define MFX_WAIT  ((0x3<<29)|(0x1<<27)|(0x0<<16))
+
+#define COLOR_BLT     ((0x2<<29)|(0x40<<22))
+#define SRC_COPY_BLT  ((0x2<<29)|(0x43<<22))
+
+#endif /* _INTEL_GPU_COMMANDS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 80fae806aec9..1f50727a5ddb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -7,10 +7,11 @@
 #include "i915_gem_batch_pool.h"
 #include "i915_gem_timeline.h"
 
-#include "i915_reg.h" /* FIXME split out i915_gpu_commands.h */
+#include "i915_reg.h"
 #include "i915_pmu.h"
 #include "i915_request.h"
 #include "i915_selftest.h"
+#include "intel_gpu_commands.h"
 
 struct drm_printer;
 

From 56b9a8b083870162310fe37d4b1b5597eb983bae Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Wed, 14 Mar 2018 14:45:39 +0000
Subject: [PATCH 0048/1286] drm/i915/guc: Update syntax of GuC log functions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We moved the GuC log related data and code into separate files and a
separate definition, but we didn't change the function syntax to
follow the object-verb pattern. Let's fix that before we continue
with the next round of code refactoring.
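
For example (a representative call site from the hunks below), the
log object becomes the subject of the call, and the owning guc is
recovered internally via log_to_guc() only where still needed:

	/* before */
	ret = intel_guc_log_create(guc);

	/* after */
	ret = intel_guc_log_create(&guc->log);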

v2: rebased

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314144539.11152-1-michal.wajdeczko@intel.com
[ickle: checkpatch booleans]
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c  |   4 +-
 drivers/gpu/drm/i915/intel_guc.c     |   8 +-
 drivers/gpu/drm/i915/intel_guc_log.c | 203 ++++++++++++++-------------
 drivers/gpu/drm/i915/intel_guc_log.h |  18 +--
 drivers/gpu/drm/i915/intel_uc.c      |   4 +-
 5 files changed, 126 insertions(+), 111 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 972014b2497d..298a3aa9513b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2502,7 +2502,7 @@ static int i915_guc_log_control_get(void *data, u64 *val)
 	if (!USES_GUC(dev_priv))
 		return -ENODEV;
 
-	*val = intel_guc_log_control_get(&dev_priv->guc);
+	*val = intel_guc_log_control_get(&dev_priv->guc.log);
 
 	return 0;
 }
@@ -2514,7 +2514,7 @@ static int i915_guc_log_control_set(void *data, u64 val)
 	if (!USES_GUC(dev_priv))
 		return -ENODEV;
 
-	return intel_guc_log_control_set(&dev_priv->guc, val);
+	return intel_guc_log_control_set(&dev_priv->guc.log, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 3eb516e7c225..e70bf654d21e 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -64,7 +64,7 @@ void intel_guc_init_early(struct intel_guc *guc)
 {
 	intel_guc_fw_init_early(guc);
 	intel_guc_ct_init_early(&guc->ct);
-	intel_guc_log_init_early(guc);
+	intel_guc_log_init_early(&guc->log);
 
 	mutex_init(&guc->send_mutex);
 	guc->send = intel_guc_send_nop;
@@ -169,7 +169,7 @@ int intel_guc_init(struct intel_guc *guc)
 		return ret;
 	GEM_BUG_ON(!guc->shared_data);
 
-	ret = intel_guc_log_create(guc);
+	ret = intel_guc_log_create(&guc->log);
 	if (ret)
 		goto err_shared;
 
@@ -184,7 +184,7 @@ int intel_guc_init(struct intel_guc *guc)
 	return 0;
 
 err_log:
-	intel_guc_log_destroy(guc);
+	intel_guc_log_destroy(&guc->log);
 err_shared:
 	guc_shared_data_destroy(guc);
 	return ret;
@@ -196,7 +196,7 @@ void intel_guc_fini(struct intel_guc *guc)
 
 	i915_ggtt_disable_guc(dev_priv);
 	intel_guc_ads_destroy(guc);
-	intel_guc_log_destroy(guc);
+	intel_guc_log_destroy(&guc->log);
 	guc_shared_data_destroy(guc);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index b9c7bd745565..bfb9a68fffef 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -22,13 +22,10 @@
  *
  */
 
-#include <linux/debugfs.h>
-#include <linux/relay.h>
-
 #include "intel_guc_log.h"
 #include "i915_drv.h"
 
-static void guc_log_capture_logs(struct intel_guc *guc);
+static void guc_log_capture_logs(struct intel_guc_log *log);
 
 /**
  * DOC: GuC firmware log
@@ -74,6 +71,11 @@ static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
+static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
+{
+	return container_of(log, struct intel_guc, log);
+}
+
 /*
  * Sub buffer switch callback. Called whenever relay has to switch to a new
  * sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -149,8 +151,9 @@ static struct rchan_callbacks relay_callbacks = {
 	.remove_buf_file = remove_buf_file_callback,
 };
 
-static int guc_log_relay_file_create(struct intel_guc *guc)
+static int guc_log_relay_file_create(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct dentry *log_dir;
 	int ret;
@@ -158,7 +161,7 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
 	if (!i915_modparams.guc_log_level)
 		return 0;
 
-	mutex_lock(&guc->log.runtime.relay_lock);
+	mutex_lock(&log->runtime.relay_lock);
 
 	/* For now create the log file in /sys/kernel/debug/dri/0 dir */
 	log_dir = dev_priv->drm.primary->debugfs_root;
@@ -181,7 +184,8 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
 		goto out_unlock;
 	}
 
-	ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
+	ret = relay_late_setup_files(log->runtime.relay_chan, "guc_log",
+				     log_dir);
 	if (ret < 0 && ret != -EEXIST) {
 		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
 		goto out_unlock;
@@ -190,18 +194,18 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
 	ret = 0;
 
 out_unlock:
-	mutex_unlock(&guc->log.runtime.relay_lock);
+	mutex_unlock(&log->runtime.relay_lock);
 	return ret;
 }
 
-static bool guc_log_has_relay(struct intel_guc *guc)
+static bool guc_log_has_relay(struct intel_guc_log *log)
 {
-	lockdep_assert_held(&guc->log.runtime.relay_lock);
+	lockdep_assert_held(&log->runtime.relay_lock);
 
-	return guc->log.runtime.relay_chan != NULL;
+	return log->runtime.relay_chan;
 }
 
-static void guc_move_to_next_buf(struct intel_guc *guc)
+static void guc_move_to_next_buf(struct intel_guc_log *log)
 {
 	/*
 	 * Make sure the updates made in the sub buffer are visible when
@@ -209,19 +213,19 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
 	 */
 	smp_wmb();
 
-	if (!guc_log_has_relay(guc))
+	if (!guc_log_has_relay(log))
 		return;
 
 	/* All data has been written, so now move the offset of sub buffer. */
-	relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
+	relay_reserve(log->runtime.relay_chan, log->vma->obj->base.size);
 
 	/* Switch to the next sub buffer */
-	relay_flush(guc->log.runtime.relay_chan);
+	relay_flush(log->runtime.relay_chan);
 }
 
-static void *guc_get_write_buffer(struct intel_guc *guc)
+static void *guc_get_write_buffer(struct intel_guc_log *log)
 {
-	if (!guc_log_has_relay(guc))
+	if (!guc_log_has_relay(log))
 		return NULL;
 
 	/*
@@ -233,25 +237,25 @@ static void *guc_get_write_buffer(struct intel_guc *guc)
 	 * done without using relay_reserve() along with relay_write(). So its
 	 * better to use relay_reserve() alone.
 	 */
-	return relay_reserve(guc->log.runtime.relay_chan, 0);
+	return relay_reserve(log->runtime.relay_chan, 0);
 }
 
-static bool guc_check_log_buf_overflow(struct intel_guc *guc,
+static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
 				       enum guc_log_buffer_type type,
 				       unsigned int full_cnt)
 {
-	unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+	unsigned int prev_full_cnt = log->prev_overflow_count[type];
 	bool overflow = false;
 
 	if (full_cnt != prev_full_cnt) {
 		overflow = true;
 
-		guc->log.prev_overflow_count[type] = full_cnt;
-		guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+		log->prev_overflow_count[type] = full_cnt;
+		log->total_overflow_count[type] += full_cnt - prev_full_cnt;
 
 		if (full_cnt < prev_full_cnt) {
 			/* buffer_full_cnt is a 4 bit counter */
-			guc->log.total_overflow_count[type] += 16;
+			log->total_overflow_count[type] += 16;
 		}
 		DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
 	}
@@ -275,7 +279,7 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
 	return 0;
 }
 
-static void guc_read_update_log_buffer(struct intel_guc *guc)
+static void guc_read_update_log_buffer(struct intel_guc_log *log)
 {
 	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
 	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
@@ -284,16 +288,16 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
 	void *src_data, *dst_data;
 	bool new_overflow;
 
-	if (WARN_ON(!guc->log.runtime.buf_addr))
+	if (WARN_ON(!log->runtime.buf_addr))
 		return;
 
 	/* Get the pointer to shared GuC log buffer */
-	log_buf_state = src_data = guc->log.runtime.buf_addr;
+	log_buf_state = src_data = log->runtime.buf_addr;
 
-	mutex_lock(&guc->log.runtime.relay_lock);
+	mutex_lock(&log->runtime.relay_lock);
 
 	/* Get the pointer to local buffer to store the logs */
-	log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
 
 	if (unlikely(!log_buf_snapshot_state)) {
 		/*
@@ -301,8 +305,8 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
 		 * getting consumed by User at a slow rate.
 		 */
 		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
-		guc->log.capture_miss_count++;
-		mutex_unlock(&guc->log.runtime.relay_lock);
+		log->capture_miss_count++;
+		mutex_unlock(&log->runtime.relay_lock);
 
 		return;
 	}
@@ -325,8 +329,8 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
 		full_cnt = log_buf_state_local.buffer_full_cnt;
 
 		/* Bookkeeping stuff */
-		guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
-		new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+		log->flush_count[type] += log_buf_state_local.flush_to_file;
+		new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);
 
 		/* Update the state of shared log buffer */
 		log_buf_state->read_ptr = write_offset;
@@ -373,38 +377,39 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
 		dst_data += buffer_size;
 	}
 
-	guc_move_to_next_buf(guc);
+	guc_move_to_next_buf(log);
 
-	mutex_unlock(&guc->log.runtime.relay_lock);
+	mutex_unlock(&log->runtime.relay_lock);
 }
 
 static void capture_logs_work(struct work_struct *work)
 {
-	struct intel_guc *guc =
-		container_of(work, struct intel_guc, log.runtime.flush_work);
+	struct intel_guc_log *log =
+		container_of(work, struct intel_guc_log, runtime.flush_work);
 
-	guc_log_capture_logs(guc);
+	guc_log_capture_logs(log);
 }
 
-static bool guc_log_has_runtime(struct intel_guc *guc)
+static bool guc_log_has_runtime(struct intel_guc_log *log)
 {
-	return guc->log.runtime.buf_addr != NULL;
+	return log->runtime.buf_addr;
 }
 
-static int guc_log_runtime_create(struct intel_guc *guc)
+static int guc_log_runtime_create(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	void *vaddr;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (!guc->log.vma)
+	if (!log->vma)
 		return -ENODEV;
 
-	GEM_BUG_ON(guc_log_has_runtime(guc));
+	GEM_BUG_ON(guc_log_has_runtime(log));
 
-	ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
+	ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
 	if (ret)
 		return ret;
 
@@ -413,38 +418,39 @@ static int guc_log_runtime_create(struct intel_guc *guc)
 	 * buffer pages, so that we can directly get the data
 	 * (up-to-date) from memory.
 	 */
-	vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
 		DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
 		return PTR_ERR(vaddr);
 	}
 
-	guc->log.runtime.buf_addr = vaddr;
+	log->runtime.buf_addr = vaddr;
 
 	return 0;
 }
 
-static void guc_log_runtime_destroy(struct intel_guc *guc)
+static void guc_log_runtime_destroy(struct intel_guc_log *log)
 {
 	/*
 	 * It's possible that the runtime stuff was never allocated because
 	 * GuC log was disabled at the boot time.
 	 */
-	if (!guc_log_has_runtime(guc))
+	if (!guc_log_has_runtime(log))
 		return;
 
-	i915_gem_object_unpin_map(guc->log.vma->obj);
-	guc->log.runtime.buf_addr = NULL;
+	i915_gem_object_unpin_map(log->vma->obj);
+	log->runtime.buf_addr = NULL;
 }
 
-void intel_guc_log_init_early(struct intel_guc *guc)
+void intel_guc_log_init_early(struct intel_guc_log *log)
 {
-	mutex_init(&guc->log.runtime.relay_lock);
-	INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
+	mutex_init(&log->runtime.relay_lock);
+	INIT_WORK(&log->runtime.flush_work, capture_logs_work);
 }
 
-static int guc_log_relay_create(struct intel_guc *guc)
+static int guc_log_relay_create(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct rchan *guc_log_relay_chan;
 	size_t n_subbufs, subbuf_size;
@@ -453,9 +459,9 @@ static int guc_log_relay_create(struct intel_guc *guc)
 	if (!i915_modparams.guc_log_level)
 		return 0;
 
-	mutex_lock(&guc->log.runtime.relay_lock);
+	mutex_lock(&log->runtime.relay_lock);
 
-	GEM_BUG_ON(guc_log_has_relay(guc));
+	GEM_BUG_ON(guc_log_has_relay(log));
 
 	 /* Keep the size of sub buffers same as shared log buffer */
 	subbuf_size = GUC_LOG_SIZE;
@@ -483,42 +489,43 @@ static int guc_log_relay_create(struct intel_guc *guc)
 	}
 
 	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
-	guc->log.runtime.relay_chan = guc_log_relay_chan;
+	log->runtime.relay_chan = guc_log_relay_chan;
 
-	mutex_unlock(&guc->log.runtime.relay_lock);
+	mutex_unlock(&log->runtime.relay_lock);
 
 	return 0;
 
 err:
-	mutex_unlock(&guc->log.runtime.relay_lock);
+	mutex_unlock(&log->runtime.relay_lock);
 	/* logging will be off */
 	i915_modparams.guc_log_level = 0;
 	return ret;
 }
 
-static void guc_log_relay_destroy(struct intel_guc *guc)
+static void guc_log_relay_destroy(struct intel_guc_log *log)
 {
-	mutex_lock(&guc->log.runtime.relay_lock);
+	mutex_lock(&log->runtime.relay_lock);
 
 	/*
 	 * It's possible that the relay was never allocated because
 	 * GuC log was disabled at the boot time.
 	 */
-	if (!guc_log_has_relay(guc))
+	if (!guc_log_has_relay(log))
 		goto out_unlock;
 
-	relay_close(guc->log.runtime.relay_chan);
-	guc->log.runtime.relay_chan = NULL;
+	relay_close(log->runtime.relay_chan);
+	log->runtime.relay_chan = NULL;
 
 out_unlock:
-	mutex_unlock(&guc->log.runtime.relay_lock);
+	mutex_unlock(&log->runtime.relay_lock);
 }
 
-static void guc_log_capture_logs(struct intel_guc *guc)
+static void guc_log_capture_logs(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-	guc_read_update_log_buffer(guc);
+	guc_read_update_log_buffer(log);
 
 	/*
 	 * Generally device is expected to be active only at this
@@ -529,15 +536,16 @@ static void guc_log_capture_logs(struct intel_guc *guc)
 	intel_runtime_pm_put(dev_priv);
 }
 
-static void guc_flush_logs(struct intel_guc *guc)
+static void guc_flush_logs(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
 	/*
 	 * Before initiating the forceful flush, wait for any pending/ongoing
 	 * flush to complete otherwise forceful flush may not actually happen.
 	 */
-	flush_work(&guc->log.runtime.flush_work);
+	flush_work(&log->runtime.flush_work);
 
 	/* Ask GuC to update the log buffer state */
 	intel_runtime_pm_get(dev_priv);
@@ -545,17 +553,18 @@ static void guc_flush_logs(struct intel_guc *guc)
 	intel_runtime_pm_put(dev_priv);
 
 	/* GuC would have updated log buffer by now, so capture it */
-	guc_log_capture_logs(guc);
+	guc_log_capture_logs(log);
 }
 
-int intel_guc_log_create(struct intel_guc *guc)
+int intel_guc_log_create(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct i915_vma *vma;
 	unsigned long offset;
 	u32 flags;
 	int ret;
 
-	GEM_BUG_ON(guc->log.vma);
+	GEM_BUG_ON(log->vma);
 
 	/*
 	 * We require SSE 4.1 for fast reads from the GuC log buffer and
@@ -573,7 +582,7 @@ int intel_guc_log_create(struct intel_guc *guc)
 		goto err;
 	}
 
-	guc->log.vma = vma;
+	log->vma = vma;
 
 	/* each allocated unit is a page */
 	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
@@ -582,7 +591,7 @@ int intel_guc_log_create(struct intel_guc *guc)
 		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
 	offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT;
-	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+	log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 
 	return 0;
 
@@ -592,15 +601,15 @@ err:
 	return ret;
 }
 
-void intel_guc_log_destroy(struct intel_guc *guc)
+void intel_guc_log_destroy(struct intel_guc_log *log)
 {
-	guc_log_runtime_destroy(guc);
-	i915_vma_unpin_and_release(&guc->log.vma);
+	guc_log_runtime_destroy(log);
+	i915_vma_unpin_and_release(&log->vma);
 }
 
-int intel_guc_log_control_get(struct intel_guc *guc)
+int intel_guc_log_control_get(struct intel_guc_log *log)
 {
-	GEM_BUG_ON(!guc->log.vma);
+	GEM_BUG_ON(!log->vma);
 	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
 
 	return i915_modparams.guc_log_level;
@@ -613,14 +622,15 @@ int intel_guc_log_control_get(struct intel_guc *guc)
 	LOG_LEVEL_TO_ENABLED(_x) ? _x - 1 : 0;	\
 })
 #define VERBOSITY_TO_LOG_LEVEL(x)  ((x) + 1)
-int intel_guc_log_control_set(struct intel_guc *guc, u64 val)
+int intel_guc_log_control_set(struct intel_guc_log *log, u64 val)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	bool enabled = LOG_LEVEL_TO_ENABLED(val);
 	int ret;
 
 	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
-	GEM_BUG_ON(!guc->log.vma);
+	GEM_BUG_ON(!log->vma);
 	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
 
 	/*
@@ -650,15 +660,15 @@ int intel_guc_log_control_set(struct intel_guc *guc, u64 val)
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	if (enabled && !guc_log_has_runtime(guc)) {
-		ret = intel_guc_log_register(guc);
+	if (enabled && !guc_log_has_runtime(log)) {
+		ret = intel_guc_log_register(log);
 		if (ret) {
 			/* logging will remain off */
 			i915_modparams.guc_log_level = 0;
 			goto out;
 		}
-	} else if (!enabled && guc_log_has_runtime(guc)) {
-		intel_guc_log_unregister(guc);
+	} else if (!enabled && guc_log_has_runtime(log)) {
+		intel_guc_log_unregister(log);
 	}
 
 	return 0;
@@ -669,30 +679,31 @@ out:
 	return ret;
 }
 
-int intel_guc_log_register(struct intel_guc *guc)
+int intel_guc_log_register(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *i915 = guc_to_i915(guc);
 	int ret;
 
-	GEM_BUG_ON(guc_log_has_runtime(guc));
+	GEM_BUG_ON(guc_log_has_runtime(log));
 
 	/*
 	 * If log was disabled at boot time, then setup needed to handle
 	 * log buffer flush interrupts would not have been done yet, so
 	 * do that now.
 	 */
-	ret = guc_log_relay_create(guc);
+	ret = guc_log_relay_create(log);
 	if (ret)
 		goto err;
 
 	mutex_lock(&i915->drm.struct_mutex);
-	ret = guc_log_runtime_create(guc);
+	ret = guc_log_runtime_create(log);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	if (ret)
 		goto err_relay;
 
-	ret = guc_log_relay_file_create(guc);
+	ret = guc_log_relay_file_create(log);
 	if (ret)
 		goto err_runtime;
 
@@ -707,16 +718,17 @@ int intel_guc_log_register(struct intel_guc *guc)
 
 err_runtime:
 	mutex_lock(&i915->drm.struct_mutex);
-	guc_log_runtime_destroy(guc);
+	guc_log_runtime_destroy(log);
 	mutex_unlock(&i915->drm.struct_mutex);
 err_relay:
-	guc_log_relay_destroy(guc);
+	guc_log_relay_destroy(log);
 err:
 	return ret;
 }
 
-void intel_guc_log_unregister(struct intel_guc *guc)
+void intel_guc_log_unregister(struct intel_guc_log *log)
 {
+	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *i915 = guc_to_i915(guc);
 
 	/*
@@ -725,16 +737,17 @@ void intel_guc_log_unregister(struct intel_guc *guc)
 	 * which is yet to be captured. So request GuC to update the log
 	 * buffer state and then collect the left over logs.
 	 */
-	guc_flush_logs(guc);
+	guc_flush_logs(log);
 
 	mutex_lock(&i915->drm.struct_mutex);
+
 	/* GuC logging is currently the only user of Guc2Host interrupts */
 	intel_runtime_pm_get(i915);
 	gen9_disable_guc_interrupts(i915);
 	intel_runtime_pm_put(i915);
 
-	guc_log_runtime_destroy(guc);
+	guc_log_runtime_destroy(log);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	guc_log_relay_destroy(guc);
+	guc_log_relay_destroy(log);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index 09dd2ef1933d..6264bd5ba080 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -25,11 +25,12 @@
 #ifndef _INTEL_GUC_LOG_H_
 #define _INTEL_GUC_LOG_H_
 
+#include <linux/mutex.h>
+#include <linux/relay.h>
 #include <linux/workqueue.h>
 
 #include "intel_guc_fwif.h"
 
-struct drm_i915_private;
 struct intel_guc;
 
 /*
@@ -59,12 +60,13 @@ struct intel_guc_log {
 	u32 flush_count[GUC_MAX_LOG_BUFFER];
 };
 
-int intel_guc_log_create(struct intel_guc *guc);
-void intel_guc_log_destroy(struct intel_guc *guc);
-void intel_guc_log_init_early(struct intel_guc *guc);
-int intel_guc_log_control_get(struct intel_guc *guc);
-int intel_guc_log_control_set(struct intel_guc *guc, u64 control_val);
-int intel_guc_log_register(struct intel_guc *guc);
-void intel_guc_log_unregister(struct intel_guc *guc);
+void intel_guc_log_init_early(struct intel_guc_log *log);
+int intel_guc_log_create(struct intel_guc_log *log);
+int intel_guc_log_register(struct intel_guc_log *log);
+void intel_guc_log_unregister(struct intel_guc_log *log);
+void intel_guc_log_destroy(struct intel_guc_log *log);
+
+int intel_guc_log_control_get(struct intel_guc_log *log);
+int intel_guc_log_control_set(struct intel_guc_log *log, u64 control);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 6316548a1c78..104c03ae2742 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -229,7 +229,7 @@ int intel_uc_register(struct drm_i915_private *i915)
 		return 0;
 
 	if (i915_modparams.guc_log_level)
-		ret = intel_guc_log_register(&i915->guc);
+		ret = intel_guc_log_register(&i915->guc.log);
 
 	return ret;
 }
@@ -240,7 +240,7 @@ void intel_uc_unregister(struct drm_i915_private *i915)
 		return;
 
 	if (i915_modparams.guc_log_level)
-		intel_guc_log_unregister(&i915->guc);
+		intel_guc_log_unregister(&i915->guc.log);
 }
 
 static int guc_enable_communication(struct intel_guc *guc)

From 7fb96dac6755d053d3a540aff55fe0064001bdf6 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Thu, 15 Mar 2018 15:28:47 +0000
Subject: [PATCH 0049/1286] drm/i915/guc: Fix build break on config without
 DEBUG_FS

In commit 56b9a8b08387 ("drm/i915/guc: Update syntax of GuC
log functions") we accidentally removed the debugfs.h header
that defines the stub functions needed when DEBUG_FS is disabled.
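
For context, a rough sketch of why the include matters: when CONFIG_DEBUG_FS
is not set, <linux/debugfs.h> replaces the real API with inline stubs, so any
file that calls into debugfs must pull in that header to build. Paraphrased
(not the exact kernel definition):

	#if !defined(CONFIG_DEBUG_FS)
	static inline struct dentry *
	debugfs_create_file(const char *name, umode_t mode,
			    struct dentry *parent, void *data,
			    const struct file_operations *fops)
	{
		return ERR_PTR(-ENODEV);
	}
	#endif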

Reported-by: Mike Lothian <mike@fireburn.co.uk>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Mike Lothian <mike@fireburn.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180315152848.40476-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc_log.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index bfb9a68fffef..1c2127bc3878 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -22,6 +22,8 @@
  *
  */
 
+#include <linux/debugfs.h>
+
 #include "intel_guc_log.h"
 #include "i915_drv.h"
 

From d9b13c4dde6cacd8f2c4385cd6d293b0ac622e0b Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 15 Mar 2018 13:14:50 +0000
Subject: [PATCH 0050/1286] drm/i915: Trace GEM steps between submit and
 wedging

We still have an odd race with wedging/unwedging as shown by igt/gem_eio
that defies expectations. Add some more trace_printks to try and
visualize the flow over the precipice.
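
For reference, GEM_TRACE() is intentionally cheap; a sketch of how it is
plumbed in i915_gem.h (paraphrased):

	#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
	#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
	#else
	#define GEM_TRACE(...) do { } while (0)
	#endif

so the new annotations only land in the ftrace ring buffer when the GEM
trace config option is enabled, and compile away otherwise.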

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180315131451.4060-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c     | 14 ++++++++++++++
 drivers/gpu/drm/i915/i915_request.c | 23 +++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 13d4b0e74641..2fbd622bba30 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3193,6 +3193,9 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct i915_request *request)
 {
+	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);
 
 	i915_request_submit(request);
@@ -3202,6 +3205,9 @@ static void nop_complete_submit_request(struct i915_request *request)
 {
 	unsigned long flags;
 
+	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);
 
 	spin_lock_irqsave(&request->engine->timeline->lock, flags);
@@ -3215,6 +3221,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	GEM_TRACE("start\n");
+
 	if (drm_debug & DRM_UT_DRIVER) {
 		struct drm_printer p = drm_debug_printer(__func__);
 
@@ -3279,6 +3287,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		i915_gem_reset_finish_engine(engine);
 	}
 
+	GEM_TRACE("end\n");
+
 	wake_up_all(&i915->gpu_error.reset_queue);
 }
 
@@ -3291,6 +3301,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
 		return true;
 
+	GEM_TRACE("start\n");
+
 	/*
 	 * Before unwedging, make sure that all pending operations
 	 * are flushed and errored out - we may have requests waiting upon
@@ -3341,6 +3353,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	intel_engines_reset_default_submission(i915);
 	i915_gem_contexts_lost(i915);
 
+	GEM_TRACE("end\n");
+
 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
 	clear_bit(I915_WEDGED, &i915->gpu_error.flags);
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 1810fa1b81cb..43c7134a9b93 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -207,11 +207,16 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 	if (ret)
 		return ret;
 
+	GEM_BUG_ON(i915->gt.active_requests);
+
 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
 	for_each_engine(engine, i915, id) {
 		struct i915_gem_timeline *timeline;
 		struct intel_timeline *tl = engine->timeline;
 
+		GEM_TRACE("%s seqno %d -> %d\n",
+			  engine->name, tl->seqno, seqno);
+
 		if (!i915_seqno_passed(seqno, tl->seqno)) {
 			/* Flush any waiters before we reuse the seqno */
 			intel_engine_disarm_breadcrumbs(engine);
@@ -381,6 +386,11 @@ static void i915_request_retire(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_active *active, *next;
 
+	GEM_TRACE("%s(%d) fence %llx:%d, global_seqno %d\n",
+		  engine->name, intel_engine_get_seqno(engine),
+		  request->fence.context, request->fence.seqno,
+		  request->global_seqno);
+
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
 	GEM_BUG_ON(!i915_request_completed(request));
@@ -488,6 +498,11 @@ void __i915_request_submit(struct i915_request *request)
 	struct intel_timeline *timeline;
 	u32 seqno;
 
+	GEM_TRACE("%s fence %llx:%d -> global_seqno %d\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno,
+		  engine->timeline->seqno);
+
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);
 
@@ -537,6 +552,11 @@ void __i915_request_unsubmit(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_timeline *timeline;
 
+	GEM_TRACE("%s fence %llx:%d <- global_seqno %d\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno,
+		  request->global_seqno);
+
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);
 
@@ -996,6 +1016,9 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	u32 *cs;
 	int err;
 
+	GEM_TRACE("%s fence %llx:%d\n",
+		  engine->name, request->fence.context, request->fence.seqno);
+
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	trace_i915_request_add(request);
 

From ac697ae8013a7c7301174c9c3b02a92fe418b7ea Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 15 Mar 2018 15:10:15 +0000
Subject: [PATCH 0051/1286] drm/i915: Stop engines when declaring the machine
 wedged
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If we fail to reset the GPU, we declare the machine wedged. However, the
GPU may well still be running in the background with an in-flight
request. So despite our efforts in cleaning up the request queue and
faking the breadcrumb in the HWSP, the GPU may eventually write the
in-flight seqno there, breaking all of our assumptions and throwing the
driver into deep turmoil, wedging beyond wedged.

To avoid this we ideally want to reset the GPU. Since that has already
failed, make sure the rings have the stop bit set instead. This is part
of the normal GPU reset sequence, but that is actually disabled by
igt/gem_eio to force the wedged state. If we assume the worst, we must
poke at the bit again before we give up.

v2: Move the intel_gpu_reset() from set-wedged in the reset error path
into i915_gem_set_wedged() itself. Even if the reset fails (e.g. if it is
disabled by gem_eio), it still tries to make sure the engines are
stopped. For i915_gem_set_wedged() callers from outside of i915_reset(),
this should make sure the GPU is disabled while the driver is marked as
being wedged.

Testcase: igt/gem_eio
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180315151015.22741-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.c | 1 -
 drivers/gpu/drm/i915/i915_gem.c | 3 +++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f03555efc520..3df5193487f3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1995,7 +1995,6 @@ taint:
 error:
 	i915_gem_set_wedged(i915);
 	i915_retire_requests(i915);
-	intel_gpu_reset(i915, ALL_ENGINES);
 	goto finish;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2fbd622bba30..802df8e1a544 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3246,6 +3246,9 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	}
 	i915->caps.scheduler = 0;
 
+	/* Even if the GPU reset fails, it should still stop the engines */
+	intel_gpu_reset(i915, ALL_ENGINES);
+
 	/*
 	 * Make sure no one is running the old callback before we proceed with
 	 * cancelling requests and resetting the completion tracking. Otherwise

From 0c65dfd1a84142887c810fc11573e2edb8df87f6 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 12 Mar 2018 16:52:04 +0000
Subject: [PATCH 0052/1286] drm/i915/stolen: Switch from DEBUG_KMS to
 DEBUG_DRIVER
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

i915_gem_stolen is an allocator for the reserved portion of memory
("stolen" from the system by the BIOS). It is not tied to KMS but
central to the driver, so prefer DRM_DEBUG_DRIVER.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180312165206.31772-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_stolen.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 62aa67960bf4..f11a4b908aaf 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -121,8 +121,8 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
 
 		if (stolen[0].start != stolen[1].start ||
 		    stolen[0].end != stolen[1].end) {
-			DRM_DEBUG_KMS("GTT within stolen memory at %pR\n", &ggtt_res);
-			DRM_DEBUG_KMS("Stolen memory adjusted to %pR\n", dsm);
+			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
+			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
 		}
 	}
 
@@ -406,9 +406,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 	 * memory, so just consider the start. */
 	reserved_total = stolen_top - reserved_base;
 
-	DRM_DEBUG_KMS("Memory reserved for graphics device: %lluK, usable: %lluK\n",
-		      (u64)resource_size(&dev_priv->dsm) >> 10,
-		      ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
+	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
+			 (u64)resource_size(&dev_priv->dsm) >> 10,
+			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
 
 	stolen_usable_start = 0;
 	/* WaSkipStolenMemoryFirstPage:bdw+ */
@@ -580,8 +580,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
-			&stolen_offset, &gtt_offset, &size);
+	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
+			 &stolen_offset, &gtt_offset, &size);
 
 	/* KISS and expect everything to be page-aligned */
 	if (WARN_ON(size == 0) ||
@@ -599,14 +599,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
 	mutex_unlock(&dev_priv->mm.stolen_lock);
 	if (ret) {
-		DRM_DEBUG_KMS("failed to allocate stolen space\n");
+		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
 		kfree(stolen);
 		return NULL;
 	}
 
 	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
 	if (obj == NULL) {
-		DRM_DEBUG_KMS("failed to allocate stolen object\n");
+		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
 		i915_gem_stolen_remove_node(dev_priv, stolen);
 		kfree(stolen);
 		return NULL;
@@ -635,7 +635,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 				   size, gtt_offset, obj->cache_level,
 				   0);
 	if (ret) {
-		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
 		goto err_pages;
 	}
 

From 0efb656147e04f26433de5a399d1b03bf00e4ed6 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 12 Mar 2018 16:52:05 +0000
Subject: [PATCH 0053/1286] drm/i915/stolen: Checkpatch cleansing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In the next patch, we will introduce a new vlv_get_stolen_reserved, so
before we do, make sure checkpatch is happy with the surrounding code.
Sneak in some debug output while we are here.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180312165206.31772-2-chris@chris-wilson.co.uk
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_stolen.c | 40 ++++++++++++++++++--------
 1 file changed, 28 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f11a4b908aaf..5a57a1773fa7 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -174,13 +174,17 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
 }
 
 static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
-				    resource_size_t *base, resource_size_t *size)
+				    resource_size_t *base,
+				    resource_size_t *size)
 {
-	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
-				     CTG_STOLEN_RESERVED :
-				     ELK_STOLEN_RESERVED);
+	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
+				CTG_STOLEN_RESERVED :
+				ELK_STOLEN_RESERVED);
 	resource_size_t stolen_top = dev_priv->dsm.end + 1;
 
+	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
+			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);
+
 	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
 		*base = 0;
 		*size = 0;
@@ -208,9 +212,12 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }
 
 static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
-				     resource_size_t *base, resource_size_t *size)
+				     resource_size_t *base,
+				     resource_size_t *size)
 {
-	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
 	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
 		*base = 0;
@@ -240,9 +247,12 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }
 
 static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
-				     resource_size_t *base, resource_size_t *size)
+				     resource_size_t *base,
+				     resource_size_t *size)
 {
-	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
 	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
 		*base = 0;
@@ -266,9 +276,12 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }
 
 static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
-				    resource_size_t *base, resource_size_t *size)
+				    resource_size_t *base,
+				    resource_size_t *size)
 {
-	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
 	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
 		*base = 0;
@@ -298,11 +311,14 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }
 
 static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
-				    resource_size_t *base, resource_size_t *size)
+				    resource_size_t *base,
+				    resource_size_t *size)
 {
-	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
 	resource_size_t stolen_top;
 
+	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+
 	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
 		*base = 0;
 		*size = 0;

From 957d32feaf04d2a67fd506743e5789359480d574 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 12 Mar 2018 16:52:06 +0000
Subject: [PATCH 0054/1286] drm/i915/stolen: Deduce base of reserved portion as
 top-size on vlv
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On Valleyview, the HW deduces the base of the reserved portion of stolen
memory as being (top - size) and the address field within
GEN6_STOLEN_RESERVED is set to 0. Add yet another GEN6_STOLEN_RESERVED
reader to cope with the subtly different path required for vlv.

v2: Avoid using reserved_base = reserved_size = 0 as the invalid
condition as that typically falls outside of the stolen region,
provoking a consistency error.
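
As a worked example (hypothetical numbers, purely for illustration): with
stolen_top = dsm.end + 1 = 0x80000000 and the register reporting a 1M
reserved size, the new helper deduces

	resource_size_t size = 1024 * 1024;       /* GEN7_STOLEN_RESERVED_1M */
	resource_size_t stolen_top = 0x80000000;  /* hypothetical dsm.end + 1 */
	resource_size_t base = stolen_top - size; /* 0x7ff00000 */

instead of reading a zero base from the ADDR_MASK field and mistaking it
for "nothing reserved".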

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180312165206.31772-3-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_stolen.c | 103 ++++++++++++++-----------
 1 file changed, 56 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 5a57a1773fa7..af915d041281 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -185,11 +185,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
 			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);
 
-	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
-		*base = 0;
-		*size = 0;
+	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
 		return;
-	}
 
 	/*
 	 * Whether ILK really reuses the ELK register for this is unclear.
@@ -197,18 +194,13 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	 */
 	WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
 
-	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
+	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
+		return;
 
+	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
 	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
 
-	/* On these platforms, the register doesn't have a size field, so the
-	 * size is the distance between the base and the top of the stolen
-	 * memory. We also have the genuine case where base is zero and there's
-	 * nothing reserved. */
-	if (*base == 0)
-		*size = 0;
-	else
-		*size = stolen_top - *base;
+	*size = stolen_top - *base;
 }
 
 static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
@@ -219,11 +211,8 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
-	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
-		*base = 0;
-		*size = 0;
+	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
-	}
 
 	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
 
@@ -246,6 +235,33 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	}
 }
 
+static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				    resource_size_t *base,
+				    resource_size_t *size)
+{
+	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	resource_size_t stolen_top = dev_priv->dsm.end + 1;
+
+	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+
+	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
+		return;
+
+	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+	default:
+		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+	case GEN7_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	}
+
+	/*
+	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
+	 * reserved location as (top - size).
+	 */
+	*base = stolen_top - *size;
+}
+
 static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
 				     resource_size_t *base,
 				     resource_size_t *size)
@@ -254,11 +270,8 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
-	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
-		*base = 0;
-		*size = 0;
+	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
-	}
 
 	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
 
@@ -283,11 +296,8 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
-	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
-		*base = 0;
-		*size = 0;
+	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
-	}
 
 	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
 
@@ -315,28 +325,18 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
 				    resource_size_t *size)
 {
 	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
-	resource_size_t stolen_top;
+	resource_size_t stolen_top = dev_priv->dsm.end + 1;
 
 	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
-	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
-		*base = 0;
-		*size = 0;
+	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
-	}
 
-	stolen_top = dev_priv->dsm.end + 1;
+	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
+		return;
 
 	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
-
-	/* On these platforms, the register doesn't have a size field, so the
-	 * size is the distance between the base and the top of the stolen
-	 * memory. We also have the genuine case where base is zero and there's
-	 * nothing reserved. */
-	if (*base == 0)
-		*size = 0;
-	else
-		*size = stolen_top - *base;
+	*size = stolen_top - *base;
 }
 
 int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
@@ -369,7 +369,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
 
 	stolen_top = dev_priv->dsm.end + 1;
-	reserved_base = 0;
+	reserved_base = stolen_top;
 	reserved_size = 0;
 
 	switch (INTEL_GEN(dev_priv)) {
@@ -389,8 +389,12 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 					 &reserved_base, &reserved_size);
 		break;
 	case 7:
-		gen7_get_stolen_reserved(dev_priv,
-					 &reserved_base, &reserved_size);
+		if (IS_VALLEYVIEW(dev_priv))
+			vlv_get_stolen_reserved(dev_priv,
+						&reserved_base, &reserved_size);
+		else
+			gen7_get_stolen_reserved(dev_priv,
+						 &reserved_base, &reserved_size);
 		break;
 	default:
 		if (IS_LP(dev_priv))
@@ -402,11 +406,16 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 		break;
 	}
 
-	/* It is possible for the reserved base to be zero, but the register
-	 * field for size doesn't have a zero option. */
-	if (reserved_base == 0) {
-		reserved_size = 0;
+	/*
+	 * Our expectation is that the reserved space is at the top of the
+	 * stolen region and *never* at the bottom. If we see !reserved_base,
+	 * it likely means we failed to read the registers correctly.
+	 */
+	if (!reserved_base) {
+		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
+			  &reserved_base, &reserved_size);
 		reserved_base = stolen_top;
+		reserved_size = 0;
 	}
 
 	dev_priv->dsm_reserved =

From 1947fd133cf0f58b171adc8565685c1a06de07b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Mon, 5 Mar 2018 19:41:22 +0200
Subject: [PATCH 0055/1286] drm/i915: Don't initialize plane_to_crtc_mapping[]
 on SKL+
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We don't use the enum i9xx_plane_id namespace on SKL+ anymore, so
do not initialize the related plane_to_crtc_mapping[] table either.

Actually the only remaining user of that table is the pre-g4x
watermark code, but no harm in initializing the table on all
pre-SKL platforms.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180305174122.17273-1-ville.syrjala@linux.intel.com
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
---
 drivers/gpu/drm/i915/intel_display.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a7bfa238054c..b31b80643f87 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13570,10 +13570,17 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 	/* initialize shared scalers */
 	intel_crtc_init_scalers(intel_crtc, crtc_state);
 
-	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
-	       dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] != NULL);
-	dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] = intel_crtc;
-	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
+	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
+	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
+	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
+
+	if (INTEL_GEN(dev_priv) < 9) {
+		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
+
+		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
+		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
+	}
 
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 

From e4006713d16567c203ba710f6a2b709ed6107db5 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Date: Fri, 16 Mar 2018 16:12:13 +0200
Subject: [PATCH 0056/1286] i915: Re-use DEFINE_SHOW_ATTRIBUTE() macro

...instead of open-coding the file operations and a custom ->open()
callback for each attribute.
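
For reference, DEFINE_SHOW_ATTRIBUTE(__name) from <linux/seq_file.h> expands
to roughly the boilerplate being deleted below (paraphrased):

	#define DEFINE_SHOW_ATTRIBUTE(__name)                                \
	static int __name ## _open(struct inode *inode, struct file *file)  \
	{                                                                    \
		return single_open(file, __name ## _show, inode->i_private);\
	}                                                                    \
	                                                                     \
	static const struct file_operations __name ## _fops = {             \
		.owner   = THIS_MODULE,                                      \
		.open    = __name ## _open,                                  \
		.read    = seq_read,                                         \
		.llseek  = seq_lseek,                                        \
		.release = single_release,                                   \
	}

Since the generated ->open() passes inode->i_private straight through as
m->private, the ->show() callbacks below are adjusted to take dev_priv from
m->private directly.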

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180316141213.38774-1-andriy.shevchenko@linux.intel.com
---
 drivers/gpu/drm/i915/gvt/debugfs.c  | 13 +----
 drivers/gpu/drm/i915/i915_debugfs.c | 76 +++++------------------------
 2 files changed, 12 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 32a66dfdf112..f7d0078eb61b 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -122,18 +122,7 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
 	seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff);
 	return 0;
 }
-
-static int vgpu_mmio_diff_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, vgpu_mmio_diff_show, inode->i_private);
-}
-
-static const struct file_operations vgpu_mmio_diff_fops = {
-	.open		= vgpu_mmio_diff_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(vgpu_mmio_diff);
 
 /**
  * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 298a3aa9513b..5378863e3238 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3562,7 +3562,8 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 
 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	struct intel_dp *intel_dp;
@@ -3596,10 +3597,8 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 static int i915_displayport_test_active_open(struct inode *inode,
 					     struct file *file)
 {
-	struct drm_i915_private *dev_priv = inode->i_private;
-
 	return single_open(file, i915_displayport_test_active_show,
-			   &dev_priv->drm);
+			   inode->i_private);
 }
 
 static const struct file_operations i915_displayport_test_active_fops = {
@@ -3613,7 +3612,8 @@ static const struct file_operations i915_displayport_test_active_fops = {
 
 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	struct intel_dp *intel_dp;
@@ -3652,26 +3652,12 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 
 	return 0;
 }
-static int i915_displayport_test_data_open(struct inode *inode,
-					   struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	return single_open(file, i915_displayport_test_data_show,
-			   &dev_priv->drm);
-}
-
-static const struct file_operations i915_displayport_test_data_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_displayport_test_data_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
 
 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	struct intel_dp *intel_dp;
@@ -3698,23 +3684,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 
 	return 0;
 }
-
-static int i915_displayport_test_type_open(struct inode *inode,
-				       struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	return single_open(file, i915_displayport_test_type_show,
-			   &dev_priv->drm);
-}
-
-static const struct file_operations i915_displayport_test_type_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_displayport_test_type_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
 
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
@@ -4875,19 +4845,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
 
 	return 0;
 }
-
-static int i915_dpcd_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, i915_dpcd_show, inode->i_private);
-}
-
-static const struct file_operations i915_dpcd_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_dpcd_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
 
 static int i915_panel_show(struct seq_file *m, void *data)
 {
@@ -4909,19 +4867,7 @@ static int i915_panel_show(struct seq_file *m, void *data)
 
 	return 0;
 }
-
-static int i915_panel_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, i915_panel_show, inode->i_private);
-}
-
-static const struct file_operations i915_panel_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_panel_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(i915_panel);
 
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files

From 3b358cdaf3319521efdf19ff07918bcc4d57013e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Fri, 2 Mar 2018 11:56:56 +0200
Subject: [PATCH 0057/1286] drm/i915: Kill the remaining CHV HBR2 leftovers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

AFAIK CHV was supposed to have HBR2 originally, but in the end the feature
was dropped. We still have some code leftovers from those early days.
Eliminate them.

The extra bit for the training pattern seems to be dead in the hardware.
I can set it (in fact I can set almost any reserved bit in the
registers) but it doesn't seem to interfere with the operation of the
hardware. Either that or I'm very lucky that my displays complete link
training with the incorrect pattern being sent out.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180302095656.19662-1-ville.syrjala@linux.intel.com
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
---
 drivers/gpu/drm/i915/i915_reg.h |  2 --
 drivers/gpu/drm/i915/intel_dp.c | 20 ++++----------------
 2 files changed, 4 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index dbcb8829faba..1b48d50dfcf1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4943,8 +4943,6 @@ enum {
 #define   DP_LINK_TRAIN_OFF		(3 << 28)
 #define   DP_LINK_TRAIN_MASK		(3 << 28)
 #define   DP_LINK_TRAIN_SHIFT		28
-#define   DP_LINK_TRAIN_PAT_3_CHV	(1 << 14)
-#define   DP_LINK_TRAIN_MASK_CHV	((3 << 28)|(1<<14))
 
 /* CPT Link training mode */
 #define   DP_LINK_TRAIN_PAT_1_CPT	(0 << 8)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4dd1b2287dd6..62f82c4298ac 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -91,8 +91,6 @@ static const struct dp_link_dpll chv_dpll[] = {
 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
 	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
-	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
-		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
 
 /**
@@ -2900,10 +2898,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 		}
 
 	} else {
-		if (IS_CHERRYVIEW(dev_priv))
-			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
-		else
-			*DP &= ~DP_LINK_TRAIN_MASK;
+		*DP &= ~DP_LINK_TRAIN_MASK;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 		case DP_TRAINING_PATTERN_DISABLE:
@@ -2916,12 +2911,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		case DP_TRAINING_PATTERN_3:
-			if (IS_CHERRYVIEW(dev_priv)) {
-				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
-			} else {
-				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
-				*DP |= DP_LINK_TRAIN_PAT_2;
-			}
+			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		}
 	}
@@ -3660,10 +3651,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
 	} else {
-		if (IS_CHERRYVIEW(dev_priv))
-			DP &= ~DP_LINK_TRAIN_MASK_CHV;
-		else
-			DP &= ~DP_LINK_TRAIN_MASK;
+		DP &= ~DP_LINK_TRAIN_MASK;
 		DP |= DP_LINK_TRAIN_PAT_IDLE;
 	}
 	I915_WRITE(intel_dp->output_reg, DP);

From ad260ab32a4d94fa974f58262f8000472d34fd5b Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Tue, 13 Mar 2018 22:48:25 -0700
Subject: [PATCH 0058/1286] drm/i915/dp: Write to SET_POWER dpcd to enable MST
 hub.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If the BIOS sets up an MST output and the hardware state readout code sees
this as an SST configuration, then when disabling the encoder we end up
calling the ->post_disable_dp() hook instead of the MST version.
Consequently, we write to the DP_SET_POWER dpcd to put the sink into the D3
state. Further along, when we try to enable the encoder in MST mode, the
POWER_UP_PHY transaction fails to power up the MST hub. This results in
continuous link training failures which keep the system busy, delaying boot.
We could identify the BIOS MST boot discrepancy and handle it accordingly,
but a simple way to solve this is to write to the DP_SET_POWER dpcd for MST
too.
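
Concretely, the sink power write boils down to a single DPCD byte; a minimal
sketch of the D0/D3 transition as done by intel_dp_sink_dpms() (simplified,
omitting the DPCD revision check and retry loop in the real helper):

	u8 val = (mode == DRM_MODE_DPMS_ON) ? DP_SET_POWER_D0
					    : DP_SET_POWER_D3;

	/* One byte at DPCD offset 0x600 selects the sink power state. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, val);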

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105470
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reported-by: Laura Abbott <labbott@redhat.com>
Cc: stable@vger.kernel.org
Fixes: 5ea2355a100a ("drm/i915/mst: Use MST sideband message transactions for dpms control")
Tested-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314054825.1718-1-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/intel_ddi.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index dbcf1a0586f9..8c2d778560f0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2205,8 +2205,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 		intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
 	intel_ddi_init_dp_buf_reg(encoder);
-	if (!is_mst)
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_start_link_train(intel_dp);
 	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
 		intel_dp_stop_link_train(intel_dp);
@@ -2304,14 +2303,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_dp *intel_dp = &dig_port->dp;
-	bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
 
 	/*
 	 * Power down sink before disabling the port, otherwise we end
 	 * up getting interrupts from the sink on detecting link loss.
 	 */
-	if (!is_mst)
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 
 	intel_disable_ddi_buf(encoder);
 

From eacd8391f977d3800cc41a026f9f81fce210a78c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:36 +0100
Subject: [PATCH 0059/1286] drm/i915/guc: Keep GuC interrupts enabled when
 using GuC
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The GuC log contains a separate space used for the crash dump.
We even get a separate notification for it. While we're not handling
crashes differently yet, it makes sense to decouple the two right now to
simplify the following patches.

v2: Move guc_log_flush_irq_disable up to avoid movement in following
    patches (Sagar).
v3: s/guc_log_flush_irq_*/guc_flush_log_msg_*, rebase after mass rename

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com> (v2)
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-1-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c     | 23 ++++++++-------------
 drivers/gpu/drm/i915/intel_guc.h     |  2 ++
 drivers/gpu/drm/i915/intel_guc_log.c | 31 +++++++++++++++++-----------
 drivers/gpu/drm/i915/intel_uc.c      | 14 +++++--------
 4 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index e70bf654d21e..3af603536b1b 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -67,6 +67,7 @@ void intel_guc_init_early(struct intel_guc *guc)
 	intel_guc_log_init_early(&guc->log);
 
 	mutex_init(&guc->send_mutex);
+	spin_lock_init(&guc->irq_lock);
 	guc->send = intel_guc_send_nop;
 	guc->notify = gen8_guc_raise_irq;
 }
@@ -368,7 +369,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
 void intel_guc_to_host_event_handler(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	u32 msg, flush;
+	u32 msg, val;
 
 	/*
 	 * Sample the log buffer flush related bits & clear them out now
@@ -381,24 +382,18 @@ void intel_guc_to_host_event_handler(struct intel_guc *guc)
 	 * could happen that GuC sets the bit for 2nd interrupt but Host
 	 * clears out the bit on handling the 1st interrupt.
 	 */
+	spin_lock(&guc->irq_lock);
+	val = I915_READ(SOFT_SCRATCH(15));
+	msg = val & guc->msg_enabled_mask;
+	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
+	spin_unlock(&guc->irq_lock);
 
-	msg = I915_READ(SOFT_SCRATCH(15));
-	flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
-		       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
-	if (flush) {
-		/* Clear the message bits that are handled */
-		I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
-
-		/* Handle flush interrupt in bottom half */
+	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
+		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) {
 		queue_work(guc->log.runtime.flush_wq,
 			   &guc->log.runtime.flush_work);
 
 		guc->log.flush_interrupt_count++;
-	} else {
-		/*
-		 * Not clearing of unhandled event bits won't result in
-		 * re-triggering of the interrupt.
-		 */
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index cdb649a9a4cf..9a95d1518aa9 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -56,7 +56,9 @@ struct intel_guc {
 	struct drm_i915_gem_object *load_err_log;
 
 	/* intel_guc_recv interrupt related state */
+	spinlock_t irq_lock;
 	bool interrupts_enabled;
+	unsigned int msg_enabled_mask;
 
 	struct i915_vma *ads_vma;
 	struct i915_vma *stage_desc_pool;
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 1c2127bc3878..1e209fcf90e1 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -73,6 +73,22 @@ static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
+static void guc_flush_log_msg_enable(struct intel_guc *guc)
+{
+	spin_lock_irq(&guc->irq_lock);
+	guc->msg_enabled_mask |= INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
+				 INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED;
+	spin_unlock_irq(&guc->irq_lock);
+}
+
+static void guc_flush_log_msg_disable(struct intel_guc *guc)
+{
+	spin_lock_irq(&guc->irq_lock);
+	guc->msg_enabled_mask &= ~(INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
+				   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
+	spin_unlock_irq(&guc->irq_lock);
+}
+
 static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
 {
 	return container_of(log, struct intel_guc, log);
@@ -709,12 +725,7 @@ int intel_guc_log_register(struct intel_guc_log *log)
 	if (ret)
 		goto err_runtime;
 
-	/* GuC logging is currently the only user of Guc2Host interrupts */
-	mutex_lock(&i915->drm.struct_mutex);
-	intel_runtime_pm_get(i915);
-	gen9_enable_guc_interrupts(i915);
-	intel_runtime_pm_put(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
+	guc_flush_log_msg_enable(guc);
 
 	return 0;
 
@@ -733,6 +744,8 @@ void intel_guc_log_unregister(struct intel_guc_log *log)
 	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *i915 = guc_to_i915(guc);
 
+	guc_flush_log_msg_disable(guc);
+
 	/*
 	 * Once logging is disabled, GuC won't generate logs & send an
 	 * interrupt. But there could be some data in the log buffer
@@ -742,12 +755,6 @@ void intel_guc_log_unregister(struct intel_guc_log *log)
 	guc_flush_logs(log);
 
 	mutex_lock(&i915->drm.struct_mutex);
-
-	/* GuC logging is currently the only user of Guc2Host interrupts */
-	intel_runtime_pm_get(i915);
-	gen9_disable_guc_interrupts(i915);
-	intel_runtime_pm_put(i915);
-
 	guc_log_runtime_destroy(log);
 	mutex_unlock(&i915->drm.struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 104c03ae2742..765b86a53f19 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -247,6 +247,8 @@ static int guc_enable_communication(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
+	gen9_enable_guc_interrupts(dev_priv);
+
 	if (HAS_GUC_CT(dev_priv))
 		return intel_guc_enable_ct(guc);
 
@@ -261,6 +263,8 @@ static void guc_disable_communication(struct intel_guc *guc)
 	if (HAS_GUC_CT(dev_priv))
 		intel_guc_disable_ct(guc);
 
+	gen9_disable_guc_interrupts(dev_priv);
+
 	guc->send = intel_guc_send_nop;
 }
 
@@ -413,12 +417,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	}
 
 	if (USES_GUC_SUBMISSION(dev_priv)) {
-		if (i915_modparams.guc_log_level)
-			gen9_enable_guc_interrupts(dev_priv);
-
 		ret = intel_guc_submission_enable(guc);
 		if (ret)
-			goto err_interrupts;
+			goto err_communication;
 	}
 
 	dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
@@ -433,8 +434,6 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	/*
 	 * We've failed to load the firmware :(
 	 */
-err_interrupts:
-	gen9_disable_guc_interrupts(dev_priv);
 err_communication:
 	guc_disable_communication(guc);
 err_log_capture:
@@ -464,9 +463,6 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
 		intel_guc_submission_disable(guc);
 
 	guc_disable_communication(guc);
-
-	if (USES_GUC_SUBMISSION(dev_priv))
-		gen9_disable_guc_interrupts(dev_priv);
 }
 
 int intel_uc_suspend(struct drm_i915_private *i915)

From b813d50e869aeda09ccf22e8d869e61b63389e4d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:37 +0100
Subject: [PATCH 0060/1286] drm/i915/guc: Log runtime should consist of both
 mapping and relay
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently, we're treating the relay and the mapping of the GuC log as
separate concepts. We're also using inconsistent locking, sometimes taking
relay_lock and sometimes struct_mutex.
Let's correct that. Anything touching the runtime is now serialized by
runtime.lock, while struct_mutex is still used as an inner lock for the
mapping.
We're still racy in setting the log level - but we'll take care of that
in the following patches.

v2: Tidy locking (Sagar)
v3: Remove obsoleted comment (Sagar)

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-2-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc_log.c | 125 ++++++++-------------------
 drivers/gpu/drm/i915/intel_guc_log.h |   3 +-
 2 files changed, 38 insertions(+), 90 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 1e209fcf90e1..b82866bfbef5 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -176,10 +176,7 @@ static int guc_log_relay_file_create(struct intel_guc_log *log)
 	struct dentry *log_dir;
 	int ret;
 
-	if (!i915_modparams.guc_log_level)
-		return 0;
-
-	mutex_lock(&log->runtime.relay_lock);
+	lockdep_assert_held(&log->runtime.lock);
 
 	/* For now create the log file in /sys/kernel/debug/dri/0 dir */
 	log_dir = dev_priv->drm.primary->debugfs_root;
@@ -198,29 +195,17 @@ static int guc_log_relay_file_create(struct intel_guc_log *log)
 	 */
 	if (!log_dir) {
 		DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
-		ret = -ENODEV;
-		goto out_unlock;
+		return -ENODEV;
 	}
 
 	ret = relay_late_setup_files(log->runtime.relay_chan, "guc_log",
 				     log_dir);
 	if (ret < 0 && ret != -EEXIST) {
 		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
-		goto out_unlock;
+		return ret;
 	}
 
-	ret = 0;
-
-out_unlock:
-	mutex_unlock(&log->runtime.relay_lock);
-	return ret;
-}
-
-static bool guc_log_has_relay(struct intel_guc_log *log)
-{
-	lockdep_assert_held(&log->runtime.relay_lock);
-
-	return log->runtime.relay_chan;
+	return 0;
 }
 
 static void guc_move_to_next_buf(struct intel_guc_log *log)
@@ -231,9 +216,6 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
 	 */
 	smp_wmb();
 
-	if (!guc_log_has_relay(log))
-		return;
-
 	/* All data has been written, so now move the offset of sub buffer. */
 	relay_reserve(log->runtime.relay_chan, log->vma->obj->base.size);
 
@@ -243,9 +225,6 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
 
 static void *guc_get_write_buffer(struct intel_guc_log *log)
 {
-	if (!guc_log_has_relay(log))
-		return NULL;
-
 	/*
 	 * Just get the base address of a new sub buffer and copy data into it
 	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
@@ -306,14 +285,14 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 	void *src_data, *dst_data;
 	bool new_overflow;
 
+	mutex_lock(&log->runtime.lock);
+
 	if (WARN_ON(!log->runtime.buf_addr))
-		return;
+		goto out_unlock;
 
 	/* Get the pointer to shared GuC log buffer */
 	log_buf_state = src_data = log->runtime.buf_addr;
 
-	mutex_lock(&log->runtime.relay_lock);
-
 	/* Get the pointer to local buffer to store the logs */
 	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
 
@@ -324,9 +303,8 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 		 */
 		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
 		log->capture_miss_count++;
-		mutex_unlock(&log->runtime.relay_lock);
 
-		return;
+		goto out_unlock;
 	}
 
 	/* Actual logs are present from the 2nd page */
@@ -397,7 +375,8 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 
 	guc_move_to_next_buf(log);
 
-	mutex_unlock(&log->runtime.relay_lock);
+out_unlock:
+	mutex_unlock(&log->runtime.lock);
 }
 
 static void capture_logs_work(struct work_struct *work)
@@ -413,21 +392,21 @@ static bool guc_log_has_runtime(struct intel_guc_log *log)
 	return log->runtime.buf_addr;
 }
 
-static int guc_log_runtime_create(struct intel_guc_log *log)
+static int guc_log_map(struct intel_guc_log *log)
 {
 	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	void *vaddr;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&log->runtime.lock);
 
 	if (!log->vma)
 		return -ENODEV;
 
-	GEM_BUG_ON(guc_log_has_runtime(log));
-
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (ret)
 		return ret;
 
@@ -447,14 +426,9 @@ static int guc_log_runtime_create(struct intel_guc_log *log)
 	return 0;
 }
 
-static void guc_log_runtime_destroy(struct intel_guc_log *log)
+static void guc_log_unmap(struct intel_guc_log *log)
 {
-	/*
-	 * It's possible that the runtime stuff was never allocated because
-	 * GuC log was disabled at the boot time.
-	 */
-	if (!guc_log_has_runtime(log))
-		return;
+	lockdep_assert_held(&log->runtime.lock);
 
 	i915_gem_object_unpin_map(log->vma->obj);
 	log->runtime.buf_addr = NULL;
@@ -462,7 +436,7 @@ static void guc_log_runtime_destroy(struct intel_guc_log *log)
 
 void intel_guc_log_init_early(struct intel_guc_log *log)
 {
-	mutex_init(&log->runtime.relay_lock);
+	mutex_init(&log->runtime.lock);
 	INIT_WORK(&log->runtime.flush_work, capture_logs_work);
 }
 
@@ -474,12 +448,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
 	size_t n_subbufs, subbuf_size;
 	int ret;
 
-	if (!i915_modparams.guc_log_level)
-		return 0;
-
-	mutex_lock(&log->runtime.relay_lock);
-
-	GEM_BUG_ON(guc_log_has_relay(log));
+	lockdep_assert_held(&log->runtime.lock);
 
 	 /* Keep the size of sub buffers same as shared log buffer */
 	subbuf_size = GUC_LOG_SIZE;
@@ -509,12 +478,9 @@ static int guc_log_relay_create(struct intel_guc_log *log)
 	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
 	log->runtime.relay_chan = guc_log_relay_chan;
 
-	mutex_unlock(&log->runtime.relay_lock);
-
 	return 0;
 
 err:
-	mutex_unlock(&log->runtime.relay_lock);
 	/* logging will be off */
 	i915_modparams.guc_log_level = 0;
 	return ret;
@@ -522,20 +488,10 @@ err:
 
 static void guc_log_relay_destroy(struct intel_guc_log *log)
 {
-	mutex_lock(&log->runtime.relay_lock);
-
-	/*
-	 * It's possible that the relay was never allocated because
-	 * GuC log was disabled at the boot time.
-	 */
-	if (!guc_log_has_relay(log))
-		goto out_unlock;
+	lockdep_assert_held(&log->runtime.lock);
 
 	relay_close(log->runtime.relay_chan);
 	log->runtime.relay_chan = NULL;
-
-out_unlock:
-	mutex_unlock(&log->runtime.relay_lock);
 }
 
 static void guc_log_capture_logs(struct intel_guc_log *log)
@@ -621,7 +577,6 @@ err:
 
 void intel_guc_log_destroy(struct intel_guc_log *log)
 {
-	guc_log_runtime_destroy(log);
 	i915_vma_unpin_and_release(&log->vma);
 }
 
@@ -699,52 +654,43 @@ out:
 
 int intel_guc_log_register(struct intel_guc_log *log)
 {
-	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *i915 = guc_to_i915(guc);
 	int ret;
 
+	mutex_lock(&log->runtime.lock);
+
 	GEM_BUG_ON(guc_log_has_runtime(log));
 
-	/*
-	 * If log was disabled at boot time, then setup needed to handle
-	 * log buffer flush interrupts would not have been done yet, so
-	 * do that now.
-	 */
 	ret = guc_log_relay_create(log);
 	if (ret)
 		goto err;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	ret = guc_log_runtime_create(log);
-	mutex_unlock(&i915->drm.struct_mutex);
-
+	ret = guc_log_map(log);
 	if (ret)
 		goto err_relay;
 
 	ret = guc_log_relay_file_create(log);
 	if (ret)
-		goto err_runtime;
+		goto err_unmap;
 
-	guc_flush_log_msg_enable(guc);
+	guc_flush_log_msg_enable(log_to_guc(log));
+
+	mutex_unlock(&log->runtime.lock);
 
 	return 0;
 
-err_runtime:
-	mutex_lock(&i915->drm.struct_mutex);
-	guc_log_runtime_destroy(log);
-	mutex_unlock(&i915->drm.struct_mutex);
+err_unmap:
+	guc_log_unmap(log);
 err_relay:
 	guc_log_relay_destroy(log);
 err:
+	mutex_unlock(&log->runtime.lock);
+
 	return ret;
 }
 
 void intel_guc_log_unregister(struct intel_guc_log *log)
 {
-	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *i915 = guc_to_i915(guc);
-
-	guc_flush_log_msg_disable(guc);
+	guc_flush_log_msg_disable(log_to_guc(log));
 
 	/*
 	 * Once logging is disabled, GuC won't generate logs & send an
@@ -754,9 +700,12 @@ void intel_guc_log_unregister(struct intel_guc_log *log)
 	 */
 	guc_flush_logs(log);
 
-	mutex_lock(&i915->drm.struct_mutex);
-	guc_log_runtime_destroy(log);
-	mutex_unlock(&i915->drm.struct_mutex);
+	mutex_lock(&log->runtime.lock);
 
+	GEM_BUG_ON(!guc_log_has_runtime(log));
+
+	guc_log_unmap(log);
 	guc_log_relay_destroy(log);
+
+	mutex_unlock(&log->runtime.lock);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index 6264bd5ba080..e0ea625032fb 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -49,8 +49,7 @@ struct intel_guc_log {
 		struct workqueue_struct *flush_wq;
 		struct work_struct flush_work;
 		struct rchan *relay_chan;
-		/* To serialize the access to relay_chan */
-		struct mutex relay_lock;
+		struct mutex lock;
 	} runtime;
 	/* logging related stats */
 	u32 capture_miss_count;

From 2b47733045aaf883c275c3bdbe3b503137144f6e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:38 +0100
Subject: [PATCH 0061/1286] drm/i915/guc: Merge log relay file and channel
 creation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We have all the information we need at relay_open call time.
Since there's no reason to split the process into relay_open and
relay_late_setup_files, let's remove the extra code.
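
For reference, a minimal sketch of the two flows (using the relay API as it
appears in guc_log_relay_create(); variable names here are illustrative only):

	/* Before: open an anonymous channel, attach the debugfs file later. */
	chan = relay_open(NULL, NULL, subbuf_size, n_subbufs,
			  &relay_callbacks, dev_priv);
	ret = relay_late_setup_files(chan, "guc_log",
				     dev_priv->drm.primary->debugfs_root);

	/* After: the debugfs dir is already known, so one call is enough. */
	chan = relay_open("guc_log", dev_priv->drm.primary->debugfs_root,
			  subbuf_size, n_subbufs, &relay_callbacks, dev_priv);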

v2: Remove obsoleted comments (Sagar)
v3: There was one obsolete comment left (Sagar)

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-3-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc_log.c | 65 +++-------------------------
 1 file changed, 5 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index b82866bfbef5..767c0d00fca6 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -141,14 +141,7 @@ static struct dentry *create_buf_file_callback(const char *filename,
 	if (!parent)
 		return NULL;
 
-	/*
-	 * Not using the channel filename passed as an argument, since for each
-	 * channel relay appends the corresponding CPU number to the filename
-	 * passed in relay_open(). This should be fine as relay just needs a
-	 * dentry of the file associated with the channel buffer and that file's
-	 * name need not be same as the filename passed as an argument.
-	 */
-	buf_file = debugfs_create_file("guc_log", mode,
+	buf_file = debugfs_create_file(filename, mode,
 				       parent, buf, &relay_file_operations);
 	return buf_file;
 }
@@ -169,45 +162,6 @@ static struct rchan_callbacks relay_callbacks = {
 	.remove_buf_file = remove_buf_file_callback,
 };
 
-static int guc_log_relay_file_create(struct intel_guc_log *log)
-{
-	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct dentry *log_dir;
-	int ret;
-
-	lockdep_assert_held(&log->runtime.lock);
-
-	/* For now create the log file in /sys/kernel/debug/dri/0 dir */
-	log_dir = dev_priv->drm.primary->debugfs_root;
-
-	/*
-	 * If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
-	 * not mounted and so can't create the relay file.
-	 * The relay API seems to fit well with debugfs only, for availing relay
-	 * there are 3 requirements which can be met for debugfs file only in a
-	 * straightforward/clean manner :-
-	 * i)   Need the associated dentry pointer of the file, while opening the
-	 *      relay channel.
-	 * ii)  Should be able to use 'relay_file_operations' fops for the file.
-	 * iii) Set the 'i_private' field of file's inode to the pointer of
-	 *	relay channel buffer.
-	 */
-	if (!log_dir) {
-		DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
-		return -ENODEV;
-	}
-
-	ret = relay_late_setup_files(log->runtime.relay_chan, "guc_log",
-				     log_dir);
-	if (ret < 0 && ret != -EEXIST) {
-		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
 static void guc_move_to_next_buf(struct intel_guc_log *log)
 {
 	/*
@@ -461,13 +415,10 @@ static int guc_log_relay_create(struct intel_guc_log *log)
 	 */
 	n_subbufs = 8;
 
-	/*
-	 * Create a relay channel, so that we have buffers for storing
-	 * the GuC firmware logs, the channel will be linked with a file
-	 * later on when debugfs is registered.
-	 */
-	guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
-					n_subbufs, &relay_callbacks, dev_priv);
+	guc_log_relay_chan = relay_open("guc_log",
+					dev_priv->drm.primary->debugfs_root,
+					subbuf_size, n_subbufs,
+					&relay_callbacks, dev_priv);
 	if (!guc_log_relay_chan) {
 		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
 
@@ -668,18 +619,12 @@ int intel_guc_log_register(struct intel_guc_log *log)
 	if (ret)
 		goto err_relay;
 
-	ret = guc_log_relay_file_create(log);
-	if (ret)
-		goto err_unmap;
-
 	guc_flush_log_msg_enable(log_to_guc(log));
 
 	mutex_unlock(&log->runtime.lock);
 
 	return 0;
 
-err_unmap:
-	guc_log_unmap(log);
 err_relay:
 	guc_log_relay_destroy(log);
 err:

From d3fbf9437b22bd663e292d5d5e9f8e37c8eed208 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:39 +0100
Subject: [PATCH 0062/1286] drm/i915/guc: Flush directly in log unregister
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Having both guc_flush_logs and guc_log_flush functions is confusing.
While we could just rename things, the guc_flush_logs implementation is
quite simple, so let's get rid of it and move its contents into unregister.
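
The resulting intel_guc_log_unregister() then reads roughly as follows (a
sketch of the ordering only, using the helpers already present in
intel_guc_log.c):

	guc_flush_log_msg_disable(guc);
	/* Let any pending/ongoing capture finish before the forceful flush. */
	flush_work(&log->runtime.flush_work);
	/* Ask GuC to publish whatever is still sitting in its buffer... */
	intel_runtime_pm_get(i915);
	guc_log_flush(guc);
	intel_runtime_pm_put(i915);
	/* ...and pull the leftovers into the relay before tearing it down. */
	guc_log_capture_logs(log);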

v2: s/dev_priv/i915 (Sagar)

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-4-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc_log.c | 38 ++++++++++++----------------
 1 file changed, 16 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 767c0d00fca6..72a71bc94adf 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -461,26 +461,6 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
 	intel_runtime_pm_put(dev_priv);
 }
 
-static void guc_flush_logs(struct intel_guc_log *log)
-{
-	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
-	/*
-	 * Before initiating the forceful flush, wait for any pending/ongoing
-	 * flush to complete otherwise forceful flush may not actually happen.
-	 */
-	flush_work(&log->runtime.flush_work);
-
-	/* Ask GuC to update the log buffer state */
-	intel_runtime_pm_get(dev_priv);
-	guc_log_flush(guc);
-	intel_runtime_pm_put(dev_priv);
-
-	/* GuC would have updated log buffer by now, so capture it */
-	guc_log_capture_logs(log);
-}
-
 int intel_guc_log_create(struct intel_guc_log *log)
 {
 	struct intel_guc *guc = log_to_guc(log);
@@ -635,7 +615,16 @@ err:
 
 void intel_guc_log_unregister(struct intel_guc_log *log)
 {
-	guc_flush_log_msg_disable(log_to_guc(log));
+	struct intel_guc *guc = log_to_guc(log);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+
+	guc_flush_log_msg_disable(guc);
+
+	/*
+	 * Before initiating the forceful flush, wait for any pending/ongoing
+	 * flush to complete otherwise forceful flush may not actually happen.
+	 */
+	flush_work(&log->runtime.flush_work);
 
 	/*
 	 * Once logging is disabled, GuC won't generate logs & send an
@@ -643,7 +632,12 @@ void intel_guc_log_unregister(struct intel_guc_log *log)
 	 * which is yet to be captured. So request GuC to update the log
 	 * buffer state and then collect the left over logs.
 	 */
-	guc_flush_logs(log);
+	intel_runtime_pm_get(i915);
+	guc_log_flush(guc);
+	intel_runtime_pm_put(i915);
+
+	/* GuC would have updated log buffer by now, so capture it */
+	guc_log_capture_logs(log);
 
 	mutex_lock(&log->runtime.lock);
 

From 4977a287b9e7c3dbe156bf28f8771b758060ee3e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:40 +0100
Subject: [PATCH 0063/1286] drm/i915/guc: Split relay control and GuC log level
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Those two concepts are really separate. Since GuC is writing data into
its own buffer and we even provide a way for userspace to read directly
from it via the i915_guc_log_dump debugfs, there's no real reason to tie
the log level to relay creation.
Let's create a separate debugfs file, giving userspace a way to create
the relay on demand when it wants to read a continuous log rather than
a snapshot.
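
From userspace the intended flow is then roughly the following (a sketch;
the /sys/kernel/debug/dri/0 paths assume the usual debugfs mount point and
card index, and guc_log0 is the per-CPU file name relay derives from the
"guc_log" base name):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		/* Keeping this fd open is what keeps the relay alive. */
		int ctl = open("/sys/kernel/debug/dri/0/i915_guc_log_relay", O_WRONLY);
		int log = open("/sys/kernel/debug/dri/0/guc_log0", O_RDONLY);

		if (ctl < 0 || log < 0)
			return 1;

		write(ctl, "", 1);	/* any write forces a log flush */
		while ((n = read(log, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);

		close(log);
		close(ctl);		/* relay is torn down on release */
		return 0;
	}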

v2: Don't touch guc_log_level on relay creation error, adjust locking
    after rebase, s/dev_priv/i915, pass guc to file->private_data (Sagar)
    Use struct_mutex rather than runtime.lock for set_log_level
v3: Tidy ordering of definitions (Sagar)

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-5-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c  | 56 +++++++++++++++++---
 drivers/gpu/drm/i915/i915_drv.c      |  4 --
 drivers/gpu/drm/i915/intel_guc_log.c | 77 +++++++++++-----------------
 drivers/gpu/drm/i915/intel_guc_log.h |  9 ++--
 drivers/gpu/drm/i915/intel_uc.c      | 22 --------
 drivers/gpu/drm/i915/intel_uc.h      |  2 -
 6 files changed, 85 insertions(+), 85 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5378863e3238..e857a9493b6f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2495,32 +2495,73 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int i915_guc_log_control_get(void *data, u64 *val)
+static int i915_guc_log_level_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
 
 	if (!USES_GUC(dev_priv))
 		return -ENODEV;
 
-	*val = intel_guc_log_control_get(&dev_priv->guc.log);
+	*val = intel_guc_log_level_get(&dev_priv->guc.log);
 
 	return 0;
 }
 
-static int i915_guc_log_control_set(void *data, u64 val)
+static int i915_guc_log_level_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
 
 	if (!USES_GUC(dev_priv))
 		return -ENODEV;
 
-	return intel_guc_log_control_set(&dev_priv->guc.log, val);
+	return intel_guc_log_level_set(&dev_priv->guc.log, val);
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
-			i915_guc_log_control_get, i915_guc_log_control_set,
+DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
+			i915_guc_log_level_get, i915_guc_log_level_set,
 			"%lld\n");
 
+static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *dev_priv = inode->i_private;
+
+	if (!USES_GUC(dev_priv))
+		return -ENODEV;
+
+	file->private_data = &dev_priv->guc.log;
+
+	return intel_guc_log_relay_open(&dev_priv->guc.log);
+}
+
+static ssize_t
+i915_guc_log_relay_write(struct file *filp,
+			 const char __user *ubuf,
+			 size_t cnt,
+			 loff_t *ppos)
+{
+	struct intel_guc_log *log = filp->private_data;
+
+	intel_guc_log_relay_flush(log);
+
+	return cnt;
+}
+
+static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *dev_priv = inode->i_private;
+
+	intel_guc_log_relay_close(&dev_priv->guc.log);
+
+	return 0;
+}
+
+static const struct file_operations i915_guc_log_relay_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_guc_log_relay_open,
+	.write = i915_guc_log_relay_write,
+	.release = i915_guc_log_relay_release,
+};
+
 static const char *psr2_live_status(u32 val)
 {
 	static const char * const live_status[] = {
@@ -4748,7 +4789,8 @@ static const struct i915_debugfs_files {
 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
-	{"i915_guc_log_control", &i915_guc_log_control_fops},
+	{"i915_guc_log_level", &i915_guc_log_level_fops},
+	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
 	{"i915_ipc_status", &i915_ipc_status_fops},
 	{"i915_drrs_ctl", &i915_drrs_ctl_fops}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3df5193487f3..1021bf40e236 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1239,9 +1239,6 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 		i915_debugfs_register(dev_priv);
 		i915_setup_sysfs(dev_priv);
 
-		/* Depends on debugfs having been initialized */
-		intel_uc_register(dev_priv);
-
 		/* Depends on sysfs having been initialized */
 		i915_perf_register(dev_priv);
 	} else
@@ -1299,7 +1296,6 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	i915_pmu_unregister(dev_priv);
 
 	i915_teardown_sysfs(dev_priv);
-	intel_uc_unregister(dev_priv);
 	drm_dev_unregister(&dev_priv->drm);
 
 	i915_gem_shrinker_unregister(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 72a71bc94adf..20254dde172c 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -423,18 +423,13 @@ static int guc_log_relay_create(struct intel_guc_log *log)
 		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
 
 		ret = -ENOMEM;
-		goto err;
+		return ret;
 	}
 
 	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
 	log->runtime.relay_chan = guc_log_relay_chan;
 
 	return 0;
-
-err:
-	/* logging will be off */
-	i915_modparams.guc_log_level = 0;
-	return ret;
 }
 
 static void guc_log_relay_destroy(struct intel_guc_log *log)
@@ -511,7 +506,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
 	i915_vma_unpin_and_release(&log->vma);
 }
 
-int intel_guc_log_control_get(struct intel_guc_log *log)
+int intel_guc_log_level_get(struct intel_guc_log *log)
 {
 	GEM_BUG_ON(!log->vma);
 	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
@@ -526,11 +521,10 @@ int intel_guc_log_control_get(struct intel_guc_log *log)
 	LOG_LEVEL_TO_ENABLED(_x) ? _x - 1 : 0;	\
 })
 #define VERBOSITY_TO_LOG_LEVEL(x)  ((x) + 1)
-int intel_guc_log_control_set(struct intel_guc_log *log, u64 val)
+int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 {
 	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	bool enabled = LOG_LEVEL_TO_ENABLED(val);
 	int ret;
 
 	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
@@ -553,7 +547,8 @@ int intel_guc_log_control_set(struct intel_guc_log *log, u64 val)
 	}
 
 	intel_runtime_pm_get(dev_priv);
-	ret = guc_log_control(guc, enabled, LOG_LEVEL_TO_VERBOSITY(val));
+	ret = guc_log_control(guc, LOG_LEVEL_TO_ENABLED(val),
+			      LOG_LEVEL_TO_VERBOSITY(val));
 	intel_runtime_pm_put(dev_priv);
 	if (ret) {
 		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
@@ -562,89 +557,79 @@ int intel_guc_log_control_set(struct intel_guc_log *log, u64 val)
 
 	i915_modparams.guc_log_level = val;
 
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	if (enabled && !guc_log_has_runtime(log)) {
-		ret = intel_guc_log_register(log);
-		if (ret) {
-			/* logging will remain off */
-			i915_modparams.guc_log_level = 0;
-			goto out;
-		}
-	} else if (!enabled && guc_log_has_runtime(log)) {
-		intel_guc_log_unregister(log);
-	}
-
-	return 0;
-
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
-out:
+
 	return ret;
 }
 
-int intel_guc_log_register(struct intel_guc_log *log)
+int intel_guc_log_relay_open(struct intel_guc_log *log)
 {
 	int ret;
 
 	mutex_lock(&log->runtime.lock);
 
-	GEM_BUG_ON(guc_log_has_runtime(log));
+	if (guc_log_has_runtime(log)) {
+		ret = -EEXIST;
+		goto out_unlock;
+	}
 
 	ret = guc_log_relay_create(log);
 	if (ret)
-		goto err;
+		goto out_unlock;
 
 	ret = guc_log_map(log);
 	if (ret)
-		goto err_relay;
-
-	guc_flush_log_msg_enable(log_to_guc(log));
+		goto out_relay;
 
 	mutex_unlock(&log->runtime.lock);
 
+	guc_flush_log_msg_enable(log_to_guc(log));
+
+	/*
+	 * When GuC is logging without us relaying to userspace, we're ignoring
+	 * the flush notification. This means that we need to unconditionally
+	 * flush on relay enabling, since GuC only notifies us once.
+	 */
+	queue_work(log->runtime.flush_wq, &log->runtime.flush_work);
+
 	return 0;
 
-err_relay:
+out_relay:
 	guc_log_relay_destroy(log);
-err:
+out_unlock:
 	mutex_unlock(&log->runtime.lock);
 
 	return ret;
 }
 
-void intel_guc_log_unregister(struct intel_guc_log *log)
+void intel_guc_log_relay_flush(struct intel_guc_log *log)
 {
 	struct intel_guc *guc = log_to_guc(log);
 	struct drm_i915_private *i915 = guc_to_i915(guc);
 
-	guc_flush_log_msg_disable(guc);
-
 	/*
 	 * Before initiating the forceful flush, wait for any pending/ongoing
 	 * flush to complete otherwise forceful flush may not actually happen.
 	 */
 	flush_work(&log->runtime.flush_work);
 
-	/*
-	 * Once logging is disabled, GuC won't generate logs & send an
-	 * interrupt. But there could be some data in the log buffer
-	 * which is yet to be captured. So request GuC to update the log
-	 * buffer state and then collect the left over logs.
-	 */
 	intel_runtime_pm_get(i915);
 	guc_log_flush(guc);
 	intel_runtime_pm_put(i915);
 
 	/* GuC would have updated log buffer by now, so capture it */
 	guc_log_capture_logs(log);
+}
+
+void intel_guc_log_relay_close(struct intel_guc_log *log)
+{
+	guc_flush_log_msg_disable(log_to_guc(log));
+	flush_work(&log->runtime.flush_work);
 
 	mutex_lock(&log->runtime.lock);
-
 	GEM_BUG_ON(!guc_log_has_runtime(log));
-
 	guc_log_unmap(log);
 	guc_log_relay_destroy(log);
-
 	mutex_unlock(&log->runtime.lock);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index e0ea625032fb..3cf911eef3a8 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -61,11 +61,12 @@ struct intel_guc_log {
 
 void intel_guc_log_init_early(struct intel_guc_log *log);
 int intel_guc_log_create(struct intel_guc_log *log);
-int intel_guc_log_register(struct intel_guc_log *log);
-void intel_guc_log_unregister(struct intel_guc_log *log);
 void intel_guc_log_destroy(struct intel_guc_log *log);
 
-int intel_guc_log_control_get(struct intel_guc_log *log);
-int intel_guc_log_control_set(struct intel_guc_log *log, u64 control);
+int intel_guc_log_level_get(struct intel_guc_log *log);
+int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val);
+int intel_guc_log_relay_open(struct intel_guc_log *log);
+void intel_guc_log_relay_flush(struct intel_guc_log *log);
+void intel_guc_log_relay_close(struct intel_guc_log *log);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 765b86a53f19..9bb40cd047a0 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -221,28 +221,6 @@ static void guc_free_load_err_log(struct intel_guc *guc)
 		i915_gem_object_put(guc->load_err_log);
 }
 
-int intel_uc_register(struct drm_i915_private *i915)
-{
-	int ret = 0;
-
-	if (!USES_GUC(i915))
-		return 0;
-
-	if (i915_modparams.guc_log_level)
-		ret = intel_guc_log_register(&i915->guc.log);
-
-	return ret;
-}
-
-void intel_uc_unregister(struct drm_i915_private *i915)
-{
-	if (!USES_GUC(i915))
-		return;
-
-	if (i915_modparams.guc_log_level)
-		intel_guc_log_unregister(&i915->guc.log);
-}
-
 static int guc_enable_communication(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 0a2b413e9cd0..937e61175258 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -30,8 +30,6 @@
 
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
 void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
-int intel_uc_register(struct drm_i915_private *dev_priv);
-void intel_uc_unregister(struct drm_i915_private *dev_priv);
 void intel_uc_init_fw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
 int intel_uc_init_misc(struct drm_i915_private *dev_priv);

From b8299c71d4e15972ba507a9b8cdba5653cd247d5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:41 +0100
Subject: [PATCH 0064/1286] drm/i915/guc: Move check for fast memcpy_wc to
 relay creation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We only need the fast memcpy_wc reads when we're using relay to stream
the GuC log continuously. Let's prevent the user from creating a relay
if we know we won't be able to keep up with GuC.
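
The user-visible effect is that opening the relay now simply fails on such
machines (a sketch; same path assumptions as in the previous patch):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/dri/0/i915_guc_log_relay", O_WRONLY);

		if (fd < 0) {
			if (errno == ENXIO)
				fprintf(stderr, "fast memcpy_wc unavailable, no GuC log relay\n");
			return 1;
		}

		close(fd);
		return 0;
	}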

v2: Adjust the return value (Michał)

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-6-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc_log.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 20254dde172c..db89999a84e8 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -466,16 +466,6 @@ int intel_guc_log_create(struct intel_guc_log *log)
 
 	GEM_BUG_ON(log->vma);
 
-	/*
-	 * We require SSE 4.1 for fast reads from the GuC log buffer and
-	 * it should be present on the chipsets supporting GuC based
-	 * submisssions.
-	 */
-	if (WARN_ON(!i915_has_memcpy_from_wc())) {
-		ret = -EINVAL;
-		goto err;
-	}
-
 	vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
@@ -574,6 +564,16 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
 		goto out_unlock;
 	}
 
+	/*
+	 * We require SSE 4.1 for fast reads from the GuC log buffer and
+	 * it should be present on the chipsets supporting GuC based
+	 * submisssions.
+	 */
+	if (!i915_has_memcpy_from_wc()) {
+		ret = -ENXIO;
+		goto out_unlock;
+	}
+
 	ret = guc_log_relay_create(log);
 	if (ret)
 		goto out_unlock;

From 6a96be2448a446efb1ac67974535fd4b33df3d48 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:42 +0100
Subject: [PATCH 0065/1286] drm/i915/guc: Get rid of GuC log runtime
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

"Runtime" is not a very good name. Let's also move the relay overflow
counter into the relay struct.

v2: Rename things rather than remove the struct (Chris)

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-7-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c  |  4 +-
 drivers/gpu/drm/i915/intel_guc.c     | 15 ++++---
 drivers/gpu/drm/i915/intel_guc_log.c | 64 ++++++++++++++--------------
 drivers/gpu/drm/i915/intel_guc_log.h |  7 ++-
 4 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e857a9493b6f..d3d4d1b29112 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2347,8 +2347,8 @@ static void i915_guc_log_info(struct seq_file *m,
 	seq_printf(m, "\tTotal flush interrupt count: %u\n",
 		   guc->log.flush_interrupt_count);
 
-	seq_printf(m, "\tCapture miss count: %u\n",
-		   guc->log.capture_miss_count);
+	seq_printf(m, "\tRelay full count: %u\n",
+		   guc->log.relay.full_count);
 }
 
 static void i915_guc_client_info(struct seq_file *m,
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 3af603536b1b..70d118bb0a1a 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -87,9 +87,10 @@ int intel_guc_init_wq(struct intel_guc *guc)
 	 * or scheduled later on resume. This way the handling of work
 	 * item can be kept same between system suspend & rpm suspend.
 	 */
-	guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
-						WQ_HIGHPRI | WQ_FREEZABLE);
-	if (!guc->log.runtime.flush_wq) {
+	guc->log.relay.flush_wq =
+		alloc_ordered_workqueue("i915-guc_log",
+					WQ_HIGHPRI | WQ_FREEZABLE);
+	if (!guc->log.relay.flush_wq) {
 		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
 		return -ENOMEM;
 	}
@@ -112,7 +113,7 @@ int intel_guc_init_wq(struct intel_guc *guc)
 		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
 							  WQ_HIGHPRI);
 		if (!guc->preempt_wq) {
-			destroy_workqueue(guc->log.runtime.flush_wq);
+			destroy_workqueue(guc->log.relay.flush_wq);
 			DRM_ERROR("Couldn't allocate workqueue for GuC "
 				  "preemption\n");
 			return -ENOMEM;
@@ -130,7 +131,7 @@ void intel_guc_fini_wq(struct intel_guc *guc)
 	    USES_GUC_SUBMISSION(dev_priv))
 		destroy_workqueue(guc->preempt_wq);
 
-	destroy_workqueue(guc->log.runtime.flush_wq);
+	destroy_workqueue(guc->log.relay.flush_wq);
 }
 
 static int guc_shared_data_create(struct intel_guc *guc)
@@ -390,8 +391,8 @@ void intel_guc_to_host_event_handler(struct intel_guc *guc)
 
 	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) {
-		queue_work(guc->log.runtime.flush_wq,
-			   &guc->log.runtime.flush_work);
+		queue_work(guc->log.relay.flush_wq,
+			   &guc->log.relay.flush_work);
 
 		guc->log.flush_interrupt_count++;
 	}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index db89999a84e8..c220c2893d2c 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -171,10 +171,10 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
 	smp_wmb();
 
 	/* All data has been written, so now move the offset of sub buffer. */
-	relay_reserve(log->runtime.relay_chan, log->vma->obj->base.size);
+	relay_reserve(log->relay.channel, log->vma->obj->base.size);
 
 	/* Switch to the next sub buffer */
-	relay_flush(log->runtime.relay_chan);
+	relay_flush(log->relay.channel);
 }
 
 static void *guc_get_write_buffer(struct intel_guc_log *log)
@@ -188,7 +188,7 @@ static void *guc_get_write_buffer(struct intel_guc_log *log)
 	 * done without using relay_reserve() along with relay_write(). So its
 	 * better to use relay_reserve() alone.
 	 */
-	return relay_reserve(log->runtime.relay_chan, 0);
+	return relay_reserve(log->relay.channel, 0);
 }
 
 static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
@@ -239,13 +239,13 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 	void *src_data, *dst_data;
 	bool new_overflow;
 
-	mutex_lock(&log->runtime.lock);
+	mutex_lock(&log->relay.lock);
 
-	if (WARN_ON(!log->runtime.buf_addr))
+	if (WARN_ON(!log->relay.buf_addr))
 		goto out_unlock;
 
 	/* Get the pointer to shared GuC log buffer */
-	log_buf_state = src_data = log->runtime.buf_addr;
+	log_buf_state = src_data = log->relay.buf_addr;
 
 	/* Get the pointer to local buffer to store the logs */
 	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
@@ -256,7 +256,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 		 * getting consumed by User at a slow rate.
 		 */
 		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
-		log->capture_miss_count++;
+		log->relay.full_count++;
 
 		goto out_unlock;
 	}
@@ -330,20 +330,20 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 	guc_move_to_next_buf(log);
 
 out_unlock:
-	mutex_unlock(&log->runtime.lock);
+	mutex_unlock(&log->relay.lock);
 }
 
 static void capture_logs_work(struct work_struct *work)
 {
 	struct intel_guc_log *log =
-		container_of(work, struct intel_guc_log, runtime.flush_work);
+		container_of(work, struct intel_guc_log, relay.flush_work);
 
 	guc_log_capture_logs(log);
 }
 
-static bool guc_log_has_runtime(struct intel_guc_log *log)
+static bool guc_log_relay_enabled(struct intel_guc_log *log)
 {
-	return log->runtime.buf_addr;
+	return log->relay.buf_addr;
 }
 
 static int guc_log_map(struct intel_guc_log *log)
@@ -353,7 +353,7 @@ static int guc_log_map(struct intel_guc_log *log)
 	void *vaddr;
 	int ret;
 
-	lockdep_assert_held(&log->runtime.lock);
+	lockdep_assert_held(&log->relay.lock);
 
 	if (!log->vma)
 		return -ENODEV;
@@ -375,23 +375,23 @@ static int guc_log_map(struct intel_guc_log *log)
 		return PTR_ERR(vaddr);
 	}
 
-	log->runtime.buf_addr = vaddr;
+	log->relay.buf_addr = vaddr;
 
 	return 0;
 }
 
 static void guc_log_unmap(struct intel_guc_log *log)
 {
-	lockdep_assert_held(&log->runtime.lock);
+	lockdep_assert_held(&log->relay.lock);
 
 	i915_gem_object_unpin_map(log->vma->obj);
-	log->runtime.buf_addr = NULL;
+	log->relay.buf_addr = NULL;
 }
 
 void intel_guc_log_init_early(struct intel_guc_log *log)
 {
-	mutex_init(&log->runtime.lock);
-	INIT_WORK(&log->runtime.flush_work, capture_logs_work);
+	mutex_init(&log->relay.lock);
+	INIT_WORK(&log->relay.flush_work, capture_logs_work);
 }
 
 static int guc_log_relay_create(struct intel_guc_log *log)
@@ -402,7 +402,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
 	size_t n_subbufs, subbuf_size;
 	int ret;
 
-	lockdep_assert_held(&log->runtime.lock);
+	lockdep_assert_held(&log->relay.lock);
 
 	 /* Keep the size of sub buffers same as shared log buffer */
 	subbuf_size = GUC_LOG_SIZE;
@@ -427,17 +427,17 @@ static int guc_log_relay_create(struct intel_guc_log *log)
 	}
 
 	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
-	log->runtime.relay_chan = guc_log_relay_chan;
+	log->relay.channel = guc_log_relay_chan;
 
 	return 0;
 }
 
 static void guc_log_relay_destroy(struct intel_guc_log *log)
 {
-	lockdep_assert_held(&log->runtime.lock);
+	lockdep_assert_held(&log->relay.lock);
 
-	relay_close(log->runtime.relay_chan);
-	log->runtime.relay_chan = NULL;
+	relay_close(log->relay.channel);
+	log->relay.channel = NULL;
 }
 
 static void guc_log_capture_logs(struct intel_guc_log *log)
@@ -557,9 +557,9 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
 {
 	int ret;
 
-	mutex_lock(&log->runtime.lock);
+	mutex_lock(&log->relay.lock);
 
-	if (guc_log_has_runtime(log)) {
+	if (guc_log_relay_enabled(log)) {
 		ret = -EEXIST;
 		goto out_unlock;
 	}
@@ -582,7 +582,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
 	if (ret)
 		goto out_relay;
 
-	mutex_unlock(&log->runtime.lock);
+	mutex_unlock(&log->relay.lock);
 
 	guc_flush_log_msg_enable(log_to_guc(log));
 
@@ -591,14 +591,14 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
 	 * the flush notification. This means that we need to unconditionally
 	 * flush on relay enabling, since GuC only notifies us once.
 	 */
-	queue_work(log->runtime.flush_wq, &log->runtime.flush_work);
+	queue_work(log->relay.flush_wq, &log->relay.flush_work);
 
 	return 0;
 
 out_relay:
 	guc_log_relay_destroy(log);
 out_unlock:
-	mutex_unlock(&log->runtime.lock);
+	mutex_unlock(&log->relay.lock);
 
 	return ret;
 }
@@ -612,7 +612,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 	 * Before initiating the forceful flush, wait for any pending/ongoing
 	 * flush to complete otherwise forceful flush may not actually happen.
 	 */
-	flush_work(&log->runtime.flush_work);
+	flush_work(&log->relay.flush_work);
 
 	intel_runtime_pm_get(i915);
 	guc_log_flush(guc);
@@ -625,11 +625,11 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 void intel_guc_log_relay_close(struct intel_guc_log *log)
 {
 	guc_flush_log_msg_disable(log_to_guc(log));
-	flush_work(&log->runtime.flush_work);
+	flush_work(&log->relay.flush_work);
 
-	mutex_lock(&log->runtime.lock);
-	GEM_BUG_ON(!guc_log_has_runtime(log));
+	mutex_lock(&log->relay.lock);
+	GEM_BUG_ON(!guc_log_relay_enabled(log));
 	guc_log_unmap(log);
 	guc_log_relay_destroy(log);
-	mutex_unlock(&log->runtime.lock);
+	mutex_unlock(&log->relay.lock);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index 3cf911eef3a8..db35e548d2ed 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -43,16 +43,15 @@ struct intel_guc;
 struct intel_guc_log {
 	u32 flags;
 	struct i915_vma *vma;
-	/* The runtime stuff gets created only when GuC logging gets enabled */
 	struct {
 		void *buf_addr;
 		struct workqueue_struct *flush_wq;
 		struct work_struct flush_work;
-		struct rchan *relay_chan;
+		struct rchan *channel;
 		struct mutex lock;
-	} runtime;
+		u32 full_count;
+	} relay;
 	/* logging related stats */
-	u32 capture_miss_count;
 	u32 flush_interrupt_count;
 	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
 	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];

From db5579934f2fc8d916fe29355ac0c716acf1d921 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:43 +0100
Subject: [PATCH 0066/1286] drm/i915/guc: Always print log stats in
 i915_guc_info when using GuC
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

While some of the content in this file is related to GuC submission
only, that's not the case for the log-related statistics.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-8-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d3d4d1b29112..5584736a4293 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2330,7 +2330,7 @@ static void i915_guc_log_info(struct seq_file *m,
 {
 	struct intel_guc *guc = &dev_priv->guc;
 
-	seq_puts(m, "\nGuC logging stats:\n");
+	seq_puts(m, "GuC logging stats:\n");
 
 	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
 		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
@@ -2378,14 +2378,19 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	const struct intel_guc *guc = &dev_priv->guc;
 
-	if (!USES_GUC_SUBMISSION(dev_priv))
+	if (!USES_GUC(dev_priv))
 		return -ENODEV;
 
+	i915_guc_log_info(m, dev_priv);
+
+	if (!USES_GUC_SUBMISSION(dev_priv))
+		return 0;
+
 	GEM_BUG_ON(!guc->execbuf_client);
 
-	seq_printf(m, "Doorbell map:\n");
+	seq_printf(m, "\nDoorbell map:\n");
 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
-	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
+	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
 
 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
@@ -2395,8 +2400,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
 		i915_guc_client_info(m, dev_priv, guc->preempt_client);
 	}
 
-	i915_guc_log_info(m, dev_priv);
-
 	/* Add more as required ... */
 
 	return 0;

From 5e24e4a240770008ed46d90d6571ec27b5e2bd5b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:44 +0100
Subject: [PATCH 0067/1286] drm/i915/guc: Don't print out relay statistics when
 relay is disabled
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If nobody has enabled the relay, we're not communicating with GuC, which
means that the stats don't have any meaning. Let's also remove the
interrupt counter and tidy the debugfs formatting.

v2: Correct stats accounting (Sagar)
v3: Corrected one more error in stats accounting, move relay_enabled (Sagar)
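
For reference, the overflow bookkeeping these fixes touch is driven by
GuC's 4-bit buffer_full_cnt, so a wrap has to be accounted for explicitly
(an illustrative helper, not the driver code):

	/* A sample smaller than the previous one means the 4-bit counter wrapped. */
	static unsigned int overflow_delta(unsigned int prev, unsigned int now)
	{
		unsigned int delta = now - prev;	/* e.g. prev = 14, now = 2 */

		if (now < prev)
			delta += 16;			/* 2 - 14 + 16 = 4 overflows */
		return delta;
	}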

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-9-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c  | 49 ++++++++++++++++++----------
 drivers/gpu/drm/i915/intel_guc.c     |  5 +--
 drivers/gpu/drm/i915/intel_guc_log.c | 26 +++++++--------
 drivers/gpu/drm/i915/intel_guc_log.h | 10 +++---
 4 files changed, 52 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5584736a4293..964ea1a12357 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2325,30 +2325,45 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static const char *
+stringify_guc_log_type(enum guc_log_buffer_type type)
+{
+	switch (type) {
+	case GUC_ISR_LOG_BUFFER:
+		return "ISR";
+	case GUC_DPC_LOG_BUFFER:
+		return "DPC";
+	case GUC_CRASH_DUMP_LOG_BUFFER:
+		return "CRASH";
+	default:
+		MISSING_CASE(type);
+	}
+
+	return "";
+}
+
 static void i915_guc_log_info(struct seq_file *m,
 			      struct drm_i915_private *dev_priv)
 {
-	struct intel_guc *guc = &dev_priv->guc;
+	struct intel_guc_log *log = &dev_priv->guc.log;
+	enum guc_log_buffer_type type;
+
+	if (!intel_guc_log_relay_enabled(log)) {
+		seq_puts(m, "GuC log relay disabled\n");
+		return;
+	}
 
 	seq_puts(m, "GuC logging stats:\n");
 
-	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
-		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
-		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
-
-	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
-		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
-		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
-
-	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
-		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
-		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
-
-	seq_printf(m, "\tTotal flush interrupt count: %u\n",
-		   guc->log.flush_interrupt_count);
-
 	seq_printf(m, "\tRelay full count: %u\n",
-		   guc->log.relay.full_count);
+		   log->relay.full_count);
+
+	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
+			   stringify_guc_log_type(type),
+			   log->stats[type].flush,
+			   log->stats[type].sampled_overflow);
+	}
 }
 
 static void i915_guc_client_info(struct seq_file *m,
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 70d118bb0a1a..eeda1aa2afe6 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -390,12 +390,9 @@ void intel_guc_to_host_event_handler(struct intel_guc *guc)
 	spin_unlock(&guc->irq_lock);
 
 	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
-		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) {
+		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
 		queue_work(guc->log.relay.flush_wq,
 			   &guc->log.relay.flush_work);
-
-		guc->log.flush_interrupt_count++;
-	}
 }
 
 int intel_guc_sample_forcewake(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index c220c2893d2c..3180645b9642 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -195,18 +195,18 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
 				       enum guc_log_buffer_type type,
 				       unsigned int full_cnt)
 {
-	unsigned int prev_full_cnt = log->prev_overflow_count[type];
+	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
 	bool overflow = false;
 
 	if (full_cnt != prev_full_cnt) {
 		overflow = true;
 
-		log->prev_overflow_count[type] = full_cnt;
-		log->total_overflow_count[type] += full_cnt - prev_full_cnt;
+		log->stats[type].overflow = full_cnt;
+		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;
 
 		if (full_cnt < prev_full_cnt) {
 			/* buffer_full_cnt is a 4 bit counter */
-			log->total_overflow_count[type] += 16;
+			log->stats[type].sampled_overflow += 16;
 		}
 		DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
 	}
@@ -241,7 +241,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 
 	mutex_lock(&log->relay.lock);
 
-	if (WARN_ON(!log->relay.buf_addr))
+	if (WARN_ON(!intel_guc_log_relay_enabled(log)))
 		goto out_unlock;
 
 	/* Get the pointer to shared GuC log buffer */
@@ -279,7 +279,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 		full_cnt = log_buf_state_local.buffer_full_cnt;
 
 		/* Bookkeeping stuff */
-		log->flush_count[type] += log_buf_state_local.flush_to_file;
+		log->stats[type].flush += log_buf_state_local.flush_to_file;
 		new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);
 
 		/* Update the state of shared log buffer */
@@ -341,11 +341,6 @@ static void capture_logs_work(struct work_struct *work)
 	guc_log_capture_logs(log);
 }
 
-static bool guc_log_relay_enabled(struct intel_guc_log *log)
-{
-	return log->relay.buf_addr;
-}
-
 static int guc_log_map(struct intel_guc_log *log)
 {
 	struct intel_guc *guc = log_to_guc(log);
@@ -553,13 +548,18 @@ out_unlock:
 	return ret;
 }
 
+bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
+{
+	return log->relay.buf_addr;
+}
+
 int intel_guc_log_relay_open(struct intel_guc_log *log)
 {
 	int ret;
 
 	mutex_lock(&log->relay.lock);
 
-	if (guc_log_relay_enabled(log)) {
+	if (intel_guc_log_relay_enabled(log)) {
 		ret = -EEXIST;
 		goto out_unlock;
 	}
@@ -628,7 +628,7 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
 	flush_work(&log->relay.flush_work);
 
 	mutex_lock(&log->relay.lock);
-	GEM_BUG_ON(!guc_log_relay_enabled(log));
+	GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
 	guc_log_unmap(log);
 	guc_log_relay_destroy(log);
 	mutex_unlock(&log->relay.lock);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index db35e548d2ed..9ec5703d712c 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -52,10 +52,11 @@ struct intel_guc_log {
 		u32 full_count;
 	} relay;
 	/* logging related stats */
-	u32 flush_interrupt_count;
-	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
-	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
-	u32 flush_count[GUC_MAX_LOG_BUFFER];
+	struct {
+		u32 sampled_overflow;
+		u32 overflow;
+		u32 flush;
+	} stats[GUC_MAX_LOG_BUFFER];
 };
 
 void intel_guc_log_init_early(struct intel_guc_log *log);
@@ -64,6 +65,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log);
 
 int intel_guc_log_level_get(struct intel_guc_log *log);
 int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val);
+bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
 int intel_guc_log_relay_open(struct intel_guc_log *log);
 void intel_guc_log_relay_flush(struct intel_guc_log *log);
 void intel_guc_log_relay_close(struct intel_guc_log *log);

From cb5d64e9f13e0dd817c3ae2dbe73c3b8c6c13f95 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:45 +0100
Subject: [PATCH 0068/1286] drm/i915/guc: Allow user to control default GuC
 logging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

While both the naming and the actual log-enable logic in the GuC
interface are confusing, we can simply expose the default log as yet
another log level.
GuC logic aside, from i915's point of view we now have the following
GuC log levels (see the sketch after this list):
	0 Log disabled
	1 Non-verbose log
	2-5 Verbose log
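
A sketch of how those levels map onto the GUC_LOG_LEVEL_* helpers this
patch introduces (values worked out from the macros added to
intel_guc_log.h below):

	/*
	 * level 0 -> GUC_LOG_LEVEL_TO_ENABLED() == 0               (log disabled)
	 * level 1 -> ENABLED == 1, GUC_LOG_LEVEL_TO_VERBOSE() == 0 (default log only)
	 * level 2 -> ENABLED == 1, VERBOSE == 1, GUC_LOG_LEVEL_TO_VERBOSITY() == 0
	 * level 5 -> ENABLED == 1, VERBOSE == 1, VERBOSITY == 3 == GUC_LOG_VERBOSITY_MAX
	 */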

v2: Adjust naming after rebase.
v3: Fixed the log_level logic error introduced on rebase.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-10-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c      | 24 +++++++++++++++---------
 drivers/gpu/drm/i915/intel_guc_fwif.h |  5 +++--
 drivers/gpu/drm/i915/intel_guc_log.c  | 18 +++++++-----------
 drivers/gpu/drm/i915/intel_guc_log.h  | 15 +++++++++++++++
 drivers/gpu/drm/i915/intel_uc.c       | 14 +++++++++-----
 5 files changed, 49 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index eeda1aa2afe6..dc16392c4c3a 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -222,17 +222,23 @@ static u32 get_core_family(struct drm_i915_private *dev_priv)
 	}
 }
 
-static u32 get_log_verbosity_flags(void)
+static u32 get_log_control_flags(void)
 {
-	if (i915_modparams.guc_log_level > 0) {
-		u32 verbosity = i915_modparams.guc_log_level - 1;
+	u32 level = i915_modparams.guc_log_level;
+	u32 flags = 0;
 
-		GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);
-		return verbosity << GUC_LOG_VERBOSITY_SHIFT;
-	}
+	GEM_BUG_ON(level < 0);
 
-	GEM_BUG_ON(i915_modparams.enable_guc < 0);
-	return GUC_LOG_DISABLED;
+	if (!GUC_LOG_LEVEL_TO_ENABLED(level))
+		flags |= GUC_LOG_DEFAULT_DISABLED;
+
+	if (!GUC_LOG_LEVEL_TO_VERBOSE(level))
+		flags |= GUC_LOG_DISABLED;
+	else
+		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
+			 GUC_LOG_VERBOSITY_SHIFT;
+
+	return flags;
 }
 
 /*
@@ -267,7 +273,7 @@ void intel_guc_init_params(struct intel_guc *guc)
 
 	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
 
-	params[GUC_CTL_DEBUG] = get_log_verbosity_flags();
+	params[GUC_CTL_DEBUG] = get_log_control_flags();
 
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (USES_GUC_SUBMISSION(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 6a10aa6f04d3..4971685a2ea8 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -127,7 +127,7 @@
 #define   GUC_PROFILE_ENABLED		(1 << 7)
 #define   GUC_WQ_TRACK_ENABLED		(1 << 8)
 #define   GUC_ADS_ENABLED		(1 << 9)
-#define   GUC_DEBUG_RESERVED		(1 << 10)
+#define   GUC_LOG_DEFAULT_DISABLED	(1 << 10)
 #define   GUC_ADS_ADDR_SHIFT		11
 #define   GUC_ADS_ADDR_MASK		0xfffff800
 
@@ -539,7 +539,8 @@ union guc_log_control {
 		u32 logging_enabled:1;
 		u32 reserved1:3;
 		u32 verbosity:4;
-		u32 reserved2:24;
+		u32 default_logging:1;
+		u32 reserved2:23;
 	};
 	u32 value;
 } __packed;
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 3180645b9642..4cb422ceb283 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -57,12 +57,14 @@ static int guc_log_flush(struct intel_guc *guc)
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
+static int guc_log_control(struct intel_guc *guc, bool enable,
+			   bool default_logging, u32 verbosity)
 {
 	union guc_log_control control_val = {
 		{
 			.logging_enabled = enable,
 			.verbosity = verbosity,
+			.default_logging = default_logging,
 		},
 	};
 	u32 action[] = {
@@ -499,13 +501,6 @@ int intel_guc_log_level_get(struct intel_guc_log *log)
 	return i915_modparams.guc_log_level;
 }
 
-#define GUC_LOG_LEVEL_DISABLED		0
-#define LOG_LEVEL_TO_ENABLED(x)		((x) > 0)
-#define LOG_LEVEL_TO_VERBOSITY(x) ({		\
-	typeof(x) _x = (x);			\
-	LOG_LEVEL_TO_ENABLED(_x) ? _x - 1 : 0;	\
-})
-#define VERBOSITY_TO_LOG_LEVEL(x)  ((x) + 1)
 int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 {
 	struct intel_guc *guc = log_to_guc(log);
@@ -521,7 +516,7 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 	 * as indication that logging should be disabled.
 	 */
 	if (val < GUC_LOG_LEVEL_DISABLED ||
-	    val > VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX))
+	    val > GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX))
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
@@ -532,8 +527,9 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 	}
 
 	intel_runtime_pm_get(dev_priv);
-	ret = guc_log_control(guc, LOG_LEVEL_TO_ENABLED(val),
-			      LOG_LEVEL_TO_VERBOSITY(val));
+	ret = guc_log_control(guc, GUC_LOG_LEVEL_TO_VERBOSE(val),
+			      GUC_LOG_LEVEL_TO_ENABLED(val),
+			      GUC_LOG_LEVEL_TO_VERBOSITY(val));
 	intel_runtime_pm_put(dev_priv);
 	if (ret) {
 		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index 9ec5703d712c..af1532c0d3e4 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -40,6 +40,21 @@ struct intel_guc;
 #define GUC_LOG_SIZE	((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
 			  1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
 
+/*
+ * While we're using plain log level in i915, GuC controls are much more...
+ * "elaborate"? We have a couple of bits for verbosity, separate bit for actual
+ * log enabling, and separate bit for default logging - which "conveniently"
+ * ignores the enable bit.
+ */
+#define GUC_LOG_LEVEL_DISABLED			0
+#define GUC_LOG_LEVEL_TO_ENABLED(x)		((x) > 0)
+#define GUC_LOG_LEVEL_TO_VERBOSE(x)		((x) > 1)
+#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({		\
+	typeof(x) _x = (x);				\
+	GUC_LOG_LEVEL_TO_VERBOSE(_x) ? _x - 2 : 0;	\
+})
+#define GUC_VERBOSITY_TO_LOG_LEVEL(x)		((x) + 2)
+
 struct intel_guc_log {
 	u32 flags;
 	struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 9bb40cd047a0..ad1785522497 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -75,7 +75,8 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
 	if (HAS_GUC(dev_priv) && intel_uc_is_using_guc() &&
 	    (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
 	     IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)))
-		guc_log_level = 1 + GUC_LOG_VERBOSITY_MAX;
+		guc_log_level =
+			GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX);
 
 	/* Any platform specific fine-tuning can be done here */
 
@@ -142,17 +143,20 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
 		i915_modparams.guc_log_level = 0;
 	}
 
-	if (i915_modparams.guc_log_level > 1 + GUC_LOG_VERBOSITY_MAX) {
+	if (i915_modparams.guc_log_level >
+	    GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)) {
 		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
 			 "guc_log_level", i915_modparams.guc_log_level,
 			 "verbosity too high");
-		i915_modparams.guc_log_level = 1 + GUC_LOG_VERBOSITY_MAX;
+		i915_modparams.guc_log_level =
+			GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX);
 	}
 
-	DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s verbosity:%d)\n",
+	DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
 			 i915_modparams.guc_log_level,
 			 yesno(i915_modparams.guc_log_level),
-			 i915_modparams.guc_log_level - 1);
+			 yesno(GUC_LOG_LEVEL_TO_VERBOSE(i915_modparams.guc_log_level)),
+			 GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));
 
 	/* Make sure that sanitization was done */
 	GEM_BUG_ON(i915_modparams.enable_guc < 0);

From 9605d1ce7c6bcb673b6893ac12b565f1bde8f0bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:46 +0100
Subject: [PATCH 0069/1286] drm/i915/guc: Default to non-verbose GuC logging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Now that we've decoupled logging from relay, the GuC log level only
controls GuC behaviour - there shouldn't be any impact on i915
behaviour. We're only going to see a single extra interrupt when the
log gets half full.
That, and the fact that we're seeing igt/gem_exec_nop/basic-series
fail when non-verbose logging is disabled.

v2: Bring back the "auto" guc_log_level, now that we fixed the log

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-11-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/i915_params.h | 2 +-
 drivers/gpu/drm/i915/intel_uc.c    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 430f5f9d0ff4..c96360398072 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -48,7 +48,7 @@ struct drm_printer;
 	param(int, enable_ips, 1) \
 	param(int, invert_brightness, 0) \
 	param(int, enable_guc, 0) \
-	param(int, guc_log_level, 0) \
+	param(int, guc_log_level, -1) \
 	param(char *, guc_firmware_path, NULL) \
 	param(char *, huc_firmware_path, NULL) \
 	param(int, mmio_debug, 0) \
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index ad1785522497..34e847d0ee4c 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -69,7 +69,7 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
 
 static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
 {
-	int guc_log_level = 0; /* disabled */
+	int guc_log_level = 1; /* non-verbose */
 
 	/* Enable if we're running on platform with GuC and debug config */
 	if (HAS_GUC(dev_priv) && intel_uc_is_using_guc() &&

From feb06c151fade9ecaa3dd410d792cce26e8b10de Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Mon, 19 Mar 2018 10:53:47 +0100
Subject: [PATCH 0070/1286] drm/i915/guc: Demote GuC error messages
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We're using those functions in selftests, and the callers are expected
to do the error handling anyway. Let's demote all GuC action and
doorbell creation error messages to DEBUG_DRIVER.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-12-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c            | 7 ++++---
 drivers/gpu/drm/i915/intel_guc_submission.c | 4 ++--
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index dc16392c4c3a..ee5230cc722e 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -362,9 +362,10 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
 		if (ret != -ETIMEDOUT)
 			ret = -EIO;
 
-		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
-			 " ret=%d status=0x%08X response=0x%08X\n",
-			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+		DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
+				 " ret=%d status=0x%08X response=0x%08X\n",
+				 action[0], ret, status,
+				 I915_READ(SOFT_SCRATCH(15)));
 	}
 
 	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 33af2930fc79..207cda062626 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -231,8 +231,8 @@ static int create_doorbell(struct intel_guc_client *client)
 	if (ret) {
 		__destroy_doorbell(client);
 		__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
-		DRM_ERROR("Couldn't create client %u doorbell: %d\n",
-			  client->stage_id, ret);
+		DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
+				 client->stage_id, ret);
 		return ret;
 	}
 

From 46b863325c2f58b564463c4d6e66ee0d4f2f3244 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 19 Mar 2018 12:35:28 +0000
Subject: [PATCH 0071/1286] drm/i915: Prefer memset64() when filling the iomap

As the ringbuffer may exist inside stolen memory, our access to it may
be via the GTT iomap. This implies we may only have WC access, for which
the conventional memset() substitution of rep stos performs very badly,
so switch to the rep mov[dq] variants when available.
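
As an illustration (not part of the patch), a minimal userspace sketch of
the qword-at-a-time fill that memset64() provides; plain C stands in for
the kernel's arch-optimised implementation:

  #include <stdint.h>
  #include <stddef.h>
  #include <stdio.h>

  /* Fill a buffer with a repeating 64-bit pattern, one qword store per
   * iteration, instead of the byte-wise stores a plain memset() may use. */
  static void fill_qwords(uint64_t *dst, uint64_t value, size_t count)
  {
      size_t i;

      for (i = 0; i < count; i++)
          dst[i] = value;
  }

  int main(void)
  {
      uint64_t tail[8];

      /* Same spirit as memset64(ring->vaddr + ring->emit, 0,
       * need_wrap / sizeof(u64)) in the hunk below. */
      fill_qwords(tail, 0, sizeof(tail) / sizeof(tail[0]));
      printf("filled %zu qwords\n", sizeof(tail) / sizeof(tail[0]));
      return 0;
  }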

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319123528.28249-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 72d6167c519a..04d9d9a946a7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1693,17 +1693,18 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 		need_wrap &= ~1;
 		GEM_BUG_ON(need_wrap > ring->space);
 		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
+		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
 
 		/* Fill the tail with MI_NOOP */
-		memset(ring->vaddr + ring->emit, 0, need_wrap);
-		ring->emit = 0;
+		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
 		ring->space -= need_wrap;
+		ring->emit = 0;
 	}
 
 	GEM_BUG_ON(ring->emit > ring->size - bytes);
 	GEM_BUG_ON(ring->space < bytes);
 	cs = ring->vaddr + ring->emit;
-	GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
+	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
 	ring->emit += bytes;
 	ring->space -= bytes;
 

From 873d66fb9b1d4f4cd441f84068abb5457c60f127 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 16 Mar 2018 21:49:59 +0000
Subject: [PATCH 0072/1286] drm/i915: Trim error mask to known engines
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

For the convenience of userspace passing in an arbitrary reset mask,
remove unknown engines from the set of engines that are to be reset.
This means that we always follow a per-engine reset with a full-device
reset when userspace writes -1 into debugfs/i915_wedged.
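
As an aside (illustrative sketch, not part of the patch), the trimming is
just an AND against the device's known-engine mask; the values below are
made up:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      /* Hypothetical: userspace writes -1 via debugfs/i915_wedged while
       * the device only exposes three engines (bits 0-2). */
      uint32_t engine_mask = ~0u;
      uint32_t ring_mask = (1u << 0) | (1u << 1) | (1u << 2);

      engine_mask &= ring_mask;   /* drop engines the hardware doesn't have */
      printf("reset mask after trimming: 0x%x\n", engine_mask);
      return 0;
  }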

Reported-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180316215001.12391-1-chris@chris-wilson.co.uk
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
---
 drivers/gpu/drm/i915/i915_irq.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 828f3104488c..44eef355e12c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2985,6 +2985,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 	 */
 	intel_runtime_pm_get(dev_priv);
 
+	engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
 	i915_clear_error_registers(dev_priv);
 

From 91b00dff56856ea4f87aa9fb3f6c90dbb5adfc55 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Tue, 20 Mar 2018 13:50:09 +0100
Subject: [PATCH 0073/1286] drm/i915: Select STACKDEPOT for DRM_I915_DEBUG

select in Kconfig isn't recursive; we need to select the stuff our
selects select, too. Fix that.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320125009.2305-1-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/i915/Kconfig.debug | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 108d21f34777..dd5bf6389ead 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -25,6 +25,7 @@ config DRM_I915_DEBUG
         select X86_MSR # used by igt/pm_rpm
         select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
         select DRM_DEBUG_MM if DRM=y
+        select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
 	select DRM_DEBUG_MM_SELFTEST
 	select SW_SYNC # signaling validation framework (igt/syncobj*)
 	select DRM_I915_SW_FENCE_DEBUG_OBJECTS

From 26376a7e74d2deff445a72a2cfbfef084c28e4bc Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Fri, 16 Mar 2018 14:14:49 +0200
Subject: [PATCH 0074/1286] drm/i915/icl: Check for fused-off VDBOX and VEBOX
 instances

In Gen11, the Video Decode engines (aka VDBOX, aka VCS, aka BSD) and the
Video Enhancement engines (aka VEBOX, aka VECS) could be fused off. Also,
each VDBOX and VEBOX has its own power well, which only exists if the
related engine exists in the HW.

Unfortunately, we have a Catch-22 situation going on: we need the blitter
forcewake to read the register with the fuse info, but we cannot initialize
the forcewake domains without knowing about the engines present in the HW.
We work around this problem by allowing the initialization of all forcewake
domains and then pruning the fused-off ones, as per the fuse information.
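
To make the fuse decoding concrete, here is an illustrative standalone
sketch (not part of the patch): the masks mirror the GEN11_GT_* defines
added below, while the fuse value and engine counts are assumptions:

  #include <stdint.h>
  #include <stdio.h>

  #define VDBOX_DISABLE_MASK   0xffu              /* GEN11_GT_VDBOX_DISABLE_MASK */
  #define VEBOX_DISABLE_SHIFT  16                 /* GEN11_GT_VEBOX_DISABLE_SHIFT */
  #define VEBOX_DISABLE_MASK   (0xffu << VEBOX_DISABLE_SHIFT)

  int main(void)
  {
      uint32_t media_fuse = 0x00020006;            /* hypothetical fuse value */
      uint8_t vdbox_disable = media_fuse & VDBOX_DISABLE_MASK;
      uint8_t vebox_disable = (media_fuse & VEBOX_DISABLE_MASK) >>
                              VEBOX_DISABLE_SHIFT;
      int i;

      for (i = 0; i < 4; i++)                      /* assumed VDBOX count */
          if (vdbox_disable & (1u << i))
              printf("vcs%d fused off\n", i);      /* here: vcs1, vcs2 */

      for (i = 0; i < 2; i++)                      /* assumed VEBOX count */
          if (vebox_disable & (1u << i))
              printf("vecs%d fused off\n", i);     /* here: vecs1 */
      return 0;
  }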

Bspec: 20680

v2: We were shifting incorrectly for vebox disable (Vinay)

v3: Assert mmio is ready and warn if we have attempted to initialize
    forcewake for fused-off engines (Paulo)

v4:
  - Use INTEL_GEN in new code (Tvrtko)
  - Shorter local variable (Tvrtko, Michal)
  - Keep "if (!...) continue" style (Tvrtko)
  - No unnecessary BUG_ON (Tvrtko)
  - WARN_ON and cleanup if wrong mask (Tvrtko, Michal)
  - Use I915_READ_FW (Michal)
  - Use I915_MAX_VCS/VECS macros (Michal)

v5: Rebased by Rodrigo fixing conflicts on top of:
    "drm/i915: Simplify intel_engines_init"

v6: Fix v5. Remove info->num_rings. (by Oscar)

v7: Rebase (Rodrigo).

v8:
  - s/intel_device_info_fused_off_engines/
    intel_device_info_init_mmio (Chris)
  - Make vdbox_disable & vebox_disable local variables (Chris)

v9:
  - Move function declaration to intel_device_info.h (Michal)
  - Missing indent in bit fields definitions (Michal)
  - When RC6 is enabled by BIOS, the fuse register cannot be read until
    the blitter powerwell is awake. Shuffle where the fuse is read, prune
    the forcewake domains after the fact and change the commit message
    accordingly (Vinay, Sagar, Chris).

v10:
  - Improved commit message (Sagar)
  - New line in header file (Sagar)
  - Specify the message in fw_domain_reset applies to ICL+ (Sagar)

Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180316121456.11577-1-mika.kuoppala@linux.intel.com
[Mika: soothe checkpatch on commit msg]
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c          |  4 ++
 drivers/gpu/drm/i915/i915_reg.h          |  5 +++
 drivers/gpu/drm/i915/intel_device_info.c | 47 ++++++++++++++++++++
 drivers/gpu/drm/i915/intel_device_info.h |  2 +
 drivers/gpu/drm/i915/intel_uncore.c      | 56 ++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_uncore.h      |  1 +
 6 files changed, 115 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1021bf40e236..ba5f150a29c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1033,6 +1033,10 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
 	intel_uncore_init(dev_priv);
 
+	intel_device_info_init_mmio(dev_priv);
+
+	intel_uncore_prune(dev_priv);
+
 	intel_uc_init_mmio(dev_priv);
 
 	ret = intel_engines_init_mmio(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1b48d50dfcf1..429de0ad6cd4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2545,6 +2545,11 @@ enum i915_power_well_id {
 #define GEN10_EU_DISABLE3		_MMIO(0x9140)
 #define   GEN10_EU_DIS_SS_MASK		0xff
 
+#define GEN11_GT_VEBOX_VDBOX_DISABLE	_MMIO(0x9140)
+#define   GEN11_GT_VDBOX_DISABLE_MASK	0xff
+#define   GEN11_GT_VEBOX_DISABLE_SHIFT	16
+#define   GEN11_GT_VEBOX_DISABLE_MASK	(0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL	_MMIO(0x12050)
 #define   GEN6_BSD_SLEEP_MSG_DISABLE	(1 << 0)
 #define   GEN6_BSD_SLEEP_FLUSH_DISABLE	(1 << 2)
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 3dd350f7b8e6..4babfc6ee45b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -780,3 +780,50 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
 {
 	drm_printf(p, "scheduler: %x\n", caps->scheduler);
 }
+
+/*
+ * Determine which engines are fused off in our particular hardware. Since the
+ * fuse register is in the blitter powerwell, we need forcewake to be ready at
+ * this point (but later we need to prune the forcewake domains for engines that
+ * are indeed fused off).
+ */
+void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
+{
+	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	u8 vdbox_disable, vebox_disable;
+	u32 media_fuse;
+	int i;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return;
+
+	media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
+
+	vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+	vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+			GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+	DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
+	for (i = 0; i < I915_MAX_VCS; i++) {
+		if (!HAS_ENGINE(dev_priv, _VCS(i)))
+			continue;
+
+		if (!(BIT(i) & vdbox_disable))
+			continue;
+
+		info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+		DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+	}
+
+	DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
+	for (i = 0; i < I915_MAX_VECS; i++) {
+		if (!HAS_ENGINE(dev_priv, _VECS(i)))
+			continue;
+
+		if (!(BIT(i) & vebox_disable))
+			continue;
+
+		info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+		DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 0835752c8b22..0cbb92223013 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -247,6 +247,8 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info,
 void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
 				     struct drm_printer *p);
 
+void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
+
 void intel_driver_caps_print(const struct intel_driver_caps *caps,
 			     struct drm_printer *p);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4df7c2ef8576..4c616d074a97 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -62,6 +62,11 @@ static inline void
 fw_domain_reset(struct drm_i915_private *i915,
 		const struct intel_uncore_forcewake_domain *d)
 {
+	/*
+	 * We don't really know if the powerwell for the forcewake domain we are
+	 * trying to reset here does exist at this point (engines could be fused
+	 * off in ICL+), so no waiting for acks
+	 */
 	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
 }
 
@@ -1353,6 +1358,23 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
 	fw_domain_reset(dev_priv, d);
 }
 
+static void fw_domain_fini(struct drm_i915_private *dev_priv,
+			   enum forcewake_domain_id domain_id)
+{
+	struct intel_uncore_forcewake_domain *d;
+
+	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+		return;
+
+	d = &dev_priv->uncore.fw_domain[domain_id];
+
+	WARN_ON(d->wake_count);
+	WARN_ON(hrtimer_cancel(&d->timer));
+	memset(d, 0, sizeof(*d));
+
+	dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+}
+
 static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 {
 	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
@@ -1565,6 +1587,40 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
 		&dev_priv->uncore.pmic_bus_access_nb);
 }
 
+/*
+ * We might have detected that some engines are fused off after we initialized
+ * the forcewake domains. Prune them, to make sure they only reference existing
+ * engines.
+ */
+void intel_uncore_prune(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_GEN(dev_priv) >= 11) {
+		enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+		enum forcewake_domain_id domain_id;
+		int i;
+
+		for (i = 0; i < I915_MAX_VCS; i++) {
+			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
+
+			if (HAS_ENGINE(dev_priv, _VCS(i)))
+				continue;
+
+			if (fw_domains & BIT(domain_id))
+				fw_domain_fini(dev_priv, domain_id);
+		}
+
+		for (i = 0; i < I915_MAX_VECS; i++) {
+			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
+
+			if (HAS_ENGINE(dev_priv, _VECS(i)))
+				continue;
+
+			if (fw_domains & BIT(domain_id))
+				fw_domain_fini(dev_priv, domain_id);
+		}
+	}
+}
+
 void intel_uncore_fini(struct drm_i915_private *dev_priv)
 {
 	/* Paranoia: make sure we have disabled everything before we exit. */
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index dfdf444e4bcc..47478d609630 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -140,6 +140,7 @@ struct intel_uncore {
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
 void intel_uncore_init(struct drm_i915_private *dev_priv);
+void intel_uncore_prune(struct drm_i915_private *dev_priv);
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
 void intel_uncore_fini(struct drm_i915_private *dev_priv);

From d53d5ffb9b937ae08402d5aec5c44fb9be409afb Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Fri, 16 Mar 2018 14:14:50 +0200
Subject: [PATCH 0075/1286] drm/i915/icl: Enable the extra video decode and
 enhancement boxes for Icelake 11

Icelake 11 has one vebox and two vdboxes (0 and 2).

Bspec: 21140

v2: Split out in two (Daniele)

Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180316121456.11577-2-mika.kuoppala@linux.intel.com
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_pci.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 062e91b39085..4364922e935d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -602,6 +602,7 @@ static const struct intel_device_info intel_icelake_11_info = {
 	PLATFORM(INTEL_ICELAKE),
 	.is_alpha_support = 1,
 	.has_resource_streamer = 0,
+	.ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
 };
 
 #undef GEN

From d3d57927995f872e5786ff6ae517a6c3e7a94d75 Mon Sep 17 00:00:00 2001
From: Kelvin Gardiner <kelvin.gardiner@intel.com>
Date: Fri, 16 Mar 2018 14:14:51 +0200
Subject: [PATCH 0076/1286] drm/i915/icl: Update subslice define for ICL 11

ICL 11 has a greater maximum number of subslices. This patch
reflects that.
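
For reference, the wider Gen11 fields pack into the MCR selector as in
this small sketch (not part of the patch; the macros mirror the i915_reg.h
additions below, the slice/subslice values are hypothetical):

  #include <stdint.h>
  #include <stdio.h>

  #define GEN11_MCR_SLICE(slice)     (((slice) & 0xf) << 27)
  #define GEN11_MCR_SLICE_MASK       GEN11_MCR_SLICE(0xf)
  #define GEN11_MCR_SUBSLICE(ss)     (((ss) & 0x7) << 24)
  #define GEN11_MCR_SUBSLICE_MASK    GEN11_MCR_SUBSLICE(0x7)

  int main(void)
  {
      uint32_t mcr = 0;                  /* pretend current selector value */
      int slice = 0, subslice = 5;       /* hypothetical steering target */

      mcr &= ~(GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK);
      mcr |= GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
      printf("MCR selector: 0x%08x\n", mcr);   /* 0x05000000 */
      return 0;
  }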

v2: GEN11 updates to MCR_SELECTOR (Oscar)
v3: Copypasta error in the new defines (Lionel)

Bspec: 21139
BSpec: 21108

Signed-off-by: Kelvin Gardiner <kelvin.gardiner@intel.com>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com> (v1)
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> (v1)
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180316121456.11577-3-mika.kuoppala@linux.intel.com
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_reg.h         |  4 ++++
 drivers/gpu/drm/i915/intel_engine_cs.c  | 22 ++++++++++++++++++----
 drivers/gpu/drm/i915/intel_ringbuffer.h |  2 +-
 3 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 429de0ad6cd4..699292eae02e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2151,6 +2151,10 @@ enum i915_power_well_id {
 #define   GEN8_MCR_SLICE_MASK		GEN8_MCR_SLICE(3)
 #define   GEN8_MCR_SUBSLICE(subslice)	(((subslice) & 3) << 24)
 #define   GEN8_MCR_SUBSLICE_MASK	GEN8_MCR_SUBSLICE(3)
+#define   GEN11_MCR_SLICE(slice)	(((slice) & 0xf) << 27)
+#define   GEN11_MCR_SLICE_MASK		GEN11_MCR_SLICE(0xf)
+#define   GEN11_MCR_SUBSLICE(subslice)	(((subslice) & 0x7) << 24)
+#define   GEN11_MCR_SUBSLICE_MASK	GEN11_MCR_SUBSLICE(0x7)
 #define RING_IPEIR(base)	_MMIO((base)+0x64)
 #define RING_IPEHR(base)	_MMIO((base)+0x68)
 /*
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 337dfa56a738..de09fa42a509 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -800,10 +800,24 @@ static inline uint32_t
 read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
 		  int subslice, i915_reg_t reg)
 {
+	uint32_t mcr_slice_subslice_mask;
+	uint32_t mcr_slice_subslice_select;
 	uint32_t mcr;
 	uint32_t ret;
 	enum forcewake_domains fw_domains;
 
+	if (INTEL_GEN(dev_priv) >= 11) {
+		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+					  GEN11_MCR_SUBSLICE_MASK;
+		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
+					    GEN11_MCR_SUBSLICE(subslice);
+	} else {
+		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+					  GEN8_MCR_SUBSLICE_MASK;
+		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
+					    GEN8_MCR_SUBSLICE(subslice);
+	}
+
 	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
 						    FW_REG_READ);
 	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -818,14 +832,14 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
 	 * The HW expects the slice and sublice selectors to be reset to 0
 	 * after reading out the registers.
 	 */
-	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
-	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
-	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+	WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
+	mcr &= ~mcr_slice_subslice_mask;
+	mcr |= mcr_slice_subslice_select;
 	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
 
 	ret = I915_READ_FW(reg);
 
-	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
+	mcr &= ~mcr_slice_subslice_mask;
 	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
 
 	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1f50727a5ddb..a02c7b3b9d55 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -86,7 +86,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
 }
 
 #define I915_MAX_SLICES	3
-#define I915_MAX_SUBSLICES 3
+#define I915_MAX_SUBSLICES 8
 
 #define instdone_slice_mask(dev_priv__) \
 	(INTEL_GEN(dev_priv__) == 7 ? \

From 03380d173a697475c747e4cd6ea2be739005dedc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Winiarski?= <michal.winiarski@intel.com>
Date: Tue, 20 Mar 2018 12:55:17 +0100
Subject: [PATCH 0077/1286] drm/i915/guc: Don't try to enable GuC logging when
 we're not using GuC
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When changing the default values for guc_log_level, we accidentally left
the log enabled on non-GuC platforms. Let's fix that.
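
The level-to-verbosity mapping behind the renamed macros can be seen with
a standalone sketch (the kernel's GUC_LOG_LEVEL_TO_VERBOSITY uses a GNU
statement expression; it is simplified here):

  #include <stdio.h>

  #define GUC_LOG_LEVEL_DISABLED        0
  #define GUC_LOG_LEVEL_NON_VERBOSE     1
  #define GUC_LOG_LEVEL_IS_ENABLED(x)   ((x) > GUC_LOG_LEVEL_DISABLED)
  #define GUC_LOG_LEVEL_IS_VERBOSE(x)   ((x) > GUC_LOG_LEVEL_NON_VERBOSE)
  #define GUC_LOG_LEVEL_TO_VERBOSITY(x) (GUC_LOG_LEVEL_IS_VERBOSE(x) ? (x) - 2 : 0)

  int main(void)
  {
      int level;

      /* 0 = disabled, 1 = non-verbose, 2..5 = verbosity 0..3 */
      for (level = 0; level <= 5; level++)
          printf("level=%d enabled=%d verbose=%d verbosity=%d\n",
                 level,
                 GUC_LOG_LEVEL_IS_ENABLED(level),
                 GUC_LOG_LEVEL_IS_VERBOSE(level),
                 GUC_LOG_LEVEL_TO_VERBOSITY(level));
      return 0;
  }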

v2: Define the levels used and remove (now obsolete) comments (Chris)
v3: Use "IS" rather than "TO" for booleans (Chris)

Fixes: 9605d1ce7c6b ("drm/i915/guc: Default to non-verbose GuC logging")
Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320115517.20423-1-michal.winiarski@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c     |  4 ++--
 drivers/gpu/drm/i915/intel_guc_log.c |  7 +++----
 drivers/gpu/drm/i915/intel_guc_log.h | 12 +++++++-----
 drivers/gpu/drm/i915/intel_uc.c      | 23 +++++++++++------------
 4 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index ee5230cc722e..4b7c9c6415dd 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -229,10 +229,10 @@ static u32 get_log_control_flags(void)
 
 	GEM_BUG_ON(level < 0);
 
-	if (!GUC_LOG_LEVEL_TO_ENABLED(level))
+	if (!GUC_LOG_LEVEL_IS_ENABLED(level))
 		flags |= GUC_LOG_DEFAULT_DISABLED;
 
-	if (!GUC_LOG_LEVEL_TO_VERBOSE(level))
+	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
 		flags |= GUC_LOG_DISABLED;
 	else
 		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 4cb422ceb283..ae9b2569adab 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -515,8 +515,7 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 	 * GuC is recognizing log levels starting from 0 to max, we're using 0
 	 * as indication that logging should be disabled.
 	 */
-	if (val < GUC_LOG_LEVEL_DISABLED ||
-	    val > GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX))
+	if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX)
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
@@ -527,8 +526,8 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 	}
 
 	intel_runtime_pm_get(dev_priv);
-	ret = guc_log_control(guc, GUC_LOG_LEVEL_TO_VERBOSE(val),
-			      GUC_LOG_LEVEL_TO_ENABLED(val),
+	ret = guc_log_control(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
+			      GUC_LOG_LEVEL_IS_ENABLED(val),
 			      GUC_LOG_LEVEL_TO_VERBOSITY(val));
 	intel_runtime_pm_put(dev_priv);
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index af1532c0d3e4..1b0d2fa4c0b6 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -46,14 +46,16 @@ struct intel_guc;
  * log enabling, and separate bit for default logging - which "conveniently"
  * ignores the enable bit.
  */
-#define GUC_LOG_LEVEL_DISABLED			0
-#define GUC_LOG_LEVEL_TO_ENABLED(x)		((x) > 0)
-#define GUC_LOG_LEVEL_TO_VERBOSE(x)		((x) > 1)
+#define GUC_LOG_LEVEL_DISABLED		0
+#define GUC_LOG_LEVEL_NON_VERBOSE	1
+#define GUC_LOG_LEVEL_IS_ENABLED(x)	((x) > GUC_LOG_LEVEL_DISABLED)
+#define GUC_LOG_LEVEL_IS_VERBOSE(x)	((x) > GUC_LOG_LEVEL_NON_VERBOSE)
 #define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({		\
 	typeof(x) _x = (x);				\
-	GUC_LOG_LEVEL_TO_VERBOSE(_x) ? _x - 2 : 0;	\
+	GUC_LOG_LEVEL_IS_VERBOSE(_x) ? _x - 2 : 0;	\
 })
-#define GUC_VERBOSITY_TO_LOG_LEVEL(x)		((x) + 2)
+#define GUC_VERBOSITY_TO_LOG_LEVEL(x)	((x) + 2)
+#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
 
 struct intel_guc_log {
 	u32 flags;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 34e847d0ee4c..2befcafbaabe 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -69,14 +69,15 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
 
 static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
 {
-	int guc_log_level = 1; /* non-verbose */
+	int guc_log_level;
 
-	/* Enable if we're running on platform with GuC and debug config */
-	if (HAS_GUC(dev_priv) && intel_uc_is_using_guc() &&
-	    (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
-	     IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)))
-		guc_log_level =
-			GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX);
+	if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc())
+		guc_log_level = GUC_LOG_LEVEL_DISABLED;
+	else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+		 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		guc_log_level = GUC_LOG_LEVEL_MAX;
+	else
+		guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;
 
 	/* Any platform specific fine-tuning can be done here */
 
@@ -143,19 +144,17 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
 		i915_modparams.guc_log_level = 0;
 	}
 
-	if (i915_modparams.guc_log_level >
-	    GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)) {
+	if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
 		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
 			 "guc_log_level", i915_modparams.guc_log_level,
 			 "verbosity too high");
-		i915_modparams.guc_log_level =
-			GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX);
+		i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
 	}
 
 	DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
 			 i915_modparams.guc_log_level,
 			 yesno(i915_modparams.guc_log_level),
-			 yesno(GUC_LOG_LEVEL_TO_VERBOSE(i915_modparams.guc_log_level)),
+			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
 			 GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));
 
 	/* Make sure that sanitization was done */

From ca98317b89428e6ac17be0938b467ed78654dd56 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 20 Mar 2018 10:04:48 +0000
Subject: [PATCH 0078/1286] drm/i915: Specify which engines to reset following
 semaphore/event lockups

If the GPU is stuck waiting for an event or for a semaphore, we need to
reset the GPU in order to recover. We have to tell the reset routine
which engines we want reset, but we were still using the old interface
and declaring it as "not-fatal".

Fixes: 14b730fcb8d9 ("drm/i915/tdr: Prepare error handler to accept mask of hung engines")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320100449.1360-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_hangcheck.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 42e45ae87393..c8ea510629fa 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -246,7 +246,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev_priv, 0,
+		i915_handle_error(dev_priv, BIT(engine->id),
 				  "Kicking stuck wait on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
@@ -258,7 +258,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 		default:
 			return ENGINE_DEAD;
 		case 1:
-			i915_handle_error(dev_priv, 0,
+			i915_handle_error(dev_priv, ALL_ENGINES,
 					  "Kicking stuck semaphore on %s",
 					  engine->name);
 			I915_WRITE_CTL(engine, tmp);

From ce80075470f6328e487389262c95af092d421ffc Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 20 Mar 2018 10:04:49 +0000
Subject: [PATCH 0079/1286] drm/i915: Add control flags to i915_handle_error()

Not all callers want the GPU error to be handled in the same way, so
expose a control parameter. In the first instance, some callers do not
want the heavyweight error capture, so add a bit to request that the
state be captured and saved.
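
A rough illustration of the resulting calling convention (a hypothetical
stand-in for i915_handle_error(); the real function also takes the device
pointer and touches far more state):

  #include <stdarg.h>
  #include <stdio.h>

  #define ERROR_CAPTURE (1UL << 0)   /* mirrors I915_ERROR_CAPTURE */

  /* Capture state only when asked, print a notice only when a reason
   * string is supplied. */
  static void handle_error(unsigned int engine_mask, unsigned long flags,
                           const char *fmt, ...)
  {
      char msg[80] = "";

      if (fmt) {
          va_list args;

          va_start(args, fmt);
          vsnprintf(msg, sizeof(msg), fmt, args);
          va_end(args);
      }

      if (flags & ERROR_CAPTURE)
          printf("capturing error state (mask=0x%x): %s\n", engine_mask, msg);
      printf("resetting engines 0x%x%s%s\n", engine_mask,
             fmt ? " for " : "", msg);
  }

  int main(void)
  {
      handle_error(0x1, ERROR_CAPTURE, "hang on %s", "rcs0"); /* hangcheck */
      handle_error(0x1, 0, NULL);                             /* quiet selftest */
      return 0;
  }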

v2: Pass msg down to i915_reset/i915_reset_engine so that we include the
reason for the reset in the dev_notice(), superseding the earlier option
to not print that notice.
v3: Stash the reason inside the i915->gpu_error to hand over to the direct
reset from the blocking waiter.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320100449.1360-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c           |  4 +-
 drivers/gpu/drm/i915/i915_drv.c               | 17 +++---
 drivers/gpu/drm/i915/i915_drv.h               | 10 ++--
 drivers/gpu/drm/i915/i915_gpu_error.h         |  3 +
 drivers/gpu/drm/i915/i915_irq.c               | 55 +++++++++++--------
 drivers/gpu/drm/i915/i915_request.c           |  2 +-
 drivers/gpu/drm/i915/intel_hangcheck.c        | 13 ++---
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 13 ++---
 8 files changed, 62 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 964ea1a12357..7816cd53100a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4011,8 +4011,8 @@ i915_wedged_set(void *data, u64 val)
 		engine->hangcheck.stalled = true;
 	}
 
-	i915_handle_error(i915, val, "Manually set wedged engine mask = %llx",
-			  val);
+	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
+			  "Manually set wedged engine mask = %llx", val);
 
 	wait_on_bit(&i915->gpu_error.flags,
 		    I915_RESET_HANDOFF,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ba5f150a29c0..3f637ab89e51 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1873,7 +1873,6 @@ static int i915_resume_switcheroo(struct drm_device *dev)
 /**
  * i915_reset - reset chip after a hang
  * @i915: #drm_i915_private to reset
- * @flags: Instructions
  *
  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
  * on failure.
@@ -1888,7 +1887,7 @@ static int i915_resume_switcheroo(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-void i915_reset(struct drm_i915_private *i915, unsigned int flags)
+void i915_reset(struct drm_i915_private *i915)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	int ret;
@@ -1905,8 +1904,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 	if (!i915_gem_unset_wedged(i915))
 		goto wakeup;
 
-	if (!(flags & I915_RESET_QUIET))
-		dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n");
+	if (error->reason)
+		dev_notice(i915->drm.dev,
+			   "Resetting chip for %s\n", error->reason);
 	error->reset_count++;
 
 	disable_irq(i915->drm.irq);
@@ -2007,7 +2007,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
 /**
  * i915_reset_engine - reset GPU engine to recover from a hang
  * @engine: engine to reset
- * @flags: options
+ * @msg: reason for GPU reset; or NULL for no dev_notice()
  *
  * Reset a specific GPU engine. Useful if a hang is detected.
  * Returns zero on successful reset or otherwise an error code.
@@ -2017,7 +2017,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
  *  - reset engine (which will force the engine to idle)
  *  - re-init/configure engine
  */
-int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
+int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 {
 	struct i915_gpu_error *error = &engine->i915->gpu_error;
 	struct i915_request *active_request;
@@ -2032,10 +2032,9 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
 		goto out;
 	}
 
-	if (!(flags & I915_RESET_QUIET)) {
+	if (msg)
 		dev_notice(engine->i915->drm.dev,
-			   "Resetting %s after gpu hang\n", engine->name);
-	}
+			   "Resetting %s for %s\n", engine->name, msg);
 	error->reset_engine_count[engine->id]++;
 
 	if (!engine->i915->guc.execbuf_client)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e27ba8fb64e6..c9c3b2ba6a86 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2700,10 +2700,8 @@ extern void i915_driver_unload(struct drm_device *dev);
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
 
-#define I915_RESET_QUIET BIT(0)
-extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
-extern int i915_reset_engine(struct intel_engine_cs *engine,
-			     unsigned int flags);
+extern void i915_reset(struct drm_i915_private *i915);
+extern int i915_reset_engine(struct intel_engine_cs *engine, const char *msg);
 
 extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
 extern int intel_reset_guc(struct drm_i915_private *dev_priv);
@@ -2751,10 +2749,12 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
 			   &dev_priv->gpu_error.hangcheck_work, delay);
 }
 
-__printf(3, 4)
+__printf(4, 5)
 void i915_handle_error(struct drm_i915_private *dev_priv,
 		       u32 engine_mask,
+		       unsigned long flags,
 		       const char *fmt, ...);
+#define I915_ERROR_CAPTURE BIT(0)
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
 extern void intel_irq_fini(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index ebbdf37e2879..ac5760673cc9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -269,6 +269,9 @@ struct i915_gpu_error {
 	/** Number of times an engine has been reset */
 	u32 reset_engine_count[I915_NUM_ENGINES];
 
+	/** Reason for the current *global* reset */
+	const char *reason;
+
 	/**
 	 * Waitqueue to signal when a hang is detected. Used to for waiters
 	 * to release the struct_mutex for the reset to procede.
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 44eef355e12c..fa7310766217 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2877,15 +2877,10 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-/**
- * i915_reset_device - do process context error handling work
- * @dev_priv: i915 device private
- *
- * Fire an error uevent so userspace can see that a hang or error
- * was detected.
- */
-static void i915_reset_device(struct drm_i915_private *dev_priv)
+static void i915_reset_device(struct drm_i915_private *dev_priv,
+			      const char *msg)
 {
+	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
@@ -2901,29 +2896,32 @@ static void i915_reset_device(struct drm_i915_private *dev_priv)
 	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
 		intel_prepare_reset(dev_priv);
 
+		error->reason = msg;
+
 		/* Signal that locked waiters should reset the GPU */
-		set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
-		wake_up_all(&dev_priv->gpu_error.wait_queue);
+		set_bit(I915_RESET_HANDOFF, &error->flags);
+		wake_up_all(&error->wait_queue);
 
 		/* Wait for anyone holding the lock to wakeup, without
 		 * blocking indefinitely on struct_mutex.
 		 */
 		do {
 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-				i915_reset(dev_priv, 0);
+				i915_reset(dev_priv);
 				mutex_unlock(&dev_priv->drm.struct_mutex);
 			}
-		} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+		} while (wait_on_bit_timeout(&error->flags,
 					     I915_RESET_HANDOFF,
 					     TASK_UNINTERRUPTIBLE,
 					     1));
 
+		error->reason = NULL;
+
 		intel_finish_reset(dev_priv);
 	}
 
-	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
-		kobject_uevent_env(kobj,
-				   KOBJ_CHANGE, reset_done_event);
+	if (!test_bit(I915_WEDGED, &error->flags))
+		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
 }
 
 static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
@@ -2955,6 +2953,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
  * i915_handle_error - handle a gpu error
  * @dev_priv: i915 device private
  * @engine_mask: mask representing engines that are hung
+ * @flags: control flags
  * @fmt: Error message format string
  *
  * Do some basic checking of register state at error time and
@@ -2965,16 +2964,23 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
  */
 void i915_handle_error(struct drm_i915_private *dev_priv,
 		       u32 engine_mask,
+		       unsigned long flags,
 		       const char *fmt, ...)
 {
 	struct intel_engine_cs *engine;
 	unsigned int tmp;
-	va_list args;
 	char error_msg[80];
+	char *msg = NULL;
 
-	va_start(args, fmt);
-	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
-	va_end(args);
+	if (fmt) {
+		va_list args;
+
+		va_start(args, fmt);
+		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+		va_end(args);
+
+		msg = error_msg;
+	}
 
 	/*
 	 * In most cases it's guaranteed that we get here with an RPM
@@ -2986,8 +2992,11 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 	intel_runtime_pm_get(dev_priv);
 
 	engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
-	i915_capture_error_state(dev_priv, engine_mask, error_msg);
-	i915_clear_error_registers(dev_priv);
+
+	if (flags & I915_ERROR_CAPTURE) {
+		i915_capture_error_state(dev_priv, engine_mask, msg);
+		i915_clear_error_registers(dev_priv);
+	}
 
 	/*
 	 * Try engine reset when available. We fall back to full reset if
@@ -3000,7 +3009,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 					     &dev_priv->gpu_error.flags))
 				continue;
 
-			if (i915_reset_engine(engine, 0) == 0)
+			if (i915_reset_engine(engine, msg) == 0)
 				engine_mask &= ~intel_engine_flag(engine);
 
 			clear_bit(I915_RESET_ENGINE + engine->id,
@@ -3030,7 +3039,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 				    TASK_UNINTERRUPTIBLE);
 	}
 
-	i915_reset_device(dev_priv);
+	i915_reset_device(dev_priv, msg);
 
 	for_each_engine(engine, dev_priv, tmp) {
 		clear_bit(I915_RESET_ENGINE + engine->id,
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 43c7134a9b93..2325886d1d55 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1229,7 +1229,7 @@ static bool __i915_wait_request_check_and_reset(struct i915_request *request)
 		return false;
 
 	__set_current_state(TASK_RUNNING);
-	i915_reset(request->i915, 0);
+	i915_reset(request->i915);
 	return true;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index c8ea510629fa..fd0ffb8328d0 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -246,9 +246,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev_priv, BIT(engine->id),
-				  "Kicking stuck wait on %s",
-				  engine->name);
+		i915_handle_error(dev_priv, BIT(engine->id), 0,
+				  "stuck wait on %s", engine->name);
 		I915_WRITE_CTL(engine, tmp);
 		return ENGINE_WAIT_KICK;
 	}
@@ -258,8 +257,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 		default:
 			return ENGINE_DEAD;
 		case 1:
-			i915_handle_error(dev_priv, ALL_ENGINES,
-					  "Kicking stuck semaphore on %s",
+			i915_handle_error(dev_priv, ALL_ENGINES, 0,
+					  "stuck semaphore on %s",
 					  engine->name);
 			I915_WRITE_CTL(engine, tmp);
 			return ENGINE_WAIT_KICK;
@@ -386,13 +385,13 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
 	if (stuck != hung)
 		hung &= ~stuck;
 	len = scnprintf(msg, sizeof(msg),
-			"%s on ", stuck == hung ? "No progress" : "Hang");
+			"%s on ", stuck == hung ? "no progress" : "hang");
 	for_each_engine_masked(engine, i915, hung, tmp)
 		len += scnprintf(msg + len, sizeof(msg) - len,
 				 "%s, ", engine->name);
 	msg[len-2] = '\0';
 
-	return i915_handle_error(i915, hung, "%s", msg);
+	return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg);
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index df7898c8edcb..4372826998aa 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -433,7 +433,7 @@ static int igt_global_reset(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	reset_count = i915_reset_count(&i915->gpu_error);
 
-	i915_reset(i915, I915_RESET_QUIET);
+	i915_reset(i915);
 
 	if (i915_reset_count(&i915->gpu_error) == reset_count) {
 		pr_err("No GPU reset recorded!\n");
@@ -518,7 +518,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 			engine->hangcheck.seqno =
 				intel_engine_get_seqno(engine);
 
-			err = i915_reset_engine(engine, I915_RESET_QUIET);
+			err = i915_reset_engine(engine, NULL);
 			if (err) {
 				pr_err("i915_reset_engine failed\n");
 				break;
@@ -725,7 +725,7 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
 			engine->hangcheck.seqno =
 				intel_engine_get_seqno(engine);
 
-			err = i915_reset_engine(engine, I915_RESET_QUIET);
+			err = i915_reset_engine(engine, NULL);
 			if (err) {
 				pr_err("i915_reset_engine(%s:%s) failed, err=%d\n",
 				       engine->name, active ? "active" : "idle", err);
@@ -865,7 +865,6 @@ static int igt_wait_reset(void *arg)
 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
-		i915_reset(i915, 0);
 		i915_gem_set_wedged(i915);
 
 		err = -EIO;
@@ -962,7 +961,6 @@ static int igt_reset_queue(void *arg)
 				i915_request_put(rq);
 				i915_request_put(prev);
 
-				i915_reset(i915, 0);
 				i915_gem_set_wedged(i915);
 
 				err = -EIO;
@@ -971,7 +969,7 @@ static int igt_reset_queue(void *arg)
 
 			reset_count = fake_hangcheck(prev);
 
-			i915_reset(i915, I915_RESET_QUIET);
+			i915_reset(i915);
 
 			GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
 					    &i915->gpu_error.flags));
@@ -1069,7 +1067,6 @@ static int igt_handle_error(void *arg)
 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
-		i915_reset(i915, 0);
 		i915_gem_set_wedged(i915);
 
 		err = -EIO;
@@ -1084,7 +1081,7 @@ static int igt_handle_error(void *arg)
 	engine->hangcheck.stalled = true;
 	engine->hangcheck.seqno = intel_engine_get_seqno(engine);
 
-	i915_handle_error(i915, intel_engine_flag(engine), "%s", __func__);
+	i915_handle_error(i915, intel_engine_flag(engine), 0, NULL);
 
 	xchg(&i915->gpu_error.first_error, error);
 

From 8b5eb5e2b5d2ddf9185e55669f22ea87d28f4e90 Mon Sep 17 00:00:00 2001
From: Kelvin Gardiner <kelvin.gardiner@intel.com>
Date: Tue, 20 Mar 2018 12:45:21 -0700
Subject: [PATCH 0080/1286] drm/i915/icl: Added ICL 11 slice, subslice and EU
 fuse detection

This patch adds support to detect ICL slice, subslice and EU fuse
settings.

Add addresses for the ICL 11 slice, subslice and EU fuse registers.
These register addresses are the same as on previous platforms, but the
format and/or the meaning of the information is different. Therefore,
Gen11 defines for these registers are added.
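
A small decoding sketch (not part of the patch; register values are made
up, the masks mirror the new Gen11 defines below, and note that the
subslice and EU registers store disable bits, hence the inversions):

  #include <stdint.h>
  #include <stdio.h>

  #define GT_S_ENA_MASK  0xffu            /* GEN11_GT_S_ENA_MASK */
  #define EU_DIS_MASK    0xffu            /* GEN11_EU_DIS_MASK */

  int main(void)
  {
      /* Hypothetical raw register values. */
      uint32_t slice_enable_reg = 0x01;       /* slice 0 enabled */
      uint32_t subslice_disable_reg = 0xf0;   /* subslices 4-7 fused off */
      uint32_t eu_disable_reg = 0x0f;         /* EUs 0-3 fused off */

      uint8_t s_en = slice_enable_reg & GT_S_ENA_MASK;
      uint8_t ss_mask = (uint8_t)~subslice_disable_reg;       /* enable mask */
      uint8_t eu_en = (uint8_t)~(eu_disable_reg & EU_DIS_MASK);

      printf("slice mask    0x%02x\n", s_en);      /* 0x01 */
      printf("subslice mask 0x%02x\n", ss_mask);   /* 0x0f */
      printf("eu enable     0x%02x\n", eu_en);     /* 0xf0 */
      return 0;
  }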

Bspec: 9731
Bspec: 20643
Bspec: 20673

v2: Update fusing information storage after introducing the new query
    uAPI (Lionel)

v3 (Oscar):
  - The maximum number of slices in ICL 11 is 1
  - The subslice disable fuse can potentially store information in
    all bits
  - GEN_MAX_SUBSLICES has to be increased to 8
  - Don't trust the slice enabled fuse outside the max number of
    expected slices
  - Indentation fix and some reordering and renaming of local
    variables

v4: Use single space after Cc tag

Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Kelvin Gardiner <kelvin.gardiner@intel.com>
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1521575121-9577-1-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          |  8 +++++
 drivers/gpu/drm/i915/intel_device_info.c | 43 +++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_device_info.h |  2 +-
 3 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 699292eae02e..bac3e926583a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2554,6 +2554,14 @@ enum i915_power_well_id {
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT	16
 #define   GEN11_GT_VEBOX_DISABLE_MASK	(0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
+#define GEN11_EU_DISABLE _MMIO(0x9134)
+#define GEN11_EU_DIS_MASK 0xFF
+
+#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138)
+#define GEN11_GT_S_ENA_MASK 0xFF
+
+#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL	_MMIO(0x12050)
 #define   GEN6_BSD_SLEEP_MSG_DISABLE	(1 << 0)
 #define   GEN6_BSD_SLEEP_FLUSH_DISABLE	(1 << 2)
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 4babfc6ee45b..a504281e2afa 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -158,6 +158,45 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
 	return total;
 }
 
+static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+	u8 s_en;
+	u32 ss_en, ss_en_mask;
+	u8 eu_en;
+	int s;
+
+	sseu->max_slices = 1;
+	sseu->max_subslices = 8;
+	sseu->max_eus_per_subslice = 8;
+
+	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
+	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
+	ss_en_mask = BIT(sseu->max_subslices) - 1;
+	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
+
+	for (s = 0; s < sseu->max_slices; s++) {
+		if (s_en & BIT(s)) {
+			int ss_idx = sseu->max_subslices * s;
+			int ss;
+
+			sseu->slice_mask |= BIT(s);
+			sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
+			for (ss = 0; ss < sseu->max_subslices; ss++) {
+				if (sseu->subslice_mask[s] & BIT(ss))
+					sseu_set_eus(sseu, s, ss, eu_en);
+			}
+		}
+	}
+	sseu->eu_per_subslice = hweight8(eu_en);
+	sseu->eu_total = compute_eu_total(sseu);
+
+	/* ICL has no power gating restrictions. */
+	sseu->has_slice_pg = 1;
+	sseu->has_subslice_pg = 1;
+	sseu->has_eu_pg = 1;
+}
+
 static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
 {
 	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
@@ -768,8 +807,10 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 		broadwell_sseu_info_init(dev_priv);
 	else if (INTEL_GEN(dev_priv) == 9)
 		gen9_sseu_info_init(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 10)
+	else if (INTEL_GEN(dev_priv) == 10)
 		gen10_sseu_info_init(dev_priv);
+	else if (INTEL_INFO(dev_priv)->gen >= 11)
+		gen11_sseu_info_init(dev_priv);
 
 	/* Initialize command stream timestamp frequency */
 	info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 0cbb92223013..933e31669557 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -114,7 +114,7 @@ enum intel_platform {
 	func(has_ipc);
 
 #define GEN_MAX_SLICES		(6) /* CNL upper bound */
-#define GEN_MAX_SUBSLICES	(7)
+#define GEN_MAX_SUBSLICES	(8) /* ICL upper bound */
 
 struct sseu_dev_info {
 	u8 slice_mask;

From fa265275910f9d2396f8656317196c830878bd40 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Wed, 14 Mar 2018 20:04:29 +0000
Subject: [PATCH 0081/1286] drm/i915/huc: Check HuC status in dedicated
 function

We try to keep all HuC related code in a dedicated file.
There is no need to peek at the HuC register directly while
handling the getparam ioctl.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Anusha Srivatsa <anusha.srivatsa@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180314200429.40132-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c  |  6 +++---
 drivers/gpu/drm/i915/intel_huc.c | 25 +++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_huc.h |  1 +
 3 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3f637ab89e51..a7d3275f45d2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -377,9 +377,9 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
 		break;
 	case I915_PARAM_HUC_STATUS:
-		intel_runtime_pm_get(dev_priv);
-		value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
-		intel_runtime_pm_put(dev_priv);
+		value = intel_huc_check_status(&dev_priv->huc);
+		if (value < 0)
+			return value;
 		break;
 	case I915_PARAM_MMAP_GTT_VERSION:
 		/* Though we've started our numbering from 1, and so class all
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 1d6c47b17935..291285277403 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -92,3 +92,28 @@ fail:
 	DRM_ERROR("HuC: Authentication failed %d\n", ret);
 	return ret;
 }
+
+/**
+ * intel_huc_check_status() - check HuC status
+ * @huc: intel_huc structure
+ *
+ * This function reads status register to verify if HuC
+ * firmware was successfully loaded.
+ *
+ * Returns positive value if HuC firmware is loaded and verified
+ * and -ENODEV if HuC is not present.
+ */
+int intel_huc_check_status(struct intel_huc *huc)
+{
+	struct drm_i915_private *dev_priv = huc_to_i915(huc);
+	u32 status;
+
+	if (!HAS_HUC(dev_priv))
+		return -ENODEV;
+
+	intel_runtime_pm_get(dev_priv);
+	status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+	intel_runtime_pm_put(dev_priv);
+
+	return status;
+}
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index b1858503c451..aa854907abac 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -37,6 +37,7 @@ struct intel_huc {
 
 void intel_huc_init_early(struct intel_huc *huc);
 int intel_huc_auth(struct intel_huc *huc);
+int intel_huc_check_status(struct intel_huc *huc);
 
 static inline int intel_huc_sanitize(struct intel_huc *huc)
 {

From 7beae44d7b295323d8416526fd612ee166851baf Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Tue, 20 Mar 2018 18:14:17 +0000
Subject: [PATCH 0082/1286] drm/i915/guc: Unify naming of private GuC action
 functions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We should avoid using the guc_log prefix for functions that don't
operate on the GuC log, but rather request an action from the GuC.
Better to use the guc_action prefix.

v2: rebase + naming compromise
v3: rebase

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320181419.35576-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc_log.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index ae9b2569adab..957f7edc8ead 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -38,7 +38,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log);
  * registers value.
  */
 
-static int guc_log_flush_complete(struct intel_guc *guc)
+static int guc_action_flush_log_complete(struct intel_guc *guc)
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
@@ -47,7 +47,7 @@ static int guc_log_flush_complete(struct intel_guc *guc)
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-static int guc_log_flush(struct intel_guc *guc)
+static int guc_action_flush_log(struct intel_guc *guc)
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
@@ -57,8 +57,8 @@ static int guc_log_flush(struct intel_guc *guc)
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-static int guc_log_control(struct intel_guc *guc, bool enable,
-			   bool default_logging, u32 verbosity)
+static int guc_action_control_log(struct intel_guc *guc, bool enable,
+				  bool default_logging, u32 verbosity)
 {
 	union guc_log_control control_val = {
 		{
@@ -449,7 +449,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
 	 * time, so get/put should be really quick.
 	 */
 	intel_runtime_pm_get(dev_priv);
-	guc_log_flush_complete(guc);
+	guc_action_flush_log_complete(guc);
 	intel_runtime_pm_put(dev_priv);
 }
 
@@ -526,9 +526,9 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 	}
 
 	intel_runtime_pm_get(dev_priv);
-	ret = guc_log_control(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
-			      GUC_LOG_LEVEL_IS_ENABLED(val),
-			      GUC_LOG_LEVEL_TO_VERBOSITY(val));
+	ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
+				     GUC_LOG_LEVEL_IS_ENABLED(val),
+				     GUC_LOG_LEVEL_TO_VERBOSITY(val));
 	intel_runtime_pm_put(dev_priv);
 	if (ret) {
 		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
@@ -610,7 +610,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 	flush_work(&log->relay.flush_work);
 
 	intel_runtime_pm_get(i915);
-	guc_log_flush(guc);
+	guc_action_flush_log(guc);
 	intel_runtime_pm_put(i915);
 
 	/* GuC would have updated log buffer by now, so capture it */

From 154374c331b01acbaf6a6957a7f9e65192f6a459 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Tue, 20 Mar 2018 18:14:18 +0000
Subject: [PATCH 0083/1286] drm/i915/guc: Drop union guc_log_control
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Usually we use shift/mask macros for bit field definitions.
Union guc_log_control was not following that pattern.

Additional bonus:

add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-25 (-25)
Function                                     old     new   delta
intel_guc_log_level_set                      388     363     -25

v2: prevent out-of-range verbosity (MichalWi)
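
For illustration, a small stand-alone sketch (not part of the patch)
showing that the new shift/mask encoding produces the same 32-bit value
as the old bitfield union; it assumes the usual GCC little-endian
bitfield layout and mirrors the macro values added to intel_guc_fwif.h:

    #include <assert.h>
    #include <stdint.h>

    /* Old bitfield encoding. */
    union guc_log_control {
            struct {
                    uint32_t logging_enabled:1;
                    uint32_t reserved1:3;
                    uint32_t verbosity:4;
                    uint32_t default_logging:1;
                    uint32_t reserved2:23;
            };
            uint32_t value;
    };

    /* New shift/mask encoding. */
    #define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
    #define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
    #define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)

    int main(void)
    {
            union guc_log_control old = {
                    { .logging_enabled = 1, .verbosity = 3, .default_logging = 1 },
            };
            uint32_t new_val = GUC_LOG_CONTROL_LOGGING_ENABLED |
                               (3 << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
                               GUC_LOG_CONTROL_DEFAULT_LOGGING;

            assert(old.value == new_val);   /* both encode 0x131 */
            return 0;
    }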

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320181419.35576-2-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc_fwif.h | 16 +++++-----------
 drivers/gpu/drm/i915/intel_guc_log.c  | 13 +++++--------
 2 files changed, 10 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 4971685a2ea8..72941bd704fd 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -534,17 +534,6 @@ struct guc_log_buffer_state {
 	u32 version;
 } __packed;
 
-union guc_log_control {
-	struct {
-		u32 logging_enabled:1;
-		u32 reserved1:3;
-		u32 verbosity:4;
-		u32 default_logging:1;
-		u32 reserved2:23;
-	};
-	u32 value;
-} __packed;
-
 struct guc_ctx_report {
 	u32 report_return_status;
 	u32 reserved1[64];
@@ -603,6 +592,11 @@ enum intel_guc_report_status {
 	INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
 };
 
+#define GUC_LOG_CONTROL_LOGGING_ENABLED	(1 << 0)
+#define GUC_LOG_CONTROL_VERBOSITY_SHIFT	4
+#define GUC_LOG_CONTROL_VERBOSITY_MASK	(0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
+#define GUC_LOG_CONTROL_DEFAULT_LOGGING	(1 << 8)
+
 /*
  * The GuC sends its response to a command by overwriting the
  * command in SS0. The response is distinguishable from a command
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 957f7edc8ead..188d390e2099 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -60,18 +60,15 @@ static int guc_action_flush_log(struct intel_guc *guc)
 static int guc_action_control_log(struct intel_guc *guc, bool enable,
 				  bool default_logging, u32 verbosity)
 {
-	union guc_log_control control_val = {
-		{
-			.logging_enabled = enable,
-			.verbosity = verbosity,
-			.default_logging = default_logging,
-		},
-	};
 	u32 action[] = {
 		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
-		control_val.value
+		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
+		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
+		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
 	};
 
+	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);
+
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 

From bc598425eb18d40a18f18d67f7c460189c43a3af Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Tue, 20 Mar 2018 18:14:19 +0000
Subject: [PATCH 0084/1286] drm/i915/guc: Move enable/disable msg functions to
 GuC header
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

While today we only modify the GuC enabled message mask from the
GuC log code, these helpers should be defined as generic GuC code
to allow future code reuse.
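
As a rough usage sketch, a future client would only need to call the
generic helper with its own message bits; the wrapper name below is
hypothetical:

    static void guc_foo_enable_events(struct intel_guc *guc)
    {
            /* hypothetical future user of the generic helper */
            intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
    }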

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320181419.35576-3-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.h     | 14 ++++++++++++
 drivers/gpu/drm/i915/intel_guc_log.c | 34 +++++++++++++---------------
 2 files changed, 30 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 9a95d1518aa9..13f3d1dbf38d 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -155,4 +155,18 @@ static inline int intel_guc_sanitize(struct intel_guc *guc)
 	return 0;
 }
 
+static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
+{
+	spin_lock_irq(&guc->irq_lock);
+	guc->msg_enabled_mask |= mask;
+	spin_unlock_irq(&guc->irq_lock);
+}
+
+static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
+{
+	spin_lock_irq(&guc->irq_lock);
+	guc->msg_enabled_mask &= ~mask;
+	spin_unlock_irq(&guc->irq_lock);
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 188d390e2099..a401f7e72c14 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -72,27 +72,25 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-static void guc_flush_log_msg_enable(struct intel_guc *guc)
-{
-	spin_lock_irq(&guc->irq_lock);
-	guc->msg_enabled_mask |= INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
-				 INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED;
-	spin_unlock_irq(&guc->irq_lock);
-}
-
-static void guc_flush_log_msg_disable(struct intel_guc *guc)
-{
-	spin_lock_irq(&guc->irq_lock);
-	guc->msg_enabled_mask &= ~(INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
-				   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
-	spin_unlock_irq(&guc->irq_lock);
-}
-
 static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
 {
 	return container_of(log, struct intel_guc, log);
 }
 
+static void guc_log_enable_flush_events(struct intel_guc_log *log)
+{
+	intel_guc_enable_msg(log_to_guc(log),
+			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
+			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
+}
+
+static void guc_log_disable_flush_events(struct intel_guc_log *log)
+{
+	intel_guc_disable_msg(log_to_guc(log),
+			      INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
+			      INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
+}
+
 /*
  * Sub buffer switch callback. Called whenever relay has to switch to a new
  * sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -576,7 +574,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
 
 	mutex_unlock(&log->relay.lock);
 
-	guc_flush_log_msg_enable(log_to_guc(log));
+	guc_log_enable_flush_events(log);
 
 	/*
 	 * When GuC is logging without us relaying to userspace, we're ignoring
@@ -616,7 +614,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 
 void intel_guc_log_relay_close(struct intel_guc_log *log)
 {
-	guc_flush_log_msg_disable(log_to_guc(log));
+	guc_log_disable_flush_events(log);
 	flush_work(&log->relay.flush_work);
 
 	mutex_lock(&log->relay.lock);

From e9c7e651798b340a175e18fc70ba41c7008d0760 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 19 Mar 2018 12:50:49 +0000
Subject: [PATCH 0085/1286] drm/i915/guc: Handle GuC log flush event in
 dedicated function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We already try to keep all GuC log related code in a separate file;
handling the flush event should be placed there too. This will also
allow future code reuse.

v2: rebased

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319125049.48932-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c     | 3 +--
 drivers/gpu/drm/i915/intel_guc_log.c | 5 +++++
 drivers/gpu/drm/i915/intel_guc_log.h | 2 ++
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 4b7c9c6415dd..8f93f5bef8fd 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -398,8 +398,7 @@ void intel_guc_to_host_event_handler(struct intel_guc *guc)
 
 	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
-		queue_work(guc->log.relay.flush_wq,
-			   &guc->log.relay.flush_work);
+		intel_guc_log_handle_flush_event(&guc->log);
 }
 
 int intel_guc_sample_forcewake(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index a401f7e72c14..401e1704d61e 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -623,3 +623,8 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
 	guc_log_relay_destroy(log);
 	mutex_unlock(&log->relay.lock);
 }
+
+void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
+{
+	queue_work(log->relay.flush_wq, &log->relay.flush_work);
+}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index 1b0d2fa4c0b6..fa80535a6f9d 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -87,4 +87,6 @@ int intel_guc_log_relay_open(struct intel_guc_log *log);
 void intel_guc_log_relay_flush(struct intel_guc_log *log);
 void intel_guc_log_relay_close(struct intel_guc_log *log);
 
+void intel_guc_log_handle_flush_event(struct intel_guc_log *log);
+
 #endif

From d871bfd0089f50e3010f361e804d290abe67119c Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Tue, 20 Mar 2018 16:20:20 +0000
Subject: [PATCH 0086/1286] drm/i915/guc: Unify parameters of public CT
 functions

There is no need to mix parameter types in public CT functions
as we can always accept intel_guc_ct.

v2: fix 'Return' doc, s/dev_priv/i915 (Sagar)
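
For reference, a minimal stand-alone sketch of the container_of()
back-pointer pattern that the new ct_to_guc() helper relies on; the
struct names are simplified stand-ins and the macro omits the kernel's
type checking:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct guc_ct { int owner; };             /* stand-in for intel_guc_ct */
    struct guc { int id; struct guc_ct ct; }; /* stand-in for intel_guc */

    static struct guc *ct_to_guc(struct guc_ct *ct)
    {
            return container_of(ct, struct guc, ct);
    }

    int main(void)
    {
            struct guc g = { .id = 7 };

            /* recover the enclosing struct from the embedded member */
            printf("%d\n", ct_to_guc(&g.ct)->id);   /* prints 7 */
            return 0;
    }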

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320162020.38672-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc_ct.c | 41 +++++++++++++++++++----------
 drivers/gpu/drm/i915/intel_guc_ct.h |  6 ++---
 drivers/gpu/drm/i915/intel_uc.c     |  4 +--
 3 files changed, 31 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 0a0d3d523c23..a726283489d1 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -28,12 +28,21 @@ enum { CTB_SEND = 0, CTB_RECV = 1 };
 
 enum { CTB_OWNER_HOST = 0 };
 
+/**
+ * intel_guc_ct_init_early - Initialize CT state without requiring device access
+ * @ct: pointer to CT struct
+ */
 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 {
 	/* we're using static channel owners */
 	ct->host_channel.owner = CTB_OWNER_HOST;
 }
 
+static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
+{
+	return container_of(ct, struct intel_guc, ct);
+}
+
 static inline const char *guc_ct_buffer_type_to_str(u32 type)
 {
 	switch (type) {
@@ -416,19 +425,21 @@ static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len)
 }
 
 /**
- * Enable buffer based command transport
+ * intel_guc_ct_enable - Enable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
  * Shall only be called for platforms with HAS_GUC_CT.
- * @guc:	the guc
- * return:	0 on success
- *		non-zero on failure
+ *
+ * Return: 0 on success, a negative errno code on failure.
  */
-int intel_guc_enable_ct(struct intel_guc *guc)
+int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+	struct intel_guc *guc = ct_to_guc(ct);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+	struct intel_guc_ct_channel *ctch = &ct->host_channel;
 	int err;
 
-	GEM_BUG_ON(!HAS_GUC_CT(dev_priv));
+	GEM_BUG_ON(!HAS_GUC_CT(i915));
 
 	err = ctch_open(guc, ctch);
 	if (unlikely(err))
@@ -441,16 +452,18 @@ int intel_guc_enable_ct(struct intel_guc *guc)
 }
 
 /**
- * Disable buffer based command transport.
+ * intel_guc_ct_disable - Disable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
  * Shall only be called for platforms with HAS_GUC_CT.
- * @guc: the guc
  */
-void intel_guc_disable_ct(struct intel_guc *guc)
+void intel_guc_ct_disable(struct intel_guc_ct *ct)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+	struct intel_guc *guc = ct_to_guc(ct);
+	struct drm_i915_private *i915 = guc_to_i915(guc);
+	struct intel_guc_ct_channel *ctch = &ct->host_channel;
 
-	GEM_BUG_ON(!HAS_GUC_CT(dev_priv));
+	GEM_BUG_ON(!HAS_GUC_CT(i915));
 
 	if (!ctch_is_open(ctch))
 		return;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index 6d97f36fcc62..595c8ad5bd4a 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -78,9 +78,7 @@ struct intel_guc_ct {
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);
-
-/* XXX: move to intel_uc.h ? don't fit there either */
-int intel_guc_enable_ct(struct intel_guc *guc);
-void intel_guc_disable_ct(struct intel_guc *guc);
+int intel_guc_ct_enable(struct intel_guc_ct *ct);
+void intel_guc_ct_disable(struct intel_guc_ct *ct);
 
 #endif /* _INTEL_GUC_CT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 2befcafbaabe..34f8a2c219d8 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -231,7 +231,7 @@ static int guc_enable_communication(struct intel_guc *guc)
 	gen9_enable_guc_interrupts(dev_priv);
 
 	if (HAS_GUC_CT(dev_priv))
-		return intel_guc_enable_ct(guc);
+		return intel_guc_ct_enable(&guc->ct);
 
 	guc->send = intel_guc_send_mmio;
 	return 0;
@@ -242,7 +242,7 @@ static void guc_disable_communication(struct intel_guc *guc)
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
 	if (HAS_GUC_CT(dev_priv))
-		intel_guc_disable_ct(guc);
+		intel_guc_ct_disable(&guc->ct);
 
 	gen9_disable_guc_interrupts(dev_priv);
 

From 9153e6b7c85edbc89e874e5c83f86217c53dcfaf Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 21 Mar 2018 09:10:27 +0000
Subject: [PATCH 0087/1286] drm/i915/execlists: Use a locked clear_bit() for
 synchronisation with interrupt

We were relying on the uncached reads when processing the CSB to provide
ourselves with the serialisation with the interrupt handler (so we could
detect new interrupts in the middle of processing the old one). However,
in commit 767a983ab255 ("drm/i915/execlists: Read the context-status HEAD
from the HWSP") those uncached reads were eliminated (on one path at
least) and along with them our serialisation. The result is that we
would very rarely miss notification of a new interrupt and leave a
context-switch unprocessed, hanging the GPU.
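
The intended pattern is roughly the following sketch (posting happens in
the interrupt handler, consuming in the tasklet; see the diff below for
the exact code):

    /* interrupt handler: post the event */
    set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);

    /* tasklet: clear *before* reading the CSB, so an interrupt that
     * arrives while we read re-sets the bit and forces another loop
     * instead of being lost */
    clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
    smp_mb__after_atomic();
    /* ... read and process the CSB ... */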

Fixes: 767a983ab255 ("drm/i915/execlists: Read the context-status HEAD from the HWSP")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321091027.21034-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 53f1c009ed7b..67b6a0f658d6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -831,7 +831,8 @@ static void execlists_submission_tasklet(unsigned long data)
 	struct drm_i915_private *dev_priv = engine->i915;
 	bool fw = false;
 
-	/* We can skip acquiring intel_runtime_pm_get() here as it was taken
+	/*
+	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
 	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
 	 * not be relinquished until the device is idle (see
 	 * i915_gem_idle_work_handler()). As a precaution, we make sure
@@ -840,7 +841,8 @@ static void execlists_submission_tasklet(unsigned long data)
 	 */
 	GEM_BUG_ON(!dev_priv->gt.awake);
 
-	/* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+	/*
+	 * Prefer doing test_and_clear_bit() as a two stage operation to avoid
 	 * imposing the cost of a locked atomic transaction when submitting a
 	 * new request (outside of the context-switch interrupt).
 	 */
@@ -856,17 +858,10 @@ static void execlists_submission_tasklet(unsigned long data)
 			execlists->csb_head = -1; /* force mmio read of CSB ptrs */
 		}
 
-		/* The write will be ordered by the uncached read (itself
-		 * a memory barrier), so we do not need another in the form
-		 * of a locked instruction. The race between the interrupt
-		 * handler and the split test/clear is harmless as we order
-		 * our clear before the CSB read. If the interrupt arrived
-		 * first between the test and the clear, we read the updated
-		 * CSB and clear the bit. If the interrupt arrives as we read
-		 * the CSB or later (i.e. after we had cleared the bit) the bit
-		 * is set and we do a new loop.
-		 */
-		__clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+		/* Clear before reading to catch new interrupts */
+		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+		smp_mb__after_atomic();
+
 		if (unlikely(execlists->csb_head == -1)) { /* following a reset */
 			if (!fw) {
 				intel_uncore_forcewake_get(dev_priv,

From b90eed08d8d0f07f9f08074645d4470e121ff6f5 Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Mon, 12 Mar 2018 20:46:45 -0700
Subject: [PATCH 0088/1286] drm/i915/psr: Move PSR aux setup to its own
 function.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Non-functional change useful for the following patch.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180313034646.3721-1-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/intel_psr.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 317cb4a12693..2c001f4fba3e 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -246,7 +246,7 @@ static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
 		return EDP_PSR_AUX_DATA(index);
 }
 
-static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
@@ -267,6 +267,24 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
 
 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
+
+	/* Setup AUX registers */
+	for (i = 0; i < sizeof(aux_msg); i += 4)
+		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
+			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
+	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+					     aux_clock_divider);
+	I915_WRITE(aux_ctl_reg, aux_ctl);
+}
+
+static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
 
 	/* Enable AUX frame sync at sink */
 	if (dev_priv->psr.aux_frame_sync)
@@ -285,16 +303,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
 				   DP_PSR_ENABLE);
 
-	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
-
-	/* Setup AUX registers */
-	for (i = 0; i < sizeof(aux_msg); i += 4)
-		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
-			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
-
-	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
-					     aux_clock_divider);
-	I915_WRITE(aux_ctl_reg, aux_ctl);
+	hsw_psr_setup_aux(intel_dp);
 }
 
 static void vlv_psr_enable_source(struct intel_dp *intel_dp,

From d544e918ff132488770ab2cb6b03e2af69497d1c Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Mon, 12 Mar 2018 20:46:46 -0700
Subject: [PATCH 0089/1286] drm/i915/psr: Remove open-coded PSR AUX
 transactions for SKL+
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

HSW and BDW have SRD_AUX_{CTL, STATUS} registers that the driver needs to
set up for the HW to use whenever exiting PSR. SKL+ hardware uses hardcoded
values instead and does not need any registers to be set up. So, use
drm_dp_dpcd_writeb() for a one-time write during PSR enable and set up the
PSR AUX registers on HSW and BDW for later use by the HW.

We also end up writing to reserved bits in SRD_AUX_CTL by reusing
intel_dp->get_aux_send_ctl() for HSW and BDW; fix this.

Since the AUX register setup is source side programming, move the call
to enable_source() from enable_sink().

Cc: José Roberto de Souza <jose.souza@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Jose Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180313034646.3721-2-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h  |  6 ++++
 drivers/gpu/drm/i915/intel_psr.c | 55 +++++++++++++-------------------
 2 files changed, 28 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bac3e926583a..4e31dfff940a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3846,6 +3846,12 @@ enum {
 #define   EDP_PSR_IDLE_FRAME_SHIFT		0
 
 #define EDP_PSR_AUX_CTL				_MMIO(dev_priv->psr_mmio_base + 0x10)
+#define   EDP_PSR_AUX_CTL_TIME_OUT_MASK		(3 << 26)
+#define   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK	(0x1f << 20)
+#define   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK	(0xf << 16)
+#define   EDP_PSR_AUX_CTL_ERROR_INTERRUPT	(1 << 11)
+#define   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK	(0x7ff)
+
 #define EDP_PSR_AUX_DATA(i)			_MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
 
 #define EDP_PSR_STATUS				_MMIO(dev_priv->psr_mmio_base + 0x40)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2c001f4fba3e..b8e083e10029 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -228,31 +228,12 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
 			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 }
 
-static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
-				       enum port port)
-{
-	if (INTEL_GEN(dev_priv) >= 9)
-		return DP_AUX_CH_CTL(port);
-	else
-		return EDP_PSR_AUX_CTL;
-}
-
-static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
-					enum port port, int index)
-{
-	if (INTEL_GEN(dev_priv) >= 9)
-		return DP_AUX_CH_DATA(port, index);
-	else
-		return EDP_PSR_AUX_DATA(index);
-}
-
 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	uint32_t aux_clock_divider;
-	i915_reg_t aux_ctl_reg;
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+	u32 aux_clock_divider, aux_ctl;
+	int i;
 	static const uint8_t aux_msg[] = {
 		[0] = DP_AUX_NATIVE_WRITE << 4,
 		[1] = DP_SET_POWER >> 8,
@@ -260,23 +241,25 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 		[3] = 1 - 1,
 		[4] = DP_SET_POWER_D0,
 	};
-	enum port port = dig_port->base.port;
-	u32 aux_ctl;
-	int i;
+	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
+			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
+			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
+			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
 
 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
-
-	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
-
-	/* Setup AUX registers */
 	for (i = 0; i < sizeof(aux_msg); i += 4)
-		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
+		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
 			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
+	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+	/* Start with bits set for DDI_AUX_CTL register */
 	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
 					     aux_clock_divider);
-	I915_WRITE(aux_ctl_reg, aux_ctl);
+
+	/* Select only valid bits for SRD_AUX_CTL */
+	aux_ctl &= psr_aux_mask;
+	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
 }
 
 static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
@@ -303,7 +286,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
 				   DP_PSR_ENABLE);
 
-	hsw_psr_setup_aux(intel_dp);
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 }
 
 static void vlv_psr_enable_source(struct intel_dp *intel_dp,
@@ -599,6 +582,12 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 
 	psr_aux_io_power_get(intel_dp);
 
+	/* Only HSW and BDW have PSR AUX registers that need to be set up.
+	 * SKL+ use hardcoded values for PSR AUX transactions.
+	 */
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		hsw_psr_setup_aux(intel_dp);
+
 	if (dev_priv->psr.psr2_support) {
 		chicken = PSR2_VSC_ENABLE_PROG_HEADER;
 		if (dev_priv->psr.y_cord_support)

From 3ae7fb202d86b7847f237daa474f3946bdc3b0c6 Mon Sep 17 00:00:00 2001
From: Haneen Mohammed <hamohammed.sa@gmail.com>
Date: Tue, 20 Mar 2018 09:37:49 -0400
Subject: [PATCH 0090/1286] drm: Remove drm_property_{un/reference}_blob
 aliases

This patch removes the compatibility aliases
drm_property_{reference/unreference}_blob for
drm_property_blob_{get/put}, since all callers have been converted to
the preferred _{get/put} variants.

Also remove the helpers from the semantic patch drm-get-put.cocci.
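
A typical caller conversion looks like the following sketch (the blob
variable is hypothetical):

    /* before (compat alias, now removed) */
    drm_property_unreference_blob(blob);

    /* after (preferred helper) */
    drm_property_blob_put(blob);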

Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320133749.GA11695@haneen-VirtualBox
---
 include/drm/drm_property.h               | 26 ------------------------
 scripts/coccinelle/api/drm-get-put.cocci | 10 ---------
 2 files changed, 36 deletions(-)

diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index d1423c7f3c73..ab8167baade5 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -280,32 +280,6 @@ bool drm_property_replace_blob(struct drm_property_blob **blob,
 struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob);
 void drm_property_blob_put(struct drm_property_blob *blob);
 
-/**
- * drm_property_reference_blob - acquire a blob property reference
- * @blob: DRM blob property
- *
- * This is a compatibility alias for drm_property_blob_get() and should not be
- * used by new code.
- */
-static inline struct drm_property_blob *
-drm_property_reference_blob(struct drm_property_blob *blob)
-{
-	return drm_property_blob_get(blob);
-}
-
-/**
- * drm_property_unreference_blob - release a blob property reference
- * @blob: DRM blob property
- *
- * This is a compatibility alias for drm_property_blob_put() and should not be
- * used by new code.
- */
-static inline void
-drm_property_unreference_blob(struct drm_property_blob *blob)
-{
-	drm_property_blob_put(blob);
-}
-
 /**
  * drm_property_find - find property object
  * @dev: DRM device
diff --git a/scripts/coccinelle/api/drm-get-put.cocci b/scripts/coccinelle/api/drm-get-put.cocci
index ceb71ea7f61c..3a09c97ad87d 100644
--- a/scripts/coccinelle/api/drm-get-put.cocci
+++ b/scripts/coccinelle/api/drm-get-put.cocci
@@ -40,12 +40,6 @@ expression object;
 - drm_gem_object_unreference_unlocked(object)
 + drm_gem_object_put_unlocked(object)
 |
-- drm_property_reference_blob(object)
-+ drm_property_blob_get(object)
-|
-- drm_property_unreference_blob(object)
-+ drm_property_blob_put(object)
-|
 - drm_dev_unref(object)
 + drm_dev_put(object)
 )
@@ -72,10 +66,6 @@ __drm_gem_object_unreference(object)
 |
 drm_gem_object_unreference_unlocked(object)
 |
-drm_property_unreference_blob@p(object)
-|
-drm_property_reference_blob@p(object)
-|
 drm_dev_unref@p(object)
 )
 

From 0e59c209f4ccf9f9d505babdb04731294e18c4ed Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 22 Mar 2018 11:00:59 +0000
Subject: [PATCH 0091/1286] drm/i915: Fix tracing of submit seqno

We pre-increment the timeline->seqno when handing it to the request,
so make sure the GEM_TRACE takes this into account. Otherwise, it appears
that we go backwards over a preemption point:

1d..1 157681077us : __i915_request_unsubmit: vcs0 fence 75e:3 <- global_seqno 17
0d.s1 157681113us : __i915_request_submit: vcs0 fence 75e:3 -> global_seqno 16

Fixes: d9b13c4dde6c ("drm/i915: Trace GEM steps between submit and wedging")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322110059.4467-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 2325886d1d55..f1b81fe4f9ab 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -501,7 +501,7 @@ void __i915_request_submit(struct i915_request *request)
 	GEM_TRACE("%s fence %llx:%d -> global_seqno %d\n",
 		  request->engine->name,
 		  request->fence.context, request->fence.seqno,
-		  engine->timeline->seqno);
+		  engine->timeline->seqno + 1);
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);

From 4ccfee92f4b6fbbedee1eb68f110a66f03edf7c6 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 22 Mar 2018 13:10:34 +0000
Subject: [PATCH 0092/1286] drm/i915: Remove local timeline var from
 submit/unsubmit

Both request_submit and request_unsubmit deal with transferring the
request from the client's timeline onto the execution timeline and back
again. As both functions deal with a pair of timelines, using a
shorthand for just one of them is slightly confusing, especially as the
different functions use the shorthand for the alternate timeline.
Instead, use the full version of each timeline so it should be easier to
keep track of the transfer between the request/client and the engine.

v2: Refactor the common lock+list_move
v3: Be clear we require the other timeline list to be locked as well.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322131034.6036-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.c | 30 ++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f1b81fe4f9ab..2314a26cd7f8 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -492,10 +492,20 @@ static u32 timeline_get_seqno(struct intel_timeline *tl)
 	return ++tl->seqno;
 }
 
+static void move_to_timeline(struct i915_request *request,
+			     struct intel_timeline *timeline)
+{
+	GEM_BUG_ON(request->timeline == request->engine->timeline);
+	lockdep_assert_held(&request->engine->timeline->lock);
+
+	spin_lock(&request->timeline->lock);
+	list_move_tail(&request->link, &timeline->requests);
+	spin_unlock(&request->timeline->lock);
+}
+
 void __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct intel_timeline *timeline;
 	u32 seqno;
 
 	GEM_TRACE("%s fence %llx:%d -> global_seqno %d\n",
@@ -506,12 +516,9 @@ void __i915_request_submit(struct i915_request *request)
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);
 
-	/* Transfer from per-context onto the global per-engine timeline */
-	timeline = engine->timeline;
-	GEM_BUG_ON(timeline == request->timeline);
 	GEM_BUG_ON(request->global_seqno);
 
-	seqno = timeline_get_seqno(timeline);
+	seqno = timeline_get_seqno(engine->timeline);
 	GEM_BUG_ON(!seqno);
 	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
 
@@ -525,9 +532,8 @@ void __i915_request_submit(struct i915_request *request)
 	engine->emit_breadcrumb(request,
 				request->ring->vaddr + request->postfix);
 
-	spin_lock(&request->timeline->lock);
-	list_move_tail(&request->link, &timeline->requests);
-	spin_unlock(&request->timeline->lock);
+	/* Transfer from per-context onto the global per-engine timeline */
+	move_to_timeline(request, engine->timeline);
 
 	trace_i915_request_execute(request);
 
@@ -550,7 +556,6 @@ void i915_request_submit(struct i915_request *request)
 void __i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct intel_timeline *timeline;
 
 	GEM_TRACE("%s fence %llx:%d <- global_seqno %d\n",
 		  request->engine->name,
@@ -578,12 +583,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 	spin_unlock(&request->lock);
 
 	/* Transfer back from the global per-engine timeline to per-context */
-	timeline = request->timeline;
-	GEM_BUG_ON(timeline == engine->timeline);
-
-	spin_lock(&timeline->lock);
-	list_move(&request->link, &timeline->requests);
-	spin_unlock(&timeline->lock);
+	move_to_timeline(request, request->timeline);
 
 	/*
 	 * We don't need to wake_up any waiters on request->execute, they

From 0ade43909d599bdde0d60e0c79e6d73479d65ffa Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 22 Mar 2018 07:49:08 +0000
Subject: [PATCH 0093/1286] drm/i915/selftests: Include the trace as a debug
 aid

If we fail to reset the GPU in a timely fashion, dump the GEM trace so
that we can see what operations were in flight when the GPU got stuck.

v2: There's more than one timeout that deserves tracing!
v3: Silence checkpatch by not even using a product at all!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Jeff McGee <jeff.mcgee@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322074908.10838-1-chris@chris-wilson.co.uk
---
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 23 ++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 4372826998aa..9b235dae8dd9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -260,8 +260,11 @@ static void wedge_me(struct work_struct *work)
 {
 	struct wedge_me *w = container_of(work, typeof(*w), work.work);
 
-	pr_err("%pS timed out, cancelling all further testing.\n",
-	       w->symbol);
+	pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
+
+	GEM_TRACE("%pS timed out.\n", w->symbol);
+	GEM_TRACE_DUMP();
+
 	i915_gem_set_wedged(w->i915);
 }
 
@@ -621,9 +624,19 @@ static int active_engine(void *data)
 		mutex_unlock(&engine->i915->drm.struct_mutex);
 
 		if (old) {
-			i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT);
+			if (i915_request_wait(old, 0, HZ) < 0) {
+				GEM_TRACE("%s timed out.\n", engine->name);
+				GEM_TRACE_DUMP();
+
+				i915_gem_set_wedged(engine->i915);
+				i915_request_put(old);
+				err = -EIO;
+				break;
+			}
 			i915_request_put(old);
 		}
+
+		cond_resched();
 	}
 
 	for (count = 0; count < ARRAY_SIZE(rq); count++)
@@ -1126,6 +1139,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 
 	err = i915_subtests(tests, i915);
 
+	mutex_lock(&i915->drm.struct_mutex);
+	flush_test(i915, I915_WAIT_LOCKED);
+	mutex_unlock(&i915->drm.struct_mutex);
+
 	i915_modparams.enable_hangcheck = saved_hangcheck;
 	intel_runtime_pm_put(i915);
 

From a90507d60763ee1067cf217614a8bb2ab43aca1a Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 22 Mar 2018 07:35:31 +0000
Subject: [PATCH 0094/1286] drm/i915/selftests: Stress
 resets-vs-request-priority

Watch what happens if we try to reset with a queue of requests with
varying priorities -- that may need reordering or preemption across the
reset.

v2: Tweak priorities to avoid starving the hanging thread.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322073533.5313-2-chris@chris-wilson.co.uk
Reviewed-by: Jeff McGee <jeff.mcgee@intel.com>
---
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 187 ++++++++++++------
 1 file changed, 125 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 9b235dae8dd9..9e4e0ad62724 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -25,6 +25,7 @@
 #include <linux/kthread.h>
 
 #include "../i915_selftest.h"
+#include "i915_random.h"
 
 #include "mock_context.h"
 #include "mock_drm.h"
@@ -486,6 +487,8 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 
 		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 		do {
+			u32 seqno = intel_engine_get_seqno(engine);
+
 			if (active) {
 				struct i915_request *rq;
 
@@ -514,12 +517,13 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 					break;
 				}
 
+				GEM_BUG_ON(!rq->global_seqno);
+				seqno = rq->global_seqno - 1;
 				i915_request_put(rq);
 			}
 
 			engine->hangcheck.stalled = true;
-			engine->hangcheck.seqno =
-				intel_engine_get_seqno(engine);
+			engine->hangcheck.seqno = seqno;
 
 			err = i915_reset_engine(engine, NULL);
 			if (err) {
@@ -576,11 +580,25 @@ static int igt_reset_active_engine(void *arg)
 	return __igt_reset_engine(arg, true);
 }
 
+struct active_engine {
+	struct task_struct *task;
+	struct intel_engine_cs *engine;
+	unsigned long resets;
+	unsigned int flags;
+};
+
+#define TEST_ACTIVE	BIT(0)
+#define TEST_OTHERS	BIT(1)
+#define TEST_SELF	BIT(2)
+#define TEST_PRIORITY	BIT(3)
+
 static int active_engine(void *data)
 {
-	struct intel_engine_cs *engine = data;
-	struct i915_request *rq[2] = {};
-	struct i915_gem_context *ctx[2];
+	I915_RND_STATE(prng);
+	struct active_engine *arg = data;
+	struct intel_engine_cs *engine = arg->engine;
+	struct i915_request *rq[8] = {};
+	struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
 	struct drm_file *file;
 	unsigned long count = 0;
 	int err = 0;
@@ -589,25 +607,20 @@ static int active_engine(void *data)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&engine->i915->drm.struct_mutex);
-	ctx[0] = live_context(engine->i915, file);
-	mutex_unlock(&engine->i915->drm.struct_mutex);
-	if (IS_ERR(ctx[0])) {
-		err = PTR_ERR(ctx[0]);
-		goto err_file;
-	}
-
-	mutex_lock(&engine->i915->drm.struct_mutex);
-	ctx[1] = live_context(engine->i915, file);
-	mutex_unlock(&engine->i915->drm.struct_mutex);
-	if (IS_ERR(ctx[1])) {
-		err = PTR_ERR(ctx[1]);
-		i915_gem_context_put(ctx[0]);
-		goto err_file;
+	for (count = 0; count < ARRAY_SIZE(ctx); count++) {
+		mutex_lock(&engine->i915->drm.struct_mutex);
+		ctx[count] = live_context(engine->i915, file);
+		mutex_unlock(&engine->i915->drm.struct_mutex);
+		if (IS_ERR(ctx[count])) {
+			err = PTR_ERR(ctx[count]);
+			while (--count)
+				i915_gem_context_put(ctx[count]);
+			goto err_file;
+		}
 	}
 
 	while (!kthread_should_stop()) {
-		unsigned int idx = count++ & 1;
+		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
 		struct i915_request *old = rq[idx];
 		struct i915_request *new;
 
@@ -619,6 +632,10 @@ static int active_engine(void *data)
 			break;
 		}
 
+		if (arg->flags & TEST_PRIORITY)
+			ctx[idx]->priority =
+				i915_prandom_u32_max_state(512, &prng);
+
 		rq[idx] = i915_request_get(new);
 		i915_request_add(new);
 		mutex_unlock(&engine->i915->drm.struct_mutex);
@@ -647,8 +664,9 @@ err_file:
 	return err;
 }
 
-static int __igt_reset_engine_others(struct drm_i915_private *i915,
-				     bool active)
+static int __igt_reset_engines(struct drm_i915_private *i915,
+			       const char *test_name,
+			       unsigned int flags)
 {
 	struct intel_engine_cs *engine, *other;
 	enum intel_engine_id id, tmp;
@@ -662,50 +680,61 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
 	if (!intel_has_reset_engine(i915))
 		return 0;
 
-	if (active) {
+	if (flags & TEST_ACTIVE) {
 		mutex_lock(&i915->drm.struct_mutex);
 		err = hang_init(&h, i915);
 		mutex_unlock(&i915->drm.struct_mutex);
 		if (err)
 			return err;
+
+		if (flags & TEST_PRIORITY)
+			h.ctx->priority = 1024;
 	}
 
 	for_each_engine(engine, i915, id) {
-		struct task_struct *threads[I915_NUM_ENGINES] = {};
-		unsigned long resets[I915_NUM_ENGINES];
+		struct active_engine threads[I915_NUM_ENGINES] = {};
 		unsigned long global = i915_reset_count(&i915->gpu_error);
-		unsigned long count = 0;
+		unsigned long count = 0, reported;
 		IGT_TIMEOUT(end_time);
 
-		if (active && !intel_engine_can_store_dword(engine))
+		if (flags & TEST_ACTIVE &&
+		    !intel_engine_can_store_dword(engine))
 			continue;
 
 		memset(threads, 0, sizeof(threads));
 		for_each_engine(other, i915, tmp) {
 			struct task_struct *tsk;
 
-			resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
-							      other);
+			threads[tmp].resets =
+				i915_reset_engine_count(&i915->gpu_error,
+							other);
 
-			if (other == engine)
+			if (!(flags & TEST_OTHERS))
 				continue;
 
-			tsk = kthread_run(active_engine, other,
+			if (other == engine && !(flags & TEST_SELF))
+				continue;
+
+			threads[tmp].engine = other;
+			threads[tmp].flags = flags;
+
+			tsk = kthread_run(active_engine, &threads[tmp],
 					  "igt/%s", other->name);
 			if (IS_ERR(tsk)) {
 				err = PTR_ERR(tsk);
 				goto unwind;
 			}
 
-			threads[tmp] = tsk;
+			threads[tmp].task = tsk;
 			get_task_struct(tsk);
 		}
 
 		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 		do {
-			if (active) {
-				struct i915_request *rq;
+			u32 seqno = intel_engine_get_seqno(engine);
+			struct i915_request *rq = NULL;
 
+			if (flags & TEST_ACTIVE) {
 				mutex_lock(&i915->drm.struct_mutex);
 				rq = hang_create_request(&h, engine);
 				if (IS_ERR(rq)) {
@@ -731,33 +760,38 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915,
 					break;
 				}
 
-				i915_request_put(rq);
+				GEM_BUG_ON(!rq->global_seqno);
+				seqno = rq->global_seqno - 1;
 			}
 
 			engine->hangcheck.stalled = true;
-			engine->hangcheck.seqno =
-				intel_engine_get_seqno(engine);
+			engine->hangcheck.seqno = seqno;
 
 			err = i915_reset_engine(engine, NULL);
 			if (err) {
-				pr_err("i915_reset_engine(%s:%s) failed, err=%d\n",
-				       engine->name, active ? "active" : "idle", err);
+				pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
+				       engine->name, test_name, err);
 				break;
 			}
 
 			engine->hangcheck.stalled = false;
 			count++;
+
+			if (rq) {
+				i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+				i915_request_put(rq);
+			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
-			engine->name, active ? "active" : "idle", count);
+			engine->name, test_name, count);
 
-		if (i915_reset_engine_count(&i915->gpu_error, engine) -
-		    resets[engine->id] != (active ? count : 0)) {
-			pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
-			       engine->name, active ? "active" : "idle", count,
-			       i915_reset_engine_count(&i915->gpu_error,
-						       engine) - resets[engine->id]);
+		reported = i915_reset_engine_count(&i915->gpu_error, engine);
+		reported -= threads[engine->id].resets;
+		if (reported != (flags & TEST_ACTIVE ? count : 0)) {
+			pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu, expected %lu reported\n",
+			       engine->name, test_name, count, reported,
+			       (flags & TEST_ACTIVE ? count : 0));
 			if (!err)
 				err = -EINVAL;
 		}
@@ -766,24 +800,26 @@ unwind:
 		for_each_engine(other, i915, tmp) {
 			int ret;
 
-			if (!threads[tmp])
+			if (!threads[tmp].task)
 				continue;
 
-			ret = kthread_stop(threads[tmp]);
+			ret = kthread_stop(threads[tmp].task);
 			if (ret) {
 				pr_err("kthread for other engine %s failed, err=%d\n",
 				       other->name, ret);
 				if (!err)
 					err = ret;
 			}
-			put_task_struct(threads[tmp]);
+			put_task_struct(threads[tmp].task);
 
-			if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error,
-								   other)) {
+			if (other != engine &&
+			    threads[tmp].resets !=
+			    i915_reset_engine_count(&i915->gpu_error, other)) {
 				pr_err("Innocent engine %s was reset (count=%ld)\n",
 				       other->name,
 				       i915_reset_engine_count(&i915->gpu_error,
-							       other) - resets[tmp]);
+							       other) -
+				       threads[tmp].resets);
 				if (!err)
 					err = -EINVAL;
 			}
@@ -807,7 +843,7 @@ unwind:
 	if (i915_terminally_wedged(&i915->gpu_error))
 		err = -EIO;
 
-	if (active) {
+	if (flags & TEST_ACTIVE) {
 		mutex_lock(&i915->drm.struct_mutex);
 		hang_fini(&h);
 		mutex_unlock(&i915->drm.struct_mutex);
@@ -816,14 +852,42 @@ unwind:
 	return err;
 }
 
-static int igt_reset_idle_engine_others(void *arg)
+static int igt_reset_engines(void *arg)
 {
-	return __igt_reset_engine_others(arg, false);
-}
+	static const struct {
+		const char *name;
+		unsigned int flags;
+	} phases[] = {
+		{ "idle", 0 },
+		{ "active", TEST_ACTIVE },
+		{ "others-idle", TEST_OTHERS },
+		{ "others-active", TEST_OTHERS | TEST_ACTIVE },
+		{
+			"others-priority",
+			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
+		},
+		{
+			"self-priority",
+			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
+		},
+		{ }
+	};
+	struct drm_i915_private *i915 = arg;
+	typeof(*phases) *p;
+	int err;
 
-static int igt_reset_active_engine_others(void *arg)
-{
-	return __igt_reset_engine_others(arg, true);
+	for (p = phases; p->name; p++) {
+		if (p->flags & TEST_PRIORITY) {
+			if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+				continue;
+		}
+
+		err = __igt_reset_engines(arg, p->name, p->flags);
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 static u32 fake_hangcheck(struct i915_request *rq)
@@ -1122,8 +1186,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_hang_sanitycheck),
 		SUBTEST(igt_reset_idle_engine),
 		SUBTEST(igt_reset_active_engine),
-		SUBTEST(igt_reset_idle_engine_others),
-		SUBTEST(igt_reset_active_engine_others),
+		SUBTEST(igt_reset_engines),
 		SUBTEST(igt_wait_reset),
 		SUBTEST(igt_reset_queue),
 		SUBTEST(igt_handle_error),

From 1c645bf4378f1539df57c6228f5c4957c130324a Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 22 Mar 2018 07:35:32 +0000
Subject: [PATCH 0095/1286] drm/i915: Use full serialisation around
 engine->irq_posted
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Using engine->irq_posted for execlists, we are not always serialised by
the tasklet as we supposed. On the reset paths, the tasklet is disabled
and ignored. Instead, we manipulate the engine->irq_posted directly to
account for the reset, but if an interrupt fired before the reset and so
wrote to engine->irq_posted, that write may not be flushed from the
local CPU's cacheline until much later as the tasklet is already active
and so does not generate a mb(). To correctly serialise the interrupt
with reset, we need serialisation on the set_bit() itself.
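
Sketched out, the difference between the two bit operations is roughly:

    /* __set_bit() is non-atomic and implies no barrier, so the store may
     * sit in the local CPU's cacheline while the reset path runs: */
    __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);

    /* test_and_set_bit() is an atomic RMW returning a value and so is
     * fully ordered, giving the reset path a consistent view: */
    if (!test_and_set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
            tasklet = true;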

And at last Mika can be happy.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
CC: Michel Thierry <michel.thierry@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Jeff McGee <jeff.mcgee@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322073533.5313-3-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_irq.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fa7310766217..27aee25429b7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1405,10 +1405,9 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 	bool tasklet = false;
 
 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
-		if (READ_ONCE(engine->execlists.active)) {
-			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-			tasklet = true;
-		}
+		if (READ_ONCE(engine->execlists.active))
+			tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
+						    &engine->irq_posted);
 	}
 
 	if (iir & GT_RENDER_USER_INTERRUPT) {

From 0f36a85c3bd5e0dfcbb49af203a96a933dae86cf Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 22 Mar 2018 07:35:33 +0000
Subject: [PATCH 0096/1286] drm/i915: Flush pending interrupt following a GPU
 reset
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After resetting the GPU (or subset of engines), call synchronize_irq()
to flush any pending irq before proceeding with the cleanup. For a
device level reset, we disable the interrupts around the reset, but when
resetting just one engine, we have to avoid such global disabling. This
leaves us open to an interrupt arriving for the engine as we try to
reset it. We already do try to flush the IIR following the reset, but we
have to ensure that the in-flight interrupt does not land after we start
cleaning up after the reset; enter synchronize_irq().

As it currently stands, we very rarely, but fatally, see sequences such as:

    2.... 57964564us : execlists_reset_prepare: rcs0
    2.... 57964613us : execlists_reset: rcs0 seqno=424
    0d.h1 57964615us : gen8_cs_irq_handler: rcs0 CS active=1
    2d..1 57964617us : __i915_request_unsubmit: rcs0 fence 29:1056 <- global_seqno 1060
    2.... 57964703us : execlists_reset_finish: rcs0
    0..s. 57964705us : execlists_submission_tasklet: rcs0 awake?=1, active=0, irq-posted?=1

v2: Move the sync into the execlists reset handler so that we coordinate
the flush with disabling the interrupt handling and canceling the
pending interrupt.
v3: Just use synchronize_hardirq() to avoid the might_sleep(), we do not
yet have threaded-irq to worry about.
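
Condensed from the diff below, the ordering established in the cancel
path is roughly:

    smp_store_mb(execlists->active, 0);          /* stop re-posting from the irq handler */
    synchronize_hardirq(engine->i915->drm.irq);  /* wait out any in-flight handler */
    clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);  /* now safe to cancel */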

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322073533.5313-4-chris@chris-wilson.co.uk
Reviewed-by: Jeff McGee <jeff.mcgee@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c    | 7 ++++---
 drivers/gpu/drm/i915/intel_uncore.c | 4 +++-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 67b6a0f658d6..ce09c5ad334f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -805,6 +805,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	spin_unlock(&engine->timeline->lock);
 
+	/* Mark all CS interrupts as complete */
+	smp_store_mb(execlists->active, 0);
+	synchronize_hardirq(engine->i915->drm.irq);
+
 	/*
 	 * The port is checked prior to scheduling a tasklet, but
 	 * just in case we have suspended the tasklet to do the
@@ -813,9 +817,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	 */
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
-	/* Mark all CS interrupts as complete */
-	execlists->active = 0;
-
 	local_irq_restore(flags);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4c616d074a97..f37ecfc69e49 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2116,8 +2116,10 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 		i915_stop_engines(dev_priv, engine_mask);
 
 		ret = -ENODEV;
-		if (reset)
+		if (reset) {
+			GEM_TRACE("engine_mask=%x\n", engine_mask);
 			ret = reset(dev_priv, engine_mask);
+		}
 		if (ret != -ETIMEDOUT)
 			break;
 

From 66c1f77ae2b773f349c1ea1312d69a6ab775cc26 Mon Sep 17 00:00:00 2001
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Tue, 20 Mar 2018 17:17:33 +0200
Subject: [PATCH 0097/1286] drm/i915: Avoid setting ring freq on invalid rps
 freqs

Looping through rps frequencies when both min and max are zero
ends up in an endless loop. This can happen during hardware
enablement.

Bail out early if rps frequencies are not correctly set yet.
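
A stand-alone sketch of the failure mode, mirroring the unsigned loop
counter in gen6_update_ring_freq(); the iteration count is capped only
so the demo terminates:

    #include <stdio.h>

    int main(void)
    {
            const unsigned int min_gpu_freq = 0, max_gpu_freq = 0;
            unsigned int gpu_freq = max_gpu_freq;
            int iterations = 0;

            /*
             * The driver loop is "for (gpu_freq = max; gpu_freq >= min; gpu_freq--)".
             * With an unsigned counter and min == 0 the condition is always true:
             * after the first pass gpu_freq wraps to UINT_MAX, so without the
             * early return the loop never terminates.
             */
            while (gpu_freq >= min_gpu_freq && iterations++ < 3) {
                    printf("gpu_freq = %u\n", gpu_freq);
                    gpu_freq--;
            }
            /* prints 0, 4294967295, 4294967294 */
            return 0;
    }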

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320151734.11761-1-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_pm.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd5ddb77b306..19e82aaa9863 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6890,15 +6890,18 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	int min_freq = 15;
+	const int min_freq = 15;
+	const int scaling_factor = 180;
 	unsigned int gpu_freq;
 	unsigned int max_ia_freq, min_ring_freq;
 	unsigned int max_gpu_freq, min_gpu_freq;
-	int scaling_factor = 180;
 	struct cpufreq_policy *policy;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
+	if (rps->max_freq <= rps->min_freq)
+		return;
+
 	policy = cpufreq_cpu_get(0);
 	if (policy) {
 		max_ia_freq = policy->cpuinfo.max_freq;
@@ -6932,7 +6935,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
 	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
-		int diff = max_gpu_freq - gpu_freq;
+		const int diff = max_gpu_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
 		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {

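The failure mode being guarded against here is a decrementing loop over
unsigned frequencies whose limits come from hardware state that may not be
initialized yet. A minimal sketch of the guard follows; the structure and
field names are invented for illustration, not the i915 code, and valid
limits are assumed to be nonzero.

/*
 * If the range limits are not yet valid, a "for (f = max; f >= min; f--)"
 * loop over unsigned values never terminates (the comparison against 0 is
 * always true after wrap-around), so bail out before entering it.
 */
#include <stdio.h>

struct rps_state {
	unsigned int min_freq;
	unsigned int max_freq;	/* both 0 until hardware enablement finishes */
};

static void update_table(const struct rps_state *rps)
{
	unsigned int freq;

	if (rps->max_freq <= rps->min_freq)
		return;	/* limits not initialised yet, nothing sensible to do */

	for (freq = rps->max_freq; freq >= rps->min_freq; freq--)
		printf("program entry for freq %u\n", freq);
}

int main(void)
{
	struct rps_state not_ready = { 0, 0 };
	struct rps_state ready = { 3, 6 };	/* valid limits are nonzero */

	update_table(&not_ready);	/* returns immediately */
	update_table(&ready);		/* prints 6..3 */
	return 0;
}
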
From 0ef904bb3a6d9dc2d81301e84116eaff880412f2 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Wed, 21 Mar 2018 10:32:28 +0000
Subject: [PATCH 0098/1286] drm/i915: Skip logging impossible slices

Log up to sseu->max_slices instead of basing the loop on ARRAY_SIZE, to
avoid printing impossible and empty slices for a platform.

Also compact slice total and slice mask into one log line.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321103228.32205-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/intel_device_info.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index a504281e2afa..0d1509e25db8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -83,11 +83,11 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
 {
 	int s;
 
-	drm_printf(p, "slice mask: %04x\n", sseu->slice_mask);
-	drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask));
+	drm_printf(p, "slice total: %u, mask=%04x\n",
+		   hweight8(sseu->slice_mask), sseu->slice_mask);
 	drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
-	for (s = 0; s < ARRAY_SIZE(sseu->subslice_mask); s++) {
-		drm_printf(p, "slice%d %u subslices mask=%04x\n",
+	for (s = 0; s < sseu->max_slices; s++) {
+		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
 			   s, hweight8(sseu->subslice_mask[s]),
 			   sseu->subslice_mask[s]);
 	}

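The change boils down to iterating up to a runtime-reported maximum rather
than the array's compile-time capacity, which is sized for the largest
platform. A small stand-alone sketch of that pattern is shown below, with
made-up data rather than the real i915 structures.

/*
 * The array is sized for the biggest platform, but only entries below the
 * runtime max_slices are meaningful, so the dump loop stops there instead
 * of at ARRAY_SIZE().
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct sseu_info {
	unsigned char max_slices;	/* what this platform can actually have */
	unsigned char subslice_mask[6];	/* sized for the largest platform */
};

static void dump(const struct sseu_info *sseu)
{
	int s;

	/* Was: for (s = 0; s < ARRAY_SIZE(sseu->subslice_mask); s++) */
	for (s = 0; s < sseu->max_slices; s++)
		printf("slice%d: mask=%02x\n", s, sseu->subslice_mask[s]);
}

int main(void)
{
	struct sseu_info sseu = { .max_slices = 2,
				  .subslice_mask = { 0x0f, 0x0f } };

	dump(&sseu);	/* prints two slices, not six */
	return 0;
}
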
From 277ab5abc68df2f6f8fac7a46e50105b6648f432 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:47:07 +0200
Subject: [PATCH 0099/1286] drm/i915: Don't spew errors when resetting HDMI
 scrambling/bit clock ratio fails
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When we're disabling the HDMI link we try to reset the scrambling and
TMDS bit clock ratio back to the default values. This will fail if the
sink has already been disconnected, so we should not print an error
message when resetting the scrambling/TMDS bit clock ratio fails during
disable. During enable we do want the error, and during disable we may
still want to know what happened for debug purposes, so let's use
DRM_DEBUG_KMS() there.

v2: Remember them consts
v3: Go back to just one function and print the errors/debugs
    from callers (Shashank)

Cc: Shashank Sharma <shashank.sharma@intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105644
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105655
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322154707.22103-1-ville.syrjala@linux.intel.com
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
---
 drivers/gpu/drm/i915/intel_ddi.c  | 19 +++++++++------
 drivers/gpu/drm/i915/intel_drv.h  |  2 +-
 drivers/gpu/drm/i915/intel_hdmi.c | 40 +++++++++++++------------------
 3 files changed, 29 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8c2d778560f0..c449619427da 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2424,12 +2424,14 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct drm_connector *connector = conn_state->connector;
 	enum port port = encoder->port;
 
-	intel_hdmi_handle_sink_scrambling(encoder,
-					  conn_state->connector,
-					  crtc_state->hdmi_high_tmds_clock_ratio,
-					  crtc_state->hdmi_scrambling);
+	if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
+					       crtc_state->hdmi_high_tmds_clock_ratio,
+					       crtc_state->hdmi_scrambling))
+		DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
+			  connector->base.id, connector->name);
 
 	/* Display WA #1143: skl,kbl,cfl */
 	if (IS_GEN9_BC(dev_priv)) {
@@ -2520,13 +2522,16 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
 				   const struct intel_crtc_state *old_crtc_state,
 				   const struct drm_connector_state *old_conn_state)
 {
+	struct drm_connector *connector = old_conn_state->connector;
+
 	if (old_crtc_state->has_audio)
 		intel_audio_codec_disable(encoder,
 					  old_crtc_state, old_conn_state);
 
-	intel_hdmi_handle_sink_scrambling(encoder,
-					  old_conn_state->connector,
-					  false, false);
+	if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
+					       false, false))
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
+			      connector->base.id, connector->name);
 }
 
 static void intel_disable_ddi(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index de6db9196638..b79a01b7f008 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1782,7 +1782,7 @@ struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 			       struct intel_crtc_state *pipe_config,
 			       struct drm_connector_state *conn_state);
-void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 				       struct drm_connector *connector,
 				       bool high_tmds_clock_ratio,
 				       bool scrambling);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1baef4ac7ecb..ee929f31f7db 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2082,41 +2082,33 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
  * it enables scrambling. This should be called before enabling the HDMI
  * 2.0 port, as the sink can choose to disable the scrambling if it doesn't
  * detect a scrambled clock within 100 ms.
+ *
+ * Returns:
+ * True on success, false on failure.
  */
-void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
 				       struct drm_connector *connector,
 				       bool high_tmds_clock_ratio,
 				       bool scrambling)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct drm_scrambling *sink_scrambling =
-				&connector->display_info.hdmi.scdc.scrambling;
-	struct i2c_adapter *adptr = intel_gmbus_get_adapter(dev_priv,
-							   intel_hdmi->ddc_bus);
-	bool ret;
+		&connector->display_info.hdmi.scdc.scrambling;
+	struct i2c_adapter *adapter =
+		intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
 
 	if (!sink_scrambling->supported)
-		return;
+		return true;
 
-	DRM_DEBUG_KMS("Setting sink scrambling for enc:%s connector:%s\n",
-		      encoder->base.name, connector->name);
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
+		      connector->base.id, connector->name,
+		      yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);
 
-	/* Set TMDS bit clock ratio to 1/40 or 1/10 */
-	ret = drm_scdc_set_high_tmds_clock_ratio(adptr, high_tmds_clock_ratio);
-	if (!ret) {
-		DRM_ERROR("Set TMDS ratio failed\n");
-		return;
-	}
-
-	/* Enable/disable sink scrambling */
-	ret = drm_scdc_set_scrambling(adptr, scrambling);
-	if (!ret) {
-		DRM_ERROR("Set sink scrambling failed\n");
-		return;
-	}
-
-	DRM_DEBUG_KMS("sink scrambling handled\n");
+	/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
+	return drm_scdc_set_high_tmds_clock_ratio(adapter,
+						  high_tmds_clock_ratio) &&
+		drm_scdc_set_scrambling(adapter, scrambling);
 }
 
 static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)

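The shape of the refactor above is a common one: the helper only reports
success or failure, and each call site decides how loudly to complain. A
short sketch of the pattern follows, using hypothetical names rather than
the real DRM/SCDC helpers.

/*
 * The helper just reports success/failure; the enable path treats a
 * failure as an error while the disable path, where the sink may already
 * be gone, only emits a debug message.
 */
#include <stdbool.h>
#include <stdio.h>

static bool configure_sink(bool sink_present)
{
	/* Stand-in for the SCDC writes; fails if the sink is gone. */
	return sink_present;
}

static void enable_path(bool sink_present)
{
	if (!configure_sink(sink_present))
		fprintf(stderr, "ERROR: failed to configure sink\n");
}

static void disable_path(bool sink_present)
{
	/* Expected to fail when the cable was pulled; debug only. */
	if (!configure_sink(sink_present))
		fprintf(stderr, "debug: failed to reset sink\n");
}

int main(void)
{
	enable_path(true);	/* silent */
	disable_path(false);	/* debug message, not an error */
	return 0;
}
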
From 28e0e8ac27409e1f2f2abd226548f0ebeef19ad8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Piotr=20Pi=C3=B3rkowski?= <piotr.piorkowski@intel.com>
Date: Fri, 23 Mar 2018 12:23:18 +0100
Subject: [PATCH 0100/1286] drm/i915/guc: Fix null pointer dereference when GuC
 FW is not available
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If GuC firmware is not available on the system and we load i915 with GuC
enabled, then we hit this null pointer dereference:

[   71.098873] BUG: unable to handle kernel NULL pointer dereference at 0000000000000008
[   71.098938] IP: intel_uc_fw_upload+0x1f/0x360 [i915]
[   71.098947] PGD 0 P4D 0
[   71.098956] Oops: 0000 [#1] PREEMPT SMP PTI
[   71.098965] Modules linked in: i915(O+) netconsole x86_pkg_temp_thermal intel_powerclamp coretemp crct10dif_pclmul crc32_pclmul ghash_clmulni_intel mei_me i2c_i801 prime_numbers mei [last unloaded: i915]
[   71.099005] CPU: 2 PID: 1167 Comm: insmod Tainted: G     U  W  O     4.16.0-rc1+ #337
[   71.099018] Hardware name: /NUC6i5SYB, BIOS SYSKLi35.86A.0065.2018.0103.1000 01/03/2018
[   71.099077] RIP: 0010:intel_uc_fw_upload+0x1f/0x360 [i915]
[   71.099087] RSP: 0018:ffffc90000417aa0 EFLAGS: 00010282
[   71.099097] RAX: 0000000000000000 RBX: ffff88084cad12f8 RCX: ffffffffa03e9357
[   71.099108] RDX: 0000000000000002 RSI: ffffffffa034dba0 RDI: ffff88084cad12f8
[   71.099118] RBP: 0000000000000002 R08: ffff88085344ca90 R09: 0000000000000001
[   71.099128] R10: 0000000000000000 R11: 0000000000000000 R12: ffff88084cad0000
[   71.099139] R13: ffffffffa034dba0 R14: 00000000fffffff5 R15: ffff88084cad12b0
[   71.099151] FS:  00007f7f24ae2740(0000) GS:ffff88085e200000(0000) knlGS:0000000000000000
[   71.099162] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   71.099171] CR2: 0000000000000008 CR3: 0000000855f48001 CR4: 00000000003606e0
[   71.099182] Call Trace:
[   71.099246]  intel_uc_init_hw+0xc8/0x520 [i915]
[   71.099303]  i915_gem_init_hw+0x11f/0x2d0 [i915]
[   71.099364]  i915_gem_init+0x2b9/0x640 [i915]
[   71.099413]  i915_driver_load+0xb74/0x1110 [i915]
[   71.099462]  i915_pci_probe+0x2e/0x90 [i915]
[   71.099476]  pci_device_probe+0xa1/0x130
[   71.099488]  driver_probe_device+0x302/0x470
[   71.099502]  __driver_attach+0xb9/0xe0
[   71.099513]  ? driver_probe_device+0x470/0x470
[   71.099525]  ? driver_probe_device+0x470/0x470
[   71.099538]  bus_for_each_dev+0x64/0x90
[   71.099550]  bus_add_driver+0x164/0x260
[   71.099561]  ? 0xffffffffa04d6000
[   71.099572]  driver_register+0x57/0xc0
[   71.099582]  ? 0xffffffffa04d6000
[   71.099593]  do_one_initcall+0x3b/0x160
[   71.099606]  ? kmem_cache_alloc_trace+0x1c3/0x2a0
[   71.099621]  do_init_module+0x5b/0x1f9
[   71.099635]  load_module+0x2467/0x2a70
[   71.099654]  ? SyS_finit_module+0xbd/0xe0
[   71.099668]  SyS_finit_module+0xbd/0xe0
[   71.099682]  do_syscall_64+0x73/0x1c0
[   71.099694]  entry_SYSCALL_64_after_hwframe+0x26/0x9b
[   71.099706] RIP: 0033:0x7f7f23fb40d9
[   71.099717] RSP: 002b:00007ffda7d67ed8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
[   71.099734] RAX: ffffffffffffffda RBX: 000055f96e2a8870 RCX: 00007f7f23fb40d9
[   71.099748] RDX: 0000000000000000 RSI: 000055f96e2a8260 RDI: 0000000000000003
[   71.099763] RBP: 000055f96e2a8260 R08: 0000000000000000 R09: 00007ffda7d68088
[   71.099777] R10: 0000000000000003 R11: 0000000000000246 R12: 0000000000000000
[   71.099791] R13: 000055f96e2a8830 R14: 0000000000000000 R15: 000055f96e2a8260
[   71.099810] Code: 00 00 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 41 55 41 54 49 89 f5 55 53 48 c7 c1 57 93 3e a0 48 8b 47 10 48 89 fb 4c 8b 07 <48> 8b 68 08 8b 47 28 85 c0 74 15 83 f8 01 48 c7 c1 5b 93 3e a0
[   71.100004] RIP: intel_uc_fw_upload+0x1f/0x360 [i915] RSP: ffffc90000417aa0
[   71.100020] CR2: 0000000000000008
[   71.100031] ---[ end trace d8ac93c30ceff5b2 ]--

Fixes: 6b0478fb722a ("drm/i915: Implement dynamic GuC WOPCM offset and size calculation")

v2: don't assume it is always GuC FW (Michal)
v3: added a new variable to avoid exceeding the line-length limit (Michal)

Signed-off-by: Piotr Piórkowski <piotr.piorkowski@intel.com>
Reported-by: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jackie Li <yaodong.li@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Jackie Li <yaodong.li@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323112319.16293-1-piotr.piorkowski@intel.com
---
 drivers/gpu/drm/i915/intel_uc_fw.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 30c73243f54d..6e8e0b546743 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -199,8 +199,8 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 		       int (*xfer)(struct intel_uc_fw *uc_fw,
 				   struct i915_vma *vma))
 {
-	struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
 	struct i915_vma *vma;
+	u32 ggtt_pin_bias;
 	int err;
 
 	DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -222,9 +222,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 		goto fail;
 	}
 
+	ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->guc.ggtt_pin_bias;
 	vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
-				       PIN_OFFSET_BIAS |
-				       i915->guc.ggtt_pin_bias);
+				       PIN_OFFSET_BIAS | ggtt_pin_bias);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",

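The crash above comes from dereferencing uc_fw->obj on function entry, before
the code that checks whether a firmware image was ever fetched. The fix
defers the dereference until after that check. A simplified illustration of
the idea is sketched below with invented structures; it is not the i915 code.

/*
 * Derive values from an optional object only after confirming it was
 * actually populated, instead of unconditionally dereferencing it on
 * function entry.
 */
#include <stddef.h>
#include <stdio.h>

struct fw_blob { unsigned int size; };

struct uc_fw {
	int fetched;		/* 0 if the firmware was never loaded */
	struct fw_blob *obj;	/* NULL in that case */
};

static int upload(struct uc_fw *fw)
{
	unsigned int bias;

	if (!fw->fetched)
		return -1;	/* bail out before touching fw->obj */

	bias = fw->obj->size / 2;	/* safe: only reached when obj exists */
	printf("uploading with bias %u\n", bias);
	return 0;
}

int main(void)
{
	struct fw_blob blob = { 4096 };
	struct uc_fw missing = { 0, NULL };
	struct uc_fw present = { 1, &blob };

	upload(&missing);	/* returns -1, no crash */
	upload(&present);
	return 0;
}
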
From a0de908d44fb67500b7c45bd8325f316496227db Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Fri, 23 Mar 2018 12:34:49 +0000
Subject: [PATCH 0101/1286] drm/i915: Reorder early initialization

In an upcoming patch, we want to perform more actions in early
initialization of the uC. This reordering will help resolve the new
dependencies that patch will introduce.

v2: s/i915_gem_load_init/i915_gem_init_early (Chris)
v3: s/i915_gem_load_cleanup/i915_gem_cleanup_early (Michal)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323123451.59244-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c | 17 ++++++++---------
 drivers/gpu/drm/i915/i915_drv.h |  4 ++--
 drivers/gpu/drm/i915/i915_gem.c |  5 ++---
 3 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a7d3275f45d2..2561974af79c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -919,17 +919,21 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	mutex_init(&dev_priv->wm.wm_mutex);
 	mutex_init(&dev_priv->pps_mutex);
 
-	intel_wopcm_init_early(&dev_priv->wopcm);
-	intel_uc_init_early(dev_priv);
 	i915_memcpy_init_early(dev_priv);
 
 	ret = i915_workqueues_init(dev_priv);
 	if (ret < 0)
 		goto err_engines;
 
+	ret = i915_gem_init_early(dev_priv);
+	if (ret < 0)
+		goto err_workqueues;
+
 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev_priv);
 
+	intel_wopcm_init_early(&dev_priv->wopcm);
+	intel_uc_init_early(dev_priv);
 	intel_pm_setup(dev_priv);
 	intel_init_dpio(dev_priv);
 	intel_power_domains_init(dev_priv);
@@ -938,18 +942,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	intel_init_display_hooks(dev_priv);
 	intel_init_clock_gating_hooks(dev_priv);
 	intel_init_audio_hooks(dev_priv);
-	ret = i915_gem_load_init(dev_priv);
-	if (ret < 0)
-		goto err_irq;
-
 	intel_display_crc_init(dev_priv);
 
 	intel_detect_preproduction_hw(dev_priv);
 
 	return 0;
 
-err_irq:
-	intel_irq_fini(dev_priv);
+err_workqueues:
 	i915_workqueues_cleanup(dev_priv);
 err_engines:
 	i915_engines_cleanup(dev_priv);
@@ -962,8 +961,8 @@ err_engines:
  */
 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
-	i915_gem_load_cleanup(dev_priv);
 	intel_irq_fini(dev_priv);
+	i915_gem_cleanup_early(dev_priv);
 	i915_workqueues_cleanup(dev_priv);
 	i915_engines_cleanup(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9c3b2ba6a86..28ab91812701 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2869,8 +2869,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_sanitize(struct drm_i915_private *i915);
-int i915_gem_load_init(struct drm_i915_private *dev_priv);
-void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
+int i915_gem_init_early(struct drm_i915_private *dev_priv);
+void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
 int i915_gem_freeze(struct drm_i915_private *dev_priv);
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 802df8e1a544..9650a7b10c5f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5502,8 +5502,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
 	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
-int
-i915_gem_load_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_early(struct drm_i915_private *dev_priv)
 {
 	int err = -ENOMEM;
 
@@ -5578,7 +5577,7 @@ err_out:
 	return err;
 }
 
-void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
+void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 {
 	i915_gem_drain_freed_objects(dev_priv);
 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));

From 8c650aefb82d559aa0e1b7c0c36346b906481106 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Fri, 23 Mar 2018 12:34:50 +0000
Subject: [PATCH 0102/1286] drm/i915/uc: Fetch uC firmware in init_early

We were fetching uC firmwares in a separate uc_init_fw step, while
there is no reason why we can't fetch them during init_early.
This will also simplify upcoming patches, as the size of the firmware
may be used for register initialization.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323123451.59244-2-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c     |  8 ++----
 drivers/gpu/drm/i915/intel_guc_fw.c |  5 ++--
 drivers/gpu/drm/i915/intel_huc_fw.c |  5 ++--
 drivers/gpu/drm/i915/intel_uc.c     | 43 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_uc.h     |  3 +-
 5 files changed, 28 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2561974af79c..db223378d84b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -692,11 +692,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_irq;
 
-	intel_uc_init_fw(dev_priv);
-
 	ret = i915_gem_init(dev_priv);
 	if (ret)
-		goto cleanup_uc;
+		goto cleanup_irq;
 
 	intel_setup_overlay(dev_priv);
 
@@ -716,8 +714,6 @@ cleanup_gem:
 	if (i915_gem_suspend(dev_priv))
 		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 	i915_gem_fini(dev_priv);
-cleanup_uc:
-	intel_uc_fini_fw(dev_priv);
 cleanup_irq:
 	drm_irq_uninstall(dev);
 	intel_teardown_gmbus(dev_priv);
@@ -962,6 +958,7 @@ err_engines:
 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
 	intel_irq_fini(dev_priv);
+	intel_uc_cleanup_early(dev_priv);
 	i915_gem_cleanup_early(dev_priv);
 	i915_workqueues_cleanup(dev_priv);
 	i915_engines_cleanup(dev_priv);
@@ -1457,7 +1454,6 @@ void i915_driver_unload(struct drm_device *dev)
 	i915_reset_error_state(dev_priv);
 
 	i915_gem_fini(dev_priv);
-	intel_uc_fini_fw(dev_priv);
 	intel_fbc_cleanup_cfb(dev_priv);
 
 	intel_power_domains_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 978668cf82cc..a9e6fcce467c 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -275,9 +275,8 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
  * Called from intel_uc_init_hw() during driver load, resume from sleep and
  * after a GPU reset.
  *
- * The firmware image should have already been fetched into memory by the
- * earlier call to intel_uc_init_fw(), so here we need to only check that
- * fetch succeeded, and then transfer the image to the h/w.
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
  *
  * Return:	non-zero code on error
  */
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index bb0f8b7a8d2b..f93d2384d482 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -155,9 +155,8 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
  * Called from intel_uc_init_hw() during driver load, resume from sleep and
  * after a GPU reset. Note that HuC must be loaded before GuC.
  *
- * The firmware image should have already been fetched into memory by the
- * earlier call to intel_uc_init_fw(), so here we need to only check that
- * fetch succeeded, and then transfer the image to the h/w.
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
  *
  * Return:	non-zero code on error
  */
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 34f8a2c219d8..4aad8442e789 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -162,36 +162,35 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(i915_modparams.guc_log_level < 0);
 }
 
-void intel_uc_init_early(struct drm_i915_private *dev_priv)
+void intel_uc_init_early(struct drm_i915_private *i915)
 {
-	intel_guc_init_early(&dev_priv->guc);
-	intel_huc_init_early(&dev_priv->huc);
+	struct intel_guc *guc = &i915->guc;
+	struct intel_huc *huc = &i915->huc;
 
-	sanitize_options_early(dev_priv);
+	intel_guc_init_early(guc);
+	intel_huc_init_early(huc);
+
+	sanitize_options_early(i915);
+
+	if (USES_GUC(i915))
+		intel_uc_fw_fetch(i915, &guc->fw);
+
+	if (USES_HUC(i915))
+		intel_uc_fw_fetch(i915, &huc->fw);
 }
 
-void intel_uc_init_fw(struct drm_i915_private *dev_priv)
+void intel_uc_cleanup_early(struct drm_i915_private *i915)
 {
-	if (!USES_GUC(dev_priv))
-		return;
+	struct intel_guc *guc = &i915->guc;
+	struct intel_huc *huc = &i915->huc;
 
-	if (USES_HUC(dev_priv))
-		intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+	if (USES_HUC(i915))
+		intel_uc_fw_fini(&huc->fw);
 
-	intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
-}
+	if (USES_GUC(i915))
+		intel_uc_fw_fini(&guc->fw);
 
-void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
-{
-	if (!USES_GUC(dev_priv))
-		return;
-
-	intel_uc_fw_fini(&dev_priv->guc.fw);
-
-	if (USES_HUC(dev_priv))
-		intel_uc_fw_fini(&dev_priv->huc.fw);
-
-	guc_free_load_err_log(&dev_priv->guc);
+	guc_free_load_err_log(guc);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 937e61175258..25d73ada74ae 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -29,9 +29,8 @@
 #include "i915_params.h"
 
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
+void intel_uc_cleanup_early(struct drm_i915_private *dev_priv);
 void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
-void intel_uc_init_fw(struct drm_i915_private *dev_priv);
-void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
 int intel_uc_init_misc(struct drm_i915_private *dev_priv);
 void intel_uc_fini_misc(struct drm_i915_private *dev_priv);
 void intel_uc_sanitize(struct drm_i915_private *dev_priv);

From 46b3617dfec875c1414c6ccbfcab371c97735562 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 23 Mar 2018 10:18:24 +0000
Subject: [PATCH 0103/1286] drm/i915: Actually flush interrupts on reset not
 just wedging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Commit 0f36a85c3bd5 ("drm/i915: Flush pending interrupt following a GPU
reset") got confused and only applied the flush to the set-wedge path
(which itself is proving troublesome), but we also need the
serialisation on the regular reset path. Oops.

Move the interrupt into reset_irq() and make it common to the reset and
final set-wedge.

v2: reset_irq() after port cancellation, as we assert that
execlists->active is sane for cancellation (and is being reset by
reset_irq).

References: 0f36a85c3bd5 ("drm/i915: Flush pending interrupt following a GPU reset")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323101824.14645-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 107 +++++++++++++++----------------
 1 file changed, 53 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ce09c5ad334f..b4ab06b05e58 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -740,6 +740,57 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	}
 }
 
+static void clear_gtiir(struct intel_engine_cs *engine)
+{
+	static const u8 gtiir[] = {
+		[RCS]  = 0,
+		[BCS]  = 0,
+		[VCS]  = 1,
+		[VCS2] = 1,
+		[VECS] = 3,
+	};
+	struct drm_i915_private *dev_priv = engine->i915;
+	int i;
+
+	/* TODO: correctly reset irqs for gen11 */
+	if (WARN_ON_ONCE(INTEL_GEN(engine->i915) >= 11))
+		return;
+
+	GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+	/*
+	 * Clear any pending interrupt state.
+	 *
+	 * We do it twice out of paranoia that some of the IIR are
+	 * double buffered, and so if we only reset it once there may
+	 * still be an interrupt pending.
+	 */
+	for (i = 0; i < 2; i++) {
+		I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+			   engine->irq_keep_mask);
+		POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
+	}
+	GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
+		   engine->irq_keep_mask);
+}
+
+static void reset_irq(struct intel_engine_cs *engine)
+{
+	/* Mark all CS interrupts as complete */
+	smp_store_mb(engine->execlists.active, 0);
+	synchronize_hardirq(engine->i915->drm.irq);
+
+	clear_gtiir(engine);
+
+	/*
+	 * The port is checked prior to scheduling a tasklet, but
+	 * just in case we have suspended the tasklet to do the
+	 * wedging make sure that when it wakes, it decides there
+	 * is no work to do by clearing the irq_posted bit.
+	 */
+	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+}
+
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -767,6 +818,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
 	execlists_cancel_port_requests(execlists);
+	reset_irq(engine);
 
 	spin_lock(&engine->timeline->lock);
 
@@ -805,18 +857,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	spin_unlock(&engine->timeline->lock);
 
-	/* Mark all CS interrupts as complete */
-	smp_store_mb(execlists->active, 0);
-	synchronize_hardirq(engine->i915->drm.irq);
-
-	/*
-	 * The port is checked prior to scheduling a tasklet, but
-	 * just in case we have suspended the tasklet to do the
-	 * wedging make sure that when it wakes, it decides there
-	 * is no work to do by clearing the irq_posted bit.
-	 */
-	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-
 	local_irq_restore(flags);
 }
 
@@ -1566,14 +1606,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	return ret;
 }
 
-static u8 gtiir[] = {
-	[RCS] = 0,
-	[BCS] = 0,
-	[VCS] = 1,
-	[VCS2] = 1,
-	[VECS] = 3,
-};
-
 static void enable_execlists(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -1657,35 +1689,6 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 	return init_workarounds_ring(engine);
 }
 
-static void reset_irq(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int i;
-
-	/* TODO: correctly reset irqs for gen11 */
-	if (WARN_ON_ONCE(INTEL_GEN(engine->i915) >= 11))
-		return;
-
-	GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
-
-	/*
-	 * Clear any pending interrupt state.
-	 *
-	 * We do it twice out of paranoia that some of the IIR are double
-	 * buffered, and if we only reset it once there may still be
-	 * an interrupt pending.
-	 */
-	for (i = 0; i < 2; i++) {
-		I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
-			   engine->irq_keep_mask);
-		POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
-	}
-	GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
-		   engine->irq_keep_mask);
-
-	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-}
-
 static void reset_common_ring(struct intel_engine_cs *engine,
 			      struct i915_request *request)
 {
@@ -1699,8 +1702,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	/* See execlists_cancel_requests() for the irq/spinlock split. */
 	local_irq_save(flags);
 
-	reset_irq(engine);
-
 	/*
 	 * Catch up with any missed context-switch interrupts.
 	 *
@@ -1711,15 +1712,13 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * requests were completed.
 	 */
 	execlists_cancel_port_requests(execlists);
+	reset_irq(engine);
 
 	/* Push back any incomplete requests for replay after the reset. */
 	spin_lock(&engine->timeline->lock);
 	__unwind_incomplete_requests(engine);
 	spin_unlock(&engine->timeline->lock);
 
-	/* Mark all CS interrupts as complete */
-	execlists->active = 0;
-
 	local_irq_restore(flags);
 
 	/*

From 0f90603c33bdf6575cfdc81edd53f3f13ba166fb Mon Sep 17 00:00:00 2001
From: Imre Deak <imre.deak@intel.com>
Date: Thu, 22 Mar 2018 16:36:42 +0200
Subject: [PATCH 0104/1286] drm/i915: Fix hibernation with ACPI S0 target state
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After

commit dd9f31c7a3887950cbd0d49eb9d43f7a1518a356
Author: Imre Deak <imre.deak@intel.com>
Date:   Wed Aug 16 17:46:07 2017 +0300

    drm/i915/gen9+: Set same power state before hibernation image
    save/restore

during hibernation/suspend the power domain functionality got disabled,
after which resume could leave it incorrectly disabled if the ACPI
target state was S0 during suspend and i915 was not loaded by the loader
kernel.

This was caused by not taking into account whether we resumed from
hibernation in the condition for reinitializing the power domains.

Fix this by simply tracking if we suspended power domains during system
suspend and reinit power domains accordingly during resume. This will
result in reiniting power domains always when resuming from hibernation,
regardless of the platform and whether or not i915 is loaded by the
loader kernel.

The reason we didn't catch this earlier is that the enabled/disabled
state of power domains during PMSG_FREEZE/PMSG_QUIESCE is platform
and kernel config dependent: on my SKL the target state is S4
during PMSG_FREEZE and (with the driver loaded in the loader kernel)
S0 during PMSG_QUIESCE. On the reporter's machine it's S0 during
PMSG_FREEZE but (contrary to this) power domains are not initialized
during PMSG_QUIESCE since i915 is not loaded in the loader kernel, or
it's loaded but without the DMC firmware being available.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105196
Reported-and-tested-by: amn-bas@hotmail.com
Fixes: dd9f31c7a388 ("drm/i915/gen9+: Set same power state before hibernation image save/restore")
Cc: amn-bas@hotmail.com
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322143642.26883-1-imre.deak@intel.com
---
 drivers/gpu/drm/i915/i915_drv.c | 22 ++++++++++------------
 drivers/gpu/drm/i915/i915_drv.h |  2 +-
 2 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index db223378d84b..d354627882e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1607,15 +1607,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
-	bool fw_csr;
 	int ret;
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
 	intel_display_set_init_power(dev_priv, false);
 
-	fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
-		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
 	/*
 	 * In case of firmware assisted context save/restore don't manually
 	 * deinit the power domains. This also means the CSR/DMC firmware will
@@ -1623,8 +1620,11 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	 * also enable deeper system power states that would be blocked if the
 	 * firmware was inactive.
 	 */
-	if (!fw_csr)
+	if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
+	    dev_priv->csr.dmc_payload == NULL) {
 		intel_power_domains_suspend(dev_priv);
+		dev_priv->power_domains_suspended = true;
+	}
 
 	ret = 0;
 	if (IS_GEN9_LP(dev_priv))
@@ -1636,8 +1636,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	if (ret) {
 		DRM_ERROR("Suspend complete failed: %d\n", ret);
-		if (!fw_csr)
+		if (dev_priv->power_domains_suspended) {
 			intel_power_domains_init_hw(dev_priv, true);
+			dev_priv->power_domains_suspended = false;
+		}
 
 		goto out;
 	}
@@ -1658,8 +1660,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
 		pci_set_power_state(pdev, PCI_D3hot);
 
-	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
-
 out:
 	enable_rpm_wakeref_asserts(dev_priv);
 
@@ -1826,8 +1826,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	intel_uncore_resume_early(dev_priv);
 
 	if (IS_GEN9_LP(dev_priv)) {
-		if (!dev_priv->suspended_to_idle)
-			gen9_sanitize_dc_state(dev_priv);
+		gen9_sanitize_dc_state(dev_priv);
 		bxt_disable_dc9(dev_priv);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		hsw_disable_pc8(dev_priv);
@@ -1835,8 +1834,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_uncore_sanitize(dev_priv);
 
-	if (IS_GEN9_LP(dev_priv) ||
-	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+	if (dev_priv->power_domains_suspended)
 		intel_power_domains_init_hw(dev_priv, true);
 	else
 		intel_display_set_init_power(dev_priv, true);
@@ -1846,7 +1844,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	enable_rpm_wakeref_asserts(dev_priv);
 
 out:
-	dev_priv->suspended_to_idle = false;
+	dev_priv->power_domains_suspended = false;
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 28ab91812701..299b24045003 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1851,7 +1851,7 @@ struct drm_i915_private {
 	u32 bxt_phy_grc;
 
 	u32 suspend_count;
-	bool suspended_to_idle;
+	bool power_domains_suspended;
 	struct i915_suspend_saved_registers regfile;
 	struct vlv_s0ix_state vlv_s0ix_state;
 

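The essence of the fix above is bookkeeping: instead of re-deriving "did we
suspend the power domains?" at resume time from platform and firmware state
that can differ between freeze and resume, the decision is recorded in a flag
when it is made and that flag alone is consulted on resume. The sketch below
illustrates that idea with invented fields, not the real i915 suspend path.

/*
 * Record the decision at suspend time and consult only that flag on
 * resume, rather than recomputing the original condition from state that
 * may have changed in between.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool fw_assisted_save;		/* e.g. DMC firmware available */
	bool power_domains_suspended;	/* the recorded decision */
};

static void suspend(struct dev_state *d, bool hibernation)
{
	if (hibernation || !d->fw_assisted_save) {
		puts("suspending power domains");
		d->power_domains_suspended = true;
	}
}

static void resume(struct dev_state *d)
{
	if (d->power_domains_suspended)
		puts("re-initialising power domains");
	else
		puts("firmware kept them alive");
	d->power_domains_suspended = false;
}

int main(void)
{
	struct dev_state d = { .fw_assisted_save = true };

	suspend(&d, true);	/* hibernation: domains go down */
	resume(&d);		/* flag forces re-init, whatever the ACPI state was */
	return 0;
}
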
From e52482dec8366a98ac380b3bdc1a4abb8a390914 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Fri, 23 Mar 2018 15:32:28 -0400
Subject: [PATCH 0105/1286] drm/amdgpu: Add MMU notifier type for KFD userptr

This commit adds the notion of MMU notifier types GFX and HSA. GFX
continues to work like MMU notifiers did before. HSA adds support for
KFD userptr BOs. The implementation of KFD userptr eviction is a stub
for now.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h    |  1 +
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  7 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c        | 94 ++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h        | 11 ++-
 5 files changed, 97 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index c2c2bea731e0..83e0c5c331d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -104,6 +104,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
 
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 				uint32_t vmid, uint64_t gpu_addr,
 				uint32_t *ib_cmd, uint32_t ib_len);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d6e1479da38..2463ff6ac9ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1418,6 +1418,13 @@ bo_reserve_failed:
 	return ret;
 }
 
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
+				struct mm_struct *mm)
+{
+	/* TODO */
+	return 0;
+}
+
 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
  *   KFD process identified by process_info
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dc34b50e6b29..8e66f3702b7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -536,7 +536,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	if (p->bo_list) {
 		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 		if (p->bo_list->first_userptr != p->bo_list->num_entries)
-			p->mn = amdgpu_mn_get(p->adev);
+			p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
 	}
 
 	INIT_LIST_HEAD(&duplicates);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index bd67f4cb8e6c..f2ed18e2ff03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -36,12 +36,14 @@
 #include <drm/drm.h>
 
 #include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
 
 struct amdgpu_mn {
 	/* constant after initialisation */
 	struct amdgpu_device	*adev;
 	struct mm_struct	*mm;
 	struct mmu_notifier	mn;
+	enum amdgpu_mn_type	type;
 
 	/* only used on destruction */
 	struct work_struct	work;
@@ -185,7 +187,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 }
 
 /**
- * amdgpu_mn_invalidate_range_start - callback to notify about mm change
+ * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
  *
  * @mn: our notifier
  * @mn: the mm this callback is about
@@ -195,10 +197,10 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * We block for all BOs between start and end to be idle and
  * unmap them by move them into system domain again.
  */
-static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
-					     struct mm_struct *mm,
-					     unsigned long start,
-					     unsigned long end)
+static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+						 struct mm_struct *mm,
+						 unsigned long start,
+						 unsigned long end)
 {
 	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
@@ -219,6 +221,49 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	}
 }
 
+/**
+ * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mn: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * We temporarily evict all BOs between start and end. This
+ * necessitates evicting all user-mode queues of the process. The BOs
+ * are restored in amdgpu_mn_invalidate_range_end_hsa.
+ */
+static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+						 struct mm_struct *mm,
+						 unsigned long start,
+						 unsigned long end)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct interval_tree_node *it;
+
+	/* notification is exclusive, but interval is inclusive */
+	end -= 1;
+
+	amdgpu_mn_read_lock(rmn);
+
+	it = interval_tree_iter_first(&rmn->objects, start, end);
+	while (it) {
+		struct amdgpu_mn_node *node;
+		struct amdgpu_bo *bo;
+
+		node = container_of(it, struct amdgpu_mn_node, it);
+		it = interval_tree_iter_next(it, start, end);
+
+		list_for_each_entry(bo, &node->bos, mn_list) {
+			struct kgd_mem *mem = bo->kfd_bo;
+
+			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
+							 start, end))
+				amdgpu_amdkfd_evict_userptr(mem, mm);
+		}
+	}
+}
+
 /**
  * amdgpu_mn_invalidate_range_end - callback to notify about mm change
  *
@@ -239,23 +284,39 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
 	amdgpu_mn_read_unlock(rmn);
 }
 
-static const struct mmu_notifier_ops amdgpu_mn_ops = {
-	.release = amdgpu_mn_release,
-	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
-	.invalidate_range_end = amdgpu_mn_invalidate_range_end,
+static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
+	[AMDGPU_MN_TYPE_GFX] = {
+		.release = amdgpu_mn_release,
+		.invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx,
+		.invalidate_range_end = amdgpu_mn_invalidate_range_end,
+	},
+	[AMDGPU_MN_TYPE_HSA] = {
+		.release = amdgpu_mn_release,
+		.invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
+		.invalidate_range_end = amdgpu_mn_invalidate_range_end,
+	},
 };
 
+/* Low bits of any reasonable mm pointer will be unused due to struct
+ * alignment. Use these bits to make a unique key from the mm pointer
+ * and notifier type.
+ */
+#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
+
 /**
  * amdgpu_mn_get - create notifier context
  *
  * @adev: amdgpu device pointer
+ * @type: type of MMU notifier context
  *
  * Creates a notifier context for current->mm.
  */
-struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
+				enum amdgpu_mn_type type)
 {
 	struct mm_struct *mm = current->mm;
 	struct amdgpu_mn *rmn;
+	unsigned long key = AMDGPU_MN_KEY(mm, type);
 	int r;
 
 	mutex_lock(&adev->mn_lock);
@@ -264,8 +325,8 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 		return ERR_PTR(-EINTR);
 	}
 
-	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
-		if (rmn->mm == mm)
+	hash_for_each_possible(adev->mn_hash, rmn, node, key)
+		if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
 			goto release_locks;
 
 	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
@@ -276,8 +337,9 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 
 	rmn->adev = adev;
 	rmn->mm = mm;
-	rmn->mn.ops = &amdgpu_mn_ops;
 	init_rwsem(&rmn->lock);
+	rmn->type = type;
+	rmn->mn.ops = &amdgpu_mn_ops[type];
 	rmn->objects = RB_ROOT_CACHED;
 	mutex_init(&rmn->read_lock);
 	atomic_set(&rmn->recursion, 0);
@@ -286,7 +348,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	if (r)
 		goto free_rmn;
 
-	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
+	hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
 
 release_locks:
 	up_write(&mm->mmap_sem);
@@ -315,12 +377,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 {
 	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	enum amdgpu_mn_type type =
+		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
 	struct amdgpu_mn *rmn;
 	struct amdgpu_mn_node *node = NULL;
 	struct list_head bos;
 	struct interval_tree_node *it;
 
-	rmn = amdgpu_mn_get(adev);
+	rmn = amdgpu_mn_get(adev, type);
 	if (IS_ERR(rmn))
 		return PTR_ERR(rmn);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index d0095a3793b8..eb0f432f78fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -29,16 +29,23 @@
  */
 struct amdgpu_mn;
 
+enum amdgpu_mn_type {
+	AMDGPU_MN_TYPE_GFX,
+	AMDGPU_MN_TYPE_HSA,
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 void amdgpu_mn_lock(struct amdgpu_mn *mn);
 void amdgpu_mn_unlock(struct amdgpu_mn *mn);
-struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
+				enum amdgpu_mn_type type);
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
 #else
 static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
 static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
-static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
+					      enum amdgpu_mn_type type)
 {
 	return NULL;
 }

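The AMDGPU_MN_KEY() trick above relies on structure alignment: a pointer to a
sufficiently aligned object always has its low bits clear, so a small enum can
be folded into those bits to give each (mm, type) pair a distinct hash key.
The stand-alone demonstration below shows the idea; it is illustrative only
and does not reuse the amdgpu types.

/*
 * Fold a small type enum into the unused low bits of an aligned pointer
 * to form a distinct lookup key per (pointer, type) pair.
 */
#include <stdint.h>
#include <stdio.h>

enum mn_type { MN_TYPE_GFX, MN_TYPE_HSA };

struct mm_struct { long dummy; };	/* alignment >= alignof(long) */

#define MN_KEY(mm, type) ((unsigned long)(mm) + (type))

int main(void)
{
	struct mm_struct mm;

	printf("gfx key: %#lx\n", MN_KEY(&mm, MN_TYPE_GFX));
	printf("hsa key: %#lx\n", MN_KEY(&mm, MN_TYPE_HSA));
	printf("distinct: %s\n",
	       MN_KEY(&mm, MN_TYPE_GFX) != MN_KEY(&mm, MN_TYPE_HSA) ?
	       "yes" : "no");
	return 0;
}
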
From 0919195f2b0d7437cb0de49b8975fdd7b5575490 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Fri, 23 Mar 2018 15:32:29 -0400
Subject: [PATCH 0106/1286] drm/amdgpu: Enable amdgpu_ttm_tt_get_user_pages in
 worker threads

This commit allows amdgpu_ttm_tt_get_user_pages to work in a worker
thread rather than regular process context. This will be used when
KFD userptr BOs are restored after an MMU-notifier eviction.

v2: Manage task reference with get_task_struct/put_task_struct

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Oded Gabbay <oded.gabbay@gmail.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 38 +++++++++++++++++++------
 1 file changed, 29 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 205da3ff9cd0..c713d30cba86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -695,7 +695,7 @@ struct amdgpu_ttm_tt {
 	struct ttm_dma_tt	ttm;
 	u64			offset;
 	uint64_t		userptr;
-	struct mm_struct	*usermm;
+	struct task_struct	*usertask;
 	uint32_t		userflags;
 	spinlock_t              guptasklock;
 	struct list_head        guptasks;
@@ -706,14 +706,18 @@ struct amdgpu_ttm_tt {
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	struct mm_struct *mm = gtt->usertask->mm;
 	unsigned int flags = 0;
 	unsigned pinned = 0;
 	int r;
 
+	if (!mm) /* Happens during process shutdown */
+		return -ESRCH;
+
 	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
 		flags |= FOLL_WRITE;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&mm->mmap_sem);
 
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 		/* check that we only use anonymous memory
@@ -721,9 +725,9 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
 		struct vm_area_struct *vma;
 
-		vma = find_vma(gtt->usermm, gtt->userptr);
+		vma = find_vma(mm, gtt->userptr);
 		if (!vma || vma->vm_file || vma->vm_end < end) {
-			up_read(&current->mm->mmap_sem);
+			up_read(&mm->mmap_sem);
 			return -EPERM;
 		}
 	}
@@ -739,7 +743,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 		list_add(&guptask.list, &gtt->guptasks);
 		spin_unlock(&gtt->guptasklock);
 
-		r = get_user_pages(userptr, num_pages, flags, p, NULL);
+		if (mm == current->mm)
+			r = get_user_pages(userptr, num_pages, flags, p, NULL);
+		else
+			r = get_user_pages_remote(gtt->usertask,
+					mm, userptr, num_pages,
+					flags, p, NULL, NULL);
 
 		spin_lock(&gtt->guptasklock);
 		list_del(&guptask.list);
@@ -752,12 +761,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 
 	} while (pinned < ttm->num_pages);
 
-	up_read(&current->mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	return 0;
 
 release_pages:
 	release_pages(pages, pinned);
-	up_read(&current->mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	return r;
 }
 
@@ -978,6 +987,9 @@ static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+	if (gtt->usertask)
+		put_task_struct(gtt->usertask);
+
 	ttm_dma_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }
@@ -1079,8 +1091,13 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 		return -EINVAL;
 
 	gtt->userptr = addr;
-	gtt->usermm = current->mm;
 	gtt->userflags = flags;
+
+	if (gtt->usertask)
+		put_task_struct(gtt->usertask);
+	gtt->usertask = current->group_leader;
+	get_task_struct(gtt->usertask);
+
 	spin_lock_init(&gtt->guptasklock);
 	INIT_LIST_HEAD(&gtt->guptasks);
 	atomic_set(&gtt->mmu_invalidations, 0);
@@ -1096,7 +1113,10 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 	if (gtt == NULL)
 		return NULL;
 
-	return gtt->usermm;
+	if (gtt->usertask == NULL)
+		return NULL;
+
+	return gtt->usertask->mm;
 }
 
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,

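The v2 note above is about object lifetime: the ttm takes a counted reference
on the owning task at set-up so a worker thread can safely reach its mm later,
drops any previous reference when the owner is replaced, and drops it again on
destruction. A userspace sketch of that reference discipline follows, with
hypothetical types standing in for task_struct and the ttm.

/*
 * Store a counted reference to the owner at setup time, drop the old one
 * when replacing it, and drop it on destruction so references balance.
 */
#include <stdio.h>

struct task {
	int refcount;
	const char *name;
};

static struct task *task_get(struct task *t) { t->refcount++; return t; }

static void task_put(struct task *t)
{
	if (--t->refcount == 0)
		printf("releasing task %s\n", t->name);
}

struct gtt { struct task *usertask; };

static void gtt_set_user(struct gtt *g, struct task *owner)
{
	if (g->usertask)
		task_put(g->usertask);	/* drop the old reference, if any */
	g->usertask = task_get(owner);
}

static void gtt_destroy(struct gtt *g)
{
	if (g->usertask)
		task_put(g->usertask);
}

int main(void)
{
	struct task owner = { 1, "owner" };
	struct gtt g = { NULL };

	gtt_set_user(&g, &owner);
	gtt_destroy(&g);	/* reference balanced */
	task_put(&owner);	/* initial reference dropped: message printed */
	return 0;
}
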
From 6e08e0995b8f339fd2a7ee4fa11f17396405ef60 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Fri, 23 Mar 2018 15:32:30 -0400
Subject: [PATCH 0107/1286] drm/amdgpu: Avoid reclaim while holding locks taken
 in MMU notifier
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When an MMU notifier runs in memory reclaim context, it can deadlock
trying to take locks that are already held in the thread causing the
memory reclaim. The solution is to avoid memory reclaim while holding
locks that are taken in MMU notifiers.

This commit fixes kmalloc while holding rmn->lock by moving the call
outside the lock. The GFX MMU notifier also locks reservation objects.
I have no good solution for avoiding reclaim while holding reservation
objects. The HSA MMU notifier will not lock any reservation objects.

v2: Moved allocation outside lock instead of using GFP_NOIO

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Oded Gabbay <oded.gabbay@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index f2ed18e2ff03..83e344fbb50a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -380,7 +380,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	enum amdgpu_mn_type type =
 		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
 	struct amdgpu_mn *rmn;
-	struct amdgpu_mn_node *node = NULL;
+	struct amdgpu_mn_node *node = NULL, *new_node;
 	struct list_head bos;
 	struct interval_tree_node *it;
 
@@ -388,6 +388,10 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (IS_ERR(rmn))
 		return PTR_ERR(rmn);
 
+	new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
+	if (!new_node)
+		return -ENOMEM;
+
 	INIT_LIST_HEAD(&bos);
 
 	down_write(&rmn->lock);
@@ -401,13 +405,10 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 		list_splice(&node->bos, &bos);
 	}
 
-	if (!node) {
-		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
-		if (!node) {
-			up_write(&rmn->lock);
-			return -ENOMEM;
-		}
-	}
+	if (!node)
+		node = new_node;
+	else
+		kfree(new_node);
 
 	bo->mn = rmn;
 

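Moving the allocation outside the lock follows a standard pattern: allocate a
spare node before taking the lock that the MMU notifier also takes, and if the
search under the lock finds an existing node, simply free the spare. The
sketch below shows the shape of that pattern with a pthread mutex standing in
for rmn->lock; it is not the amdgpu code.

/*
 * Pre-allocate before locking so the allocation (which may trigger memory
 * reclaim) never happens with the notifier lock held; free the spare if
 * an existing entry is found.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static int register_key(int key)
{
	struct node *found, *new_node;

	new_node = malloc(sizeof(*new_node));	/* reclaim-safe: lock not held */
	if (!new_node)
		return -1;

	pthread_mutex_lock(&lock);
	for (found = head; found; found = found->next)
		if (found->key == key)
			break;

	if (!found) {
		new_node->key = key;
		new_node->next = head;
		head = new_node;
	} else {
		free(new_node);		/* existing entry wins, drop the spare */
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	register_key(42);
	register_key(42);	/* second call frees its preallocation */
	printf("head key: %d\n", head->key);
	return 0;
}
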
From d1853f42b63da94fa0147091d22bf5675b0ff89b Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Fri, 23 Mar 2018 15:32:31 -0400
Subject: [PATCH 0108/1286] drm/amdkfd: GFP_NOIO while holding locks taken in
 MMU notifier

When an MMU notifier runs in memory reclaim context, it can deadlock
trying to take locks that are already held in the thread causing the
memory reclaim. The solution is to avoid memory reclaim while holding
locks that are taken in MMU notifiers by using GFP_NOIO.

This commit fixes memory allocations done while holding the dqm->lock
which is needed in the MMU notifier (dqm->ops.evict_process_queues).

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c          | 2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3346699960dd..0434f659eeaf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -652,7 +652,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
 		return -ENOMEM;
 
-	*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+	*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
 	if ((*mem_obj) == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index c00c325ed3c9..2bc49c62cc8c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -412,7 +412,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
 	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
 		return NULL;
 
-	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
+	mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
 	if (!mqd)
 		return NULL;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 89e4242e43e7..481307b8b4db 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -394,7 +394,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
 	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
 		return NULL;
 
-	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
+	mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
 	if (!mqd)
 		return NULL;
 

From 6b95e7973a136181e37446bd29b0b2e2f0d2d653 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Fri, 23 Mar 2018 15:32:32 -0400
Subject: [PATCH 0109/1286] drm/amdkfd: Add quiesce_mm and resume_mm to
 kgd2kfd_calls

These interfaces allow KGD to stop and resume all GPU user mode queue
access to a process address space. This is needed for handling MMU
notifiers of userptrs mapped for GPU access in KFD VMs.
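
A minimal sketch of how a KGD-side caller is expected to use the two
entry points; example_invalidate_range() and the kgd2kfd pointer are
hypothetical, and in the real driver (see the following patch) the
quiesce happens in the MMU notifier while the resume runs later from a
delayed restore worker.

#include <linux/mm_types.h>
#include <kgd_kfd_interface.h>

static int example_invalidate_range(const struct kgd2kfd_calls *kgd2kfd,
				    struct mm_struct *mm)
{
	int r;

	/* Stop all user mode queue access for this address space */
	r = kgd2kfd->quiesce_mm(mm);
	if (r)
		return r;

	/* ... update page addresses and revalidate userptr BOs ... */

	/* Restart the queues once the mappings are valid again */
	return kgd2kfd->resume_mm(mm);
}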

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c       | 38 +++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_module.c       |  2 +
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  4 ++
 drivers/gpu/drm/amd/amdkfd/kfd_process.c      | 10 ++---
 .../gpu/drm/amd/include/kgd_kfd_interface.h   |  6 +++
 5 files changed, 55 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0434f659eeaf..7b5799530c0f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -541,6 +541,44 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 	spin_unlock(&kfd->interrupt_lock);
 }
 
+int kgd2kfd_quiesce_mm(struct mm_struct *mm)
+{
+	struct kfd_process *p;
+	int r;
+
+	/* Because we are called from arbitrary context (workqueue) as opposed
+	 * to process context, kfd_process could attempt to exit while we are
+	 * running so the lookup function increments the process ref count.
+	 */
+	p = kfd_lookup_process_by_mm(mm);
+	if (!p)
+		return -ESRCH;
+
+	r = kfd_process_evict_queues(p);
+
+	kfd_unref_process(p);
+	return r;
+}
+
+int kgd2kfd_resume_mm(struct mm_struct *mm)
+{
+	struct kfd_process *p;
+	int r;
+
+	/* Because we are called from arbitrary context (workqueue) as opposed
+	 * to process context, kfd_process could attempt to exit while we are
+	 * running so the lookup function increments the process ref count.
+	 */
+	p = kfd_lookup_process_by_mm(mm);
+	if (!p)
+		return -ESRCH;
+
+	r = kfd_process_restore_queues(p);
+
+	kfd_unref_process(p);
+	return r;
+}
+
 /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
  *   prepare for safe eviction of KFD BOs that belong to the specified
  *   process.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index e0c07d24d251..45bc458f7348 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -43,6 +43,8 @@ static const struct kgd2kfd_calls kgd2kfd = {
 	.interrupt	= kgd2kfd_interrupt,
 	.suspend	= kgd2kfd_suspend,
 	.resume		= kgd2kfd_resume,
+	.quiesce_mm	= kgd2kfd_quiesce_mm,
+	.resume_mm	= kgd2kfd_resume_mm,
 	.schedule_evict_and_restore_process =
 			  kgd2kfd_schedule_evict_and_restore_process,
 };
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 96a9cc0f02c9..4d5c49ef2dc5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -512,6 +512,8 @@ struct qcm_process_device {
 /* Approx. time before evicting the process again */
 #define PROCESS_ACTIVE_TIME_MS 10
 
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
 int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
 					       struct dma_fence *fence);
 
@@ -681,6 +683,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *);
 struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
 void kfd_unref_process(struct kfd_process *p);
+int kfd_process_evict_queues(struct kfd_process *p);
+int kfd_process_restore_queues(struct kfd_process *p);
 void kfd_suspend_all_processes(void);
 int kfd_resume_all_processes(void);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 1711ad0642f7..2791e72c2058 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -808,7 +808,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
  * Eviction is reference-counted per process-device. This means multiple
  * evictions from different sources can be nested safely.
  */
-static int process_evict_queues(struct kfd_process *p)
+int kfd_process_evict_queues(struct kfd_process *p)
 {
 	struct kfd_process_device *pdd;
 	int r = 0;
@@ -844,7 +844,7 @@ fail:
 }
 
 /* process_restore_queues - Restore all user queues of a process */
-static  int process_restore_queues(struct kfd_process *p)
+int kfd_process_restore_queues(struct kfd_process *p)
 {
 	struct kfd_process_device *pdd;
 	int r, ret = 0;
@@ -886,7 +886,7 @@ static void evict_process_worker(struct work_struct *work)
 	flush_delayed_work(&p->restore_work);
 
 	pr_debug("Started evicting pasid %d\n", p->pasid);
-	ret = process_evict_queues(p);
+	ret = kfd_process_evict_queues(p);
 	if (!ret) {
 		dma_fence_signal(p->ef);
 		dma_fence_put(p->ef);
@@ -946,7 +946,7 @@ static void restore_process_worker(struct work_struct *work)
 		return;
 	}
 
-	ret = process_restore_queues(p);
+	ret = kfd_process_restore_queues(p);
 	if (!ret)
 		pr_debug("Finished restoring pasid %d\n", p->pasid);
 	else
@@ -963,7 +963,7 @@ void kfd_suspend_all_processes(void)
 		cancel_delayed_work_sync(&p->eviction_work);
 		cancel_delayed_work_sync(&p->restore_work);
 
-		if (process_evict_queues(p))
+		if (kfd_process_evict_queues(p))
 			pr_err("Failed to suspend process %d\n", p->pasid);
 		dma_fence_signal(p->ef);
 		dma_fence_put(p->ef);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 237289a72bb7..286cfe7068c1 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -382,6 +382,10 @@ struct kfd2kgd_calls {
  *
  * @resume: Notifies amdkfd about a resume action done to a kgd device
  *
+ * @quiesce_mm: Quiesce all user queue access to specified MM address space
+ *
+ * @resume_mm: Resume user queue access to specified MM address space
+ *
  * @schedule_evict_and_restore_process: Schedules work queue that will prepare
  * for safe eviction of KFD BOs that belong to the specified process.
  *
@@ -399,6 +403,8 @@ struct kgd2kfd_calls {
 	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
 	void (*suspend)(struct kfd_dev *kfd);
 	int (*resume)(struct kfd_dev *kfd);
+	int (*quiesce_mm)(struct mm_struct *mm);
+	int (*resume_mm)(struct mm_struct *mm);
 	int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
 			struct dma_fence *fence);
 };

From 5ae0283e831a94c714fce61063e4724baf364ef3 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Fri, 23 Mar 2018 15:32:33 -0400
Subject: [PATCH 0110/1286] drm/amdgpu: Add userptr support for KFD

This adds support for allocating, mapping, unmapping and freeing
userptr BOs, and for handling MMU notifiers.
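
A reduced sketch of the eviction/restore bookkeeping this patch builds
on, with placeholder names (example_*); only the atomics and the
delayed-work pattern are shown, while the real code also updates user
pages and revalidates the BOs before resuming.

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_proc_info {
	atomic_t evicted_bos;
	struct delayed_work restore_work;
};

static void example_quiesce(void) { /* placeholder for quiesce_mm */ }
static void example_resume(void)  { /* placeholder for resume_mm */ }

static void example_on_evict(struct example_proc_info *info)
{
	/* First eviction stops the queues; later ones only bump the count */
	if (atomic_inc_return(&info->evicted_bos) == 1) {
		example_quiesce();
		schedule_delayed_work(&info->restore_work,
				      msecs_to_jiffies(1));
	}
}

static void example_restore_worker(struct work_struct *work)
{
	struct example_proc_info *info =
		container_of(to_delayed_work(work),
			     struct example_proc_info, restore_work);
	int seen = atomic_read(&info->evicted_bos);

	/* ... update user pages and revalidate the evicted BOs ... */

	/* Resume only if no new eviction raced with this restore */
	if (atomic_cmpxchg(&info->evicted_bos, seen, 0) == seen)
		example_resume();
	else
		schedule_delayed_work(&info->restore_work,
				      msecs_to_jiffies(1));
}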

v2: updated a comment

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h    |  11 +
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 567 +++++++++++++++++-
 2 files changed, 554 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 83e0c5c331d2..c3024b143f3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
+#include <linux/workqueue.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include "amdgpu_sync.h"
@@ -59,7 +60,9 @@ struct kgd_mem {
 
 	uint32_t mapping_flags;
 
+	atomic_t invalid;
 	struct amdkfd_process_info *process_info;
+	struct page **user_pages;
 
 	struct amdgpu_sync sync;
 
@@ -84,6 +87,9 @@ struct amdkfd_process_info {
 	struct list_head vm_list_head;
 	/* List head for all KFD BOs that belong to a KFD process. */
 	struct list_head kfd_bo_list;
+	/* List of userptr BOs that are valid or invalid */
+	struct list_head userptr_valid_list;
+	struct list_head userptr_inval_list;
 	/* Lock to protect kfd_bo_list */
 	struct mutex lock;
 
@@ -91,6 +97,11 @@ struct amdkfd_process_info {
 	unsigned int n_vms;
 	/* Eviction Fence */
 	struct amdgpu_amdkfd_fence *eviction_fence;
+
+	/* MMU-notifier related fields */
+	atomic_t evicted_bos;
+	struct delayed_work restore_userptr_work;
+	struct pid *pid;
 };
 
 int amdgpu_amdkfd_init(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 2463ff6ac9ca..5296e24fd662 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -23,6 +23,7 @@
 #define pr_fmt(fmt) "kfd2kgd: " fmt
 
 #include <linux/list.h>
+#include <linux/sched/mm.h>
 #include <drm/drmP.h>
 #include "amdgpu_object.h"
 #include "amdgpu_vm.h"
@@ -33,10 +34,20 @@
  */
 #define VI_BO_SIZE_ALIGN (0x8000)
 
+/* BO flag to indicate a KFD userptr BO */
+#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
+
+/* Userptr restore delay, just long enough to allow consecutive VM
+ * changes to accumulate
+ */
+#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
+
 /* Impose limit on how much memory KFD can use */
 static struct {
 	uint64_t max_system_mem_limit;
+	uint64_t max_userptr_mem_limit;
 	int64_t system_mem_used;
+	int64_t userptr_mem_used;
 	spinlock_t mem_limit_lock;
 } kfd_mem_limit;
 
@@ -57,6 +68,7 @@ static const char * const domain_bit_to_string[] = {
 
 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
 
+static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
 
 
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
@@ -78,6 +90,7 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
 
 /* Set memory usage limits. Current, limits are
  *  System (kernel) memory - 3/8th System RAM
+ *  Userptr memory - 3/4th System RAM
  */
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
 {
@@ -90,8 +103,10 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
 
 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
 	kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
-	pr_debug("Kernel memory limit %lluM\n",
-		(kfd_mem_limit.max_system_mem_limit >> 20));
+	kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
+	pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
+		(kfd_mem_limit.max_system_mem_limit >> 20),
+		(kfd_mem_limit.max_userptr_mem_limit >> 20));
 }
 
 static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
@@ -111,6 +126,16 @@ static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
 			goto err_no_mem;
 		}
 		kfd_mem_limit.system_mem_used += (acc_size + size);
+	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+		if ((kfd_mem_limit.system_mem_used + acc_size >
+			kfd_mem_limit.max_system_mem_limit) ||
+			(kfd_mem_limit.userptr_mem_used + (size + acc_size) >
+			kfd_mem_limit.max_userptr_mem_limit)) {
+			ret = -ENOMEM;
+			goto err_no_mem;
+		}
+		kfd_mem_limit.system_mem_used += acc_size;
+		kfd_mem_limit.userptr_mem_used += size;
 	}
 err_no_mem:
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
@@ -126,10 +151,16 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
 				       sizeof(struct amdgpu_bo));
 
 	spin_lock(&kfd_mem_limit.mem_limit_lock);
-	if (domain == AMDGPU_GEM_DOMAIN_GTT)
+	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 		kfd_mem_limit.system_mem_used -= (acc_size + size);
+	} else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+		kfd_mem_limit.system_mem_used -= acc_size;
+		kfd_mem_limit.userptr_mem_used -= size;
+	}
 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
 		  "kfd system memory accounting unbalanced");
+	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
+		  "kfd userptr memory accounting unbalanced");
 
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
@@ -138,12 +169,17 @@ void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
 {
 	spin_lock(&kfd_mem_limit.mem_limit_lock);
 
-	if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
+	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
+		kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
+		kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
+	} else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
 		kfd_mem_limit.system_mem_used -=
 			(bo->tbo.acc_size + amdgpu_bo_size(bo));
 	}
 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
 		  "kfd system memory accounting unbalanced");
+	WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
+		  "kfd userptr memory accounting unbalanced");
 
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
@@ -506,7 +542,8 @@ static void remove_bo_from_vm(struct amdgpu_device *adev,
 }
 
 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
-				struct amdkfd_process_info *process_info)
+				struct amdkfd_process_info *process_info,
+				bool userptr)
 {
 	struct ttm_validate_buffer *entry = &mem->validate_list;
 	struct amdgpu_bo *bo = mem->bo;
@@ -515,10 +552,95 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
 	entry->shared = true;
 	entry->bo = &bo->tbo;
 	mutex_lock(&process_info->lock);
-	list_add_tail(&entry->head, &process_info->kfd_bo_list);
+	if (userptr)
+		list_add_tail(&entry->head, &process_info->userptr_valid_list);
+	else
+		list_add_tail(&entry->head, &process_info->kfd_bo_list);
 	mutex_unlock(&process_info->lock);
 }
 
+/* Initializes user pages. It registers the MMU notifier and validates
+ * the userptr BO in the GTT domain.
+ *
+ * The BO must already be on the userptr_valid_list. Otherwise an
+ * eviction and restore may happen that leaves the new BO unmapped
+ * with the user mode queues running.
+ *
+ * Takes the process_info->lock to protect against concurrent restore
+ * workers.
+ *
+ * Returns 0 for success, negative errno for errors.
+ */
+static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
+			   uint64_t user_addr)
+{
+	struct amdkfd_process_info *process_info = mem->process_info;
+	struct amdgpu_bo *bo = mem->bo;
+	struct ttm_operation_ctx ctx = { true, false };
+	int ret = 0;
+
+	mutex_lock(&process_info->lock);
+
+	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
+	if (ret) {
+		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = amdgpu_mn_register(bo, user_addr);
+	if (ret) {
+		pr_err("%s: Failed to register MMU notifier: %d\n",
+		       __func__, ret);
+		goto out;
+	}
+
+	/* If no restore worker is running concurrently, user_pages
+	 * should not be allocated
+	 */
+	WARN(mem->user_pages, "Leaking user_pages array");
+
+	mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
+					   sizeof(struct page *),
+					   GFP_KERNEL | __GFP_ZERO);
+	if (!mem->user_pages) {
+		pr_err("%s: Failed to allocate pages array\n", __func__);
+		ret = -ENOMEM;
+		goto unregister_out;
+	}
+
+	ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
+	if (ret) {
+		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
+		goto free_out;
+	}
+
+	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
+
+	ret = amdgpu_bo_reserve(bo, true);
+	if (ret) {
+		pr_err("%s: Failed to reserve BO\n", __func__);
+		goto release_out;
+	}
+	amdgpu_ttm_placement_from_domain(bo, mem->domain);
+	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (ret)
+		pr_err("%s: failed to validate BO\n", __func__);
+	amdgpu_bo_unreserve(bo);
+
+release_out:
+	if (ret)
+		release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
+free_out:
+	kvfree(mem->user_pages);
+	mem->user_pages = NULL;
+unregister_out:
+	if (ret)
+		amdgpu_mn_unregister(bo);
+out:
+	mutex_unlock(&process_info->lock);
+	return ret;
+}
+
 /* Reserving a BO and its page table BOs must happen atomically to
  * avoid deadlocks. Some operations update multiple VMs at once. Track
  * all the reservation info in a context structure. Optionally a sync
@@ -748,7 +870,8 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 }
 
 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
-		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync)
+		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
+		bool no_update_pte)
 {
 	int ret;
 
@@ -762,6 +885,9 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 		return ret;
 	}
 
+	if (no_update_pte)
+		return 0;
+
 	ret = update_gpuvm_pte(adev, entry, sync);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
@@ -820,6 +946,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 		mutex_init(&info->lock);
 		INIT_LIST_HEAD(&info->vm_list_head);
 		INIT_LIST_HEAD(&info->kfd_bo_list);
+		INIT_LIST_HEAD(&info->userptr_valid_list);
+		INIT_LIST_HEAD(&info->userptr_inval_list);
 
 		info->eviction_fence =
 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
@@ -830,6 +958,11 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 			goto create_evict_fence_fail;
 		}
 
+		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
+		atomic_set(&info->evicted_bos, 0);
+		INIT_DELAYED_WORK(&info->restore_userptr_work,
+				  amdgpu_amdkfd_restore_userptr_worker);
+
 		*process_info = info;
 		*ef = dma_fence_get(&info->eviction_fence->base);
 	}
@@ -872,6 +1005,7 @@ reserve_pd_fail:
 		dma_fence_put(*ef);
 		*ef = NULL;
 		*process_info = NULL;
+		put_pid(info->pid);
 create_evict_fence_fail:
 		mutex_destroy(&info->lock);
 		kfree(info);
@@ -967,8 +1101,12 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 	/* Release per-process resources when last compute VM is destroyed */
 	if (!process_info->n_vms) {
 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
+		WARN_ON(!list_empty(&process_info->userptr_valid_list));
+		WARN_ON(!list_empty(&process_info->userptr_inval_list));
 
 		dma_fence_put(&process_info->eviction_fence->base);
+		cancel_delayed_work_sync(&process_info->restore_userptr_work);
+		put_pid(process_info->pid);
 		mutex_destroy(&process_info->lock);
 		kfree(process_info);
 	}
@@ -1003,9 +1141,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+	uint64_t user_addr = 0;
 	struct amdgpu_bo *bo;
 	int byte_align;
-	u32 alloc_domain;
+	u32 domain, alloc_domain;
 	u64 alloc_flags;
 	uint32_t mapping_flags;
 	int ret;
@@ -1014,14 +1153,21 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	 * Check on which domain to allocate BO
 	 */
 	if (flags & ALLOC_MEM_FLAGS_VRAM) {
-		alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
+		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
 		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
-		alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_flags = 0;
+	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+		alloc_flags = 0;
+		if (!offset || !*offset)
+			return -EINVAL;
+		user_addr = *offset;
 	} else {
 		return -EINVAL;
 	}
@@ -1078,18 +1224,34 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	}
 	bo->kfd_bo = *mem;
 	(*mem)->bo = bo;
+	if (user_addr)
+		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
 
 	(*mem)->va = va;
-	(*mem)->domain = alloc_domain;
+	(*mem)->domain = domain;
 	(*mem)->mapped_to_gpu_memory = 0;
 	(*mem)->process_info = avm->process_info;
-	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info);
+	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
+
+	if (user_addr) {
+		ret = init_user_pages(*mem, current->mm, user_addr);
+		if (ret) {
+			mutex_lock(&avm->process_info->lock);
+			list_del(&(*mem)->validate_list.head);
+			mutex_unlock(&avm->process_info->lock);
+			goto allocate_init_user_pages_failed;
+		}
+	}
 
 	if (offset)
 		*offset = amdgpu_bo_mmap_offset(bo);
 
 	return 0;
 
+allocate_init_user_pages_failed:
+	amdgpu_bo_unref(&bo);
+	/* Don't unreserve system mem limit twice */
+	goto err_reserve_system_mem;
 err_bo_create:
 	unreserve_system_mem_limit(adev, size, alloc_domain);
 err_reserve_system_mem:
@@ -1122,12 +1284,24 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	 * be freed anyway
 	 */
 
+	/* No more MMU notifiers */
+	amdgpu_mn_unregister(mem->bo);
+
 	/* Make sure restore workers don't access the BO any more */
 	bo_list_entry = &mem->validate_list;
 	mutex_lock(&process_info->lock);
 	list_del(&bo_list_entry->head);
 	mutex_unlock(&process_info->lock);
 
+	/* Free user pages if necessary */
+	if (mem->user_pages) {
+		pr_debug("%s: Freeing user_pages array\n", __func__);
+		if (mem->user_pages[0])
+			release_pages(mem->user_pages,
+					mem->bo->tbo.ttm->num_pages);
+		kvfree(mem->user_pages);
+	}
+
 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
 	if (unlikely(ret))
 		return ret;
@@ -1173,21 +1347,32 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	struct kfd_bo_va_list *bo_va_entry = NULL;
 	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
 	unsigned long bo_size;
+	bool is_invalid_userptr = false;
 
-	/* Make sure restore is not running concurrently.
+	bo = mem->bo;
+	if (!bo) {
+		pr_err("Invalid BO when mapping memory to GPU\n");
+		return -EINVAL;
+	}
+
+	/* Make sure restore is not running concurrently. Since we
+	 * don't map invalid userptr BOs, we rely on the next restore
+	 * worker to do the mapping
 	 */
 	mutex_lock(&mem->process_info->lock);
 
-	mutex_lock(&mem->lock);
-
-	bo = mem->bo;
-
-	if (!bo) {
-		pr_err("Invalid BO when mapping memory to GPU\n");
-		ret = -EINVAL;
-		goto out;
+	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
+	 * sure that the MMU notifier is no longer running
+	 * concurrently and the queues are actually stopped
+	 */
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+		down_write(&current->mm->mmap_sem);
+		is_invalid_userptr = atomic_read(&mem->invalid);
+		up_write(&current->mm->mmap_sem);
 	}
 
+	mutex_lock(&mem->lock);
+
 	domain = mem->domain;
 	bo_size = bo->tbo.mem.size;
 
@@ -1200,6 +1385,14 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	if (unlikely(ret))
 		goto out;
 
+	/* Userptr can be marked as "not invalid", but not actually be
+	 * validated yet (still in the system domain). In that case
+	 * the queues are still stopped and we can leave mapping for
+	 * the next restore worker
+	 */
+	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+		is_invalid_userptr = true;
+
 	if (check_if_add_bo_to_vm(avm, mem)) {
 		ret = add_bo_to_vm(adev, mem, avm, false,
 				&bo_va_entry);
@@ -1217,7 +1410,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			goto add_bo_to_vm_failed;
 	}
 
-	if (mem->mapped_to_gpu_memory == 0) {
+	if (mem->mapped_to_gpu_memory == 0 &&
+	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
 		/* Validate BO only once. The eviction fence gets added to BO
 		 * the first time it is mapped. Validate will wait for all
 		 * background evictions to complete.
@@ -1235,7 +1429,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 					entry->va, entry->va + bo_size,
 					entry);
 
-			ret = map_bo_to_gpuvm(adev, entry, ctx.sync);
+			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
+					      is_invalid_userptr);
 			if (ret) {
 				pr_err("Failed to map radeon bo to gpuvm\n");
 				goto map_bo_to_gpuvm_failed;
@@ -1418,13 +1613,337 @@ bo_reserve_failed:
 	return ret;
 }
 
+/* Evict a userptr BO by stopping the queues if necessary
+ *
+ * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
+ * cannot do any memory allocations, and cannot take any locks that
+ * are held elsewhere while allocating memory. Therefore this is as
+ * simple as possible, using atomic counters.
+ *
+ * It doesn't do anything to the BO itself. The real work happens in
+ * restore, where we get updated page addresses. This function only
+ * ensures that GPU access to the BO is stopped.
+ */
 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
 				struct mm_struct *mm)
 {
-	/* TODO */
+	struct amdkfd_process_info *process_info = mem->process_info;
+	int invalid, evicted_bos;
+	int r = 0;
+
+	invalid = atomic_inc_return(&mem->invalid);
+	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
+	if (evicted_bos == 1) {
+		/* First eviction, stop the queues */
+		r = kgd2kfd->quiesce_mm(mm);
+		if (r)
+			pr_err("Failed to quiesce KFD\n");
+		schedule_delayed_work(&process_info->restore_userptr_work,
+			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+	}
+
+	return r;
+}
+
+/* Update invalid userptr BOs
+ *
+ * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
+ * userptr_inval_list and updates user pages for all BOs that have
+ * been invalidated since their last update.
+ */
+static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
+				     struct mm_struct *mm)
+{
+	struct kgd_mem *mem, *tmp_mem;
+	struct amdgpu_bo *bo;
+	struct ttm_operation_ctx ctx = { false, false };
+	int invalid, ret;
+
+	/* Move all invalidated BOs to the userptr_inval_list and
+	 * release their user pages by migration to the CPU domain
+	 */
+	list_for_each_entry_safe(mem, tmp_mem,
+				 &process_info->userptr_valid_list,
+				 validate_list.head) {
+		if (!atomic_read(&mem->invalid))
+			continue; /* BO is still valid */
+
+		bo = mem->bo;
+
+		if (amdgpu_bo_reserve(bo, true))
+			return -EAGAIN;
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		amdgpu_bo_unreserve(bo);
+		if (ret) {
+			pr_err("%s: Failed to invalidate userptr BO\n",
+			       __func__);
+			return -EAGAIN;
+		}
+
+		list_move_tail(&mem->validate_list.head,
+			       &process_info->userptr_inval_list);
+	}
+
+	if (list_empty(&process_info->userptr_inval_list))
+		return 0; /* All evicted userptr BOs were freed */
+
+	/* Go through userptr_inval_list and update any invalid user_pages */
+	list_for_each_entry(mem, &process_info->userptr_inval_list,
+			    validate_list.head) {
+		invalid = atomic_read(&mem->invalid);
+		if (!invalid)
+			/* BO hasn't been invalidated since the last
+			 * revalidation attempt. Keep its BO list.
+			 */
+			continue;
+
+		bo = mem->bo;
+
+		if (!mem->user_pages) {
+			mem->user_pages =
+				kvmalloc_array(bo->tbo.ttm->num_pages,
+						 sizeof(struct page *),
+						 GFP_KERNEL | __GFP_ZERO);
+			if (!mem->user_pages) {
+				pr_err("%s: Failed to allocate pages array\n",
+				       __func__);
+				return -ENOMEM;
+			}
+		} else if (mem->user_pages[0]) {
+			release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
+		}
+
+		/* Get updated user pages */
+		ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
+						   mem->user_pages);
+		if (ret) {
+			mem->user_pages[0] = NULL;
+			pr_info("%s: Failed to get user pages: %d\n",
+				__func__, ret);
+			/* Pretend it succeeded. It will fail later
+			 * with a VM fault if the GPU tries to access
+			 * it. Better than hanging indefinitely with
+			 * stalled user mode queues.
+			 */
+		}
+
+		/* Mark the BO as valid unless it was invalidated
+		 * again concurrently
+		 */
+		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
+			return -EAGAIN;
+	}
+
 	return 0;
 }
 
+/* Validate invalid userptr BOs
+ *
+ * Validates BOs on the userptr_inval_list, and moves them back to the
+ * userptr_valid_list. Also updates GPUVM page tables with new page
+ * addresses and waits for the page table updates to complete.
+ */
+static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
+{
+	struct amdgpu_bo_list_entry *pd_bo_list_entries;
+	struct list_head resv_list, duplicates;
+	struct ww_acquire_ctx ticket;
+	struct amdgpu_sync sync;
+
+	struct amdgpu_vm *peer_vm;
+	struct kgd_mem *mem, *tmp_mem;
+	struct amdgpu_bo *bo;
+	struct ttm_operation_ctx ctx = { false, false };
+	int i, ret;
+
+	pd_bo_list_entries = kcalloc(process_info->n_vms,
+				     sizeof(struct amdgpu_bo_list_entry),
+				     GFP_KERNEL);
+	if (!pd_bo_list_entries) {
+		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&resv_list);
+	INIT_LIST_HEAD(&duplicates);
+
+	/* Get all the page directory BOs that need to be reserved */
+	i = 0;
+	list_for_each_entry(peer_vm, &process_info->vm_list_head,
+			    vm_list_node)
+		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
+				    &pd_bo_list_entries[i++]);
+	/* Add the userptr_inval_list entries to resv_list */
+	list_for_each_entry(mem, &process_info->userptr_inval_list,
+			    validate_list.head) {
+		list_add_tail(&mem->resv_list.head, &resv_list);
+		mem->resv_list.bo = mem->validate_list.bo;
+		mem->resv_list.shared = mem->validate_list.shared;
+	}
+
+	/* Reserve all BOs and page tables for validation */
+	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
+	WARN(!list_empty(&duplicates), "Duplicates should be empty");
+	if (ret)
+		goto out;
+
+	amdgpu_sync_create(&sync);
+
+	/* Avoid triggering eviction fences when unmapping invalid
+	 * userptr BOs (waits for all fences, doesn't use
+	 * FENCE_OWNER_VM)
+	 */
+	list_for_each_entry(peer_vm, &process_info->vm_list_head,
+			    vm_list_node)
+		amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
+						process_info->eviction_fence,
+						NULL, NULL);
+
+	ret = process_validate_vms(process_info);
+	if (ret)
+		goto unreserve_out;
+
+	/* Validate BOs and update GPUVM page tables */
+	list_for_each_entry_safe(mem, tmp_mem,
+				 &process_info->userptr_inval_list,
+				 validate_list.head) {
+		struct kfd_bo_va_list *bo_va_entry;
+
+		bo = mem->bo;
+
+		/* Copy pages array and validate the BO if we got user pages */
+		if (mem->user_pages[0]) {
+			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+						     mem->user_pages);
+			amdgpu_ttm_placement_from_domain(bo, mem->domain);
+			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+			if (ret) {
+				pr_err("%s: failed to validate BO\n", __func__);
+				goto unreserve_out;
+			}
+		}
+
+		/* Validate succeeded, now the BO owns the pages, free
+		 * our copy of the pointer array. Put this BO back on
+		 * the userptr_valid_list. If we need to revalidate
+		 * it, we need to start from scratch.
+		 */
+		kvfree(mem->user_pages);
+		mem->user_pages = NULL;
+		list_move_tail(&mem->validate_list.head,
+			       &process_info->userptr_valid_list);
+
+		/* Update mapping. If the BO was not validated
+		 * (because we couldn't get user pages), this will
+		 * clear the page table entries, which will result in
+		 * VM faults if the GPU tries to access the invalid
+		 * memory.
+		 */
+		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
+			if (!bo_va_entry->is_mapped)
+				continue;
+
+			ret = update_gpuvm_pte((struct amdgpu_device *)
+					       bo_va_entry->kgd_dev,
+					       bo_va_entry, &sync);
+			if (ret) {
+				pr_err("%s: update PTE failed\n", __func__);
+				/* make sure this gets validated again */
+				atomic_inc(&mem->invalid);
+				goto unreserve_out;
+			}
+		}
+	}
+
+	/* Update page directories */
+	ret = process_update_pds(process_info, &sync);
+
+unreserve_out:
+	list_for_each_entry(peer_vm, &process_info->vm_list_head,
+			    vm_list_node)
+		amdgpu_bo_fence(peer_vm->root.base.bo,
+				&process_info->eviction_fence->base, true);
+	ttm_eu_backoff_reservation(&ticket, &resv_list);
+	amdgpu_sync_wait(&sync, false);
+	amdgpu_sync_free(&sync);
+out:
+	kfree(pd_bo_list_entries);
+
+	return ret;
+}
+
+/* Worker callback to restore evicted userptr BOs
+ *
+ * Tries to update and validate all userptr BOs. If successful and no
+ * concurrent evictions happened, the queues are restarted. Otherwise,
+ * reschedule for another attempt later.
+ */
+static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct amdkfd_process_info *process_info =
+		container_of(dwork, struct amdkfd_process_info,
+			     restore_userptr_work);
+	struct task_struct *usertask;
+	struct mm_struct *mm;
+	int evicted_bos;
+
+	evicted_bos = atomic_read(&process_info->evicted_bos);
+	if (!evicted_bos)
+		return;
+
+	/* Reference task and mm in case of concurrent process termination */
+	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
+	if (!usertask)
+		return;
+	mm = get_task_mm(usertask);
+	if (!mm) {
+		put_task_struct(usertask);
+		return;
+	}
+
+	mutex_lock(&process_info->lock);
+
+	if (update_invalid_user_pages(process_info, mm))
+		goto unlock_out;
+	/* userptr_inval_list can be empty if all evicted userptr BOs
+	 * have been freed. In that case there is nothing to validate
+	 * and we can just restart the queues.
+	 */
+	if (!list_empty(&process_info->userptr_inval_list)) {
+		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
+			goto unlock_out; /* Concurrent eviction, try again */
+
+		if (validate_invalid_user_pages(process_info))
+			goto unlock_out;
+	}
+	/* Final check for concurrent eviction and atomic update. If
+	 * another eviction happens after successful update, it will
+	 * be a first eviction that calls quiesce_mm. The eviction
+	 * reference counting inside KFD will handle this case.
+	 */
+	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
+	    evicted_bos)
+		goto unlock_out;
+	evicted_bos = 0;
+	if (kgd2kfd->resume_mm(mm)) {
+		pr_err("%s: Failed to resume KFD\n", __func__);
+		/* No recovery from this failure. Probably the CP is
+		 * hanging. No point trying again.
+		 */
+	}
+unlock_out:
+	mutex_unlock(&process_info->lock);
+	mmput(mm);
+	put_task_struct(usertask);
+
+	/* If validation failed, reschedule another attempt */
+	if (evicted_bos)
+		schedule_delayed_work(&process_info->restore_userptr_work,
+			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+}
+
 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
  *   KFD process identified by process_info
  *

From 5bb975de3f279c6577fb54334cdd7e55c47a362c Mon Sep 17 00:00:00 2001
From: Manasi Navare <manasi.d.navare@intel.com>
Date: Fri, 23 Mar 2018 10:24:13 -0700
Subject: [PATCH 0111/1286] drm/i915/icl: Add register definitions for Combo
 PHY vswing sequences.

This patch adds the register definitions required for ICL voltage
swing programming for Combo PHY DDI ports. It uses the same bit
definitions and macros as the CNL voltage swing sequences.
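
The two-address helpers added below resolve to a register offset by
linear interpolation between the port A and port B addresses, assuming
the usual i915 _PORT() definition of (a) + (port) * ((b) - (a)). A
stand-alone user-space sketch of that math, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Port index 0 maps to addr_a, index 1 to addr_b */
static uint32_t pick_port_reg(unsigned int port, int32_t addr_a,
			      int32_t addr_b)
{
	return (uint32_t)(addr_a + (int32_t)port * (addr_b - addr_a));
}

int main(void)
{
	/* _ICL_PORT_PCS_DW1_GRP_A/_B offsets from the hunk below */
	printf("GRP, port A: 0x%x\n", pick_port_reg(0, 0x162604, 0x6C604));
	printf("GRP, port B: 0x%x\n", pick_port_reg(1, 0x162604, 0x6C604));
	return 0;
}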

v8 (from Paulo):
* Rebase.
v7:
* Kill _MMIO_PORT2_LN (Paulo)
v6:
* Replace some spaces with TAB (Paulo)
v5:
* Use _PORT instead of _PICK (Paulo)
* Remove DW7 defs for ICL, not used (Paulo)
v4:
* Rebase after _PICK was used instead of _PORT3
* Use _PICK for _MMIO_PORT2 since the address of B is less
than the address of A, so we can't use the math (Paulo)
v3:
* Make changes to the existing macro in a diff patch (Paulo)
v2:
* Add new defs for ICL regs (Paulo)

Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323172419.24911-2-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 44 +++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4e31dfff940a..407ee5ca527f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1697,6 +1697,16 @@ enum i915_power_well_id {
 						    _CNL_PORT_PCS_DW1_LN0_D, \
 						    _CNL_PORT_PCS_DW1_LN0_AE, \
 						    _CNL_PORT_PCS_DW1_LN0_F))
+#define _ICL_PORT_PCS_DW1_GRP_A		0x162604
+#define _ICL_PORT_PCS_DW1_GRP_B		0x6C604
+#define _ICL_PORT_PCS_DW1_LN0_A		0x162804
+#define _ICL_PORT_PCS_DW1_LN0_B		0x6C804
+#define ICL_PORT_PCS_DW1_GRP(port)	_MMIO_PORT(port,\
+						   _ICL_PORT_PCS_DW1_GRP_A, \
+						   _ICL_PORT_PCS_DW1_GRP_B)
+#define ICL_PORT_PCS_DW1_LN0(port)	_MMIO_PORT(port, \
+						   _ICL_PORT_PCS_DW1_LN0_A, \
+						   _ICL_PORT_PCS_DW1_LN0_B)
 #define   COMMON_KEEPER_EN		(1 << 26)
 
 /* CNL Port TX registers */
@@ -1729,6 +1739,16 @@ enum i915_power_well_id {
 
 #define CNL_PORT_TX_DW2_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
 #define CNL_PORT_TX_DW2_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
+#define _ICL_PORT_TX_DW2_GRP_A		0x162688
+#define _ICL_PORT_TX_DW2_GRP_B		0x6C688
+#define _ICL_PORT_TX_DW2_LN0_A		0x162888
+#define _ICL_PORT_TX_DW2_LN0_B		0x6C888
+#define ICL_PORT_TX_DW2_GRP(port)	_MMIO_PORT(port, \
+						   _ICL_PORT_TX_DW2_GRP_A, \
+						   _ICL_PORT_TX_DW2_GRP_B)
+#define ICL_PORT_TX_DW2_LN0(port)	_MMIO_PORT(port, \
+						   _ICL_PORT_TX_DW2_LN0_A, \
+						   _ICL_PORT_TX_DW2_LN0_B)
 #define   SWING_SEL_UPPER(x)		((x >> 3) << 15)
 #define   SWING_SEL_UPPER_MASK		(1 << 15)
 #define   SWING_SEL_LOWER(x)		((x & 0x7) << 11)
@@ -1743,6 +1763,19 @@ enum i915_power_well_id {
 #define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
 					     (ln * (_CNL_PORT_TX_DW4_LN1_AE - \
 						    _CNL_PORT_TX_DW4_LN0_AE)))
+#define _ICL_PORT_TX_DW4_GRP_A		0x162690
+#define _ICL_PORT_TX_DW4_GRP_B		0x6C690
+#define _ICL_PORT_TX_DW4_LN0_A		0x162890
+#define _ICL_PORT_TX_DW4_LN1_A		0x162990
+#define _ICL_PORT_TX_DW4_LN0_B		0x6C890
+#define ICL_PORT_TX_DW4_GRP(port)	_MMIO_PORT(port, \
+						   _ICL_PORT_TX_DW4_GRP_A, \
+						   _ICL_PORT_TX_DW4_GRP_B)
+#define ICL_PORT_TX_DW4_LN(port, ln)	_MMIO(_PORT(port, \
+						   _ICL_PORT_TX_DW4_LN0_A, \
+						   _ICL_PORT_TX_DW4_LN0_B) + \
+					      (ln * (_ICL_PORT_TX_DW4_LN1_A - \
+						     _ICL_PORT_TX_DW4_LN0_A)))
 #define   LOADGEN_SELECT		(1 << 31)
 #define   POST_CURSOR_1(x)		((x) << 12)
 #define   POST_CURSOR_1_MASK		(0x3F << 12)
@@ -1753,7 +1786,18 @@ enum i915_power_well_id {
 
 #define CNL_PORT_TX_DW5_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
 #define CNL_PORT_TX_DW5_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
+#define _ICL_PORT_TX_DW5_GRP_A		0x162694
+#define _ICL_PORT_TX_DW5_GRP_B		0x6C694
+#define _ICL_PORT_TX_DW5_LN0_A		0x162894
+#define _ICL_PORT_TX_DW5_LN0_B		0x6C894
+#define ICL_PORT_TX_DW5_GRP(port)	_MMIO_PORT(port, \
+						   _ICL_PORT_TX_DW5_GRP_A, \
+						   _ICL_PORT_TX_DW5_GRP_B)
+#define ICL_PORT_TX_DW5_LN0(port)	_MMIO_PORT(port, \
+						   _ICL_PORT_TX_DW5_LN0_A, \
+						   _ICL_PORT_TX_DW5_LN0_B)
 #define   TX_TRAINING_EN		(1 << 31)
+#define   TAP2_DISABLE			(1 << 30)
 #define   TAP3_DISABLE			(1 << 29)
 #define   SCALING_MODE_SEL(x)		((x) << 18)
 #define   SCALING_MODE_SEL_MASK		(0x7 << 18)

From 19b904f8df5c6c1418769de35bf238ef72e49814 Mon Sep 17 00:00:00 2001
From: Manasi Navare <manasi.d.navare@intel.com>
Date: Fri, 23 Mar 2018 10:24:14 -0700
Subject: [PATCH 0112/1286] drm/i915/icl: Add Combo PHY DDI Buffer translation
 tables for Icelake.

These tables are used during voltage swing sequence initialization on
Icelake.

The swing_sel in the spec's table is defined as a 4-bit binary value
such as 1010. However, the register bits are split into an upper 1-bit
swing_sel and a lower 3-bit swing_sel.

In this table we store the value as a single hex number, as mentioned
in the Bspec, and split it into the upper and lower bit fields only
while programming the registers.

For instance, b1010 is written as 0xA; while writing to the register,
the upper bit is obtained by (0xA & 0x8) shifted by the appropriate
amount, and the lower 3 bits are obtained by (0xA & 0x7) shifted by
the appropriate amount.
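
A small user-space illustration of that split, reusing the
SWING_SEL_UPPER()/SWING_SEL_LOWER() helpers that already exist in
i915_reg.h (redefined here with fully parenthesized parameters so the
snippet stands alone):

#include <stdio.h>

#define SWING_SEL_UPPER(x)	(((x) >> 3) << 15)	/* upper 1 bit */
#define SWING_SEL_LOWER(x)	(((x) & 0x7) << 11)	/* lower 3 bits */

int main(void)
{
	unsigned int swing_sel = 0xA;	/* b1010 from the table */

	printf("upper field: 0x%x\n", SWING_SEL_UPPER(swing_sel)); /* 0x8000 */
	printf("lower field: 0x%x\n", SWING_SEL_LOWER(swing_sel)); /* 0x1000 */
	printf("combined:    0x%x\n",
	       SWING_SEL_UPPER(swing_sel) | SWING_SEL_LOWER(swing_sel));
	return 0;
}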

Some of the columns need to be updated after the spec is updated.

v5 (from Paulo):
* Checkpatch fixes.
v4 (from Paulo):
* Fix minor typo
* Coding style conformance
v3:
* Get rid of HDMI/DVI tables, same as DP (Paulo)
* Use combo_phy in ddi buf trans table defs (Paulo)
v2:
* Added DW4_scaling_hex column to the translation tables (Rodrigo)

Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323172419.24911-3-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/intel_ddi.c | 99 ++++++++++++++++++++++++++++++++
 1 file changed, 99 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index c449619427da..229b9d5250c4 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -493,6 +493,105 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
 	{ 0x2, 0x7F, 0x3F, 0x00, 0x00 },	/* 400   400      0.0   */
 };
 
+struct icl_combo_phy_ddi_buf_trans {
+	u32 dw2_swing_select;
+	u32 dw2_swing_scalar;
+	u32 dw4_scaling;
+};
+
+/* Voltage Swing Programming for VccIO 0.85V for DP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
+				/* Voltage mV  db    */
+	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
+	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
+	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
+	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
+	{ 0xB, 0x70, 0x0018 },	/* 600         0.0   */
+	{ 0xB, 0x70, 0x3015 },	/* 600         3.5   */
+	{ 0xB, 0x70, 0x6012 },	/* 600         6.0   */
+	{ 0x5, 0x00, 0x0018 },	/* 800         0.0   */
+	{ 0x5, 0x00, 0x3015 },	/* 800         3.5   */
+	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
+};
+
+/* FIXME - After table is updated in Bspec */
+/* Voltage Swing Programming for VccIO 0.85V for eDP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
+				/* Voltage mV  db    */
+	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
+	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
+	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
+	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
+};
+
+/* Voltage Swing Programming for VccIO 0.95V for DP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
+				/* Voltage mV  db    */
+	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
+	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
+	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
+	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
+	{ 0x4, 0x98, 0x0018 },	/* 600         0.0   */
+	{ 0x4, 0x98, 0x3015 },	/* 600         3.5   */
+	{ 0x4, 0x98, 0x6012 },	/* 600         6.0   */
+	{ 0x5, 0x76, 0x0018 },	/* 800         0.0   */
+	{ 0x5, 0x76, 0x3015 },	/* 800         3.5   */
+	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
+};
+
+/* FIXME - After table is updated in Bspec */
+/* Voltage Swing Programming for VccIO 0.95V for eDP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
+				/* Voltage mV  db    */
+	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
+	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
+	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
+	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
+};
+
+/* Voltage Swing Programming for VccIO 1.05V for DP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
+				/* Voltage mV  db    */
+	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
+	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
+	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
+	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
+	{ 0x4, 0x98, 0x0018 },	/* 600         0.0   */
+	{ 0x4, 0x98, 0x3015 },	/* 600         3.5   */
+	{ 0x4, 0x98, 0x6012 },	/* 600         6.0   */
+	{ 0x5, 0x71, 0x0018 },	/* 800         0.0   */
+	{ 0x5, 0x71, 0x3015 },	/* 800         3.5   */
+	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
+};
+
+/* FIXME - After table is updated in Bspec */
+/* Voltage Swing Programming for VccIO 1.05V for eDP */
+static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
+				/* Voltage mV  db    */
+	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
+	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
+	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
+	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
+	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
+	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
+};
+
 static const struct ddi_buf_trans *
 bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {

From c92f47b5ec977a31c72a3c3514ae460b3dd725ff Mon Sep 17 00:00:00 2001
From: Manasi Navare <manasi.d.navare@intel.com>
Date: Fri, 23 Mar 2018 10:24:15 -0700
Subject: [PATCH 0113/1286] drm/i915/icl: Add register defs for voltage swing
 sequences for MG PHY DDI

On the Icelake platform, the MG PHY is used when operating in DP
alternate mode or in the legacy HDMI or DP modes. DDI ports C, D, E
and F are MG PHY DDI ports on ICL.

This patch adds the register definitions and macros required for
voltage swing programming on the MG PHY DDI ports.
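
The _ICL_MG_PHY_PORT_LN() macro added below combines a per-port stride
(the PORT2 minus PORT1 lane-0 addresses, with port C as index 0) and a
per-lane stride (lane 1 minus lane 0). A user-space sketch of that
arithmetic, illustrative only:

#include <stdint.h>
#include <stdio.h>

static uint32_t mg_phy_reg(unsigned int port_idx, unsigned int ln,
			   uint32_t ln0_port1, uint32_t ln0_port2,
			   uint32_t ln1_port1)
{
	uint32_t base = ln0_port1 + port_idx * (ln0_port2 - ln0_port1);

	return base + ln * (ln1_port1 - ln0_port1);
}

int main(void)
{
	/* TX1 LINK_PARAMS offsets from the hunk below */
	printf("port C, lane 0: 0x%x\n",
	       mg_phy_reg(0, 0, 0x16812C, 0x16912C, 0x16852C)); /* 0x16812C */
	printf("port D, lane 1: 0x%x\n",
	       mg_phy_reg(1, 1, 0x16812C, 0x16912C, 0x16852C)); /* 0x16952C */
	return 0;
}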

v4 (from Paulo):
* Use _PORT instead of _PICK
* Change some mask names to our current coding standards
* Stay under 80 columns
v3:
* Rebase on new revision of patches
v2:
* Remove whitespaces in the #defines (Paulo)

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323172419.24911-4-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 116 ++++++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 407ee5ca527f..aa001dd98cc5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1809,6 +1809,122 @@ enum i915_power_well_id {
 #define   N_SCALAR(x)			((x) << 24)
 #define   N_SCALAR_MASK			(0x7F << 24)
 
+#define _ICL_MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
+	_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
+
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1		0x16812C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1		0x16852C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2		0x16912C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT2		0x16952C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT3		0x16A12C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT3		0x16A52C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT4		0x16B12C
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT4		0x16B52C
+#define ICL_PORT_MG_TX1_LINK_PARAMS(port, ln) \
+	_ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+				      _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+				      _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1)
+
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1		0x1680AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1		0x1684AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2		0x1690AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT2		0x1694AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT3		0x16A0AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT3		0x16A4AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT4		0x16B0AC
+#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT4		0x16B4AC
+#define ICL_PORT_MG_TX2_LINK_PARAMS(port, ln) \
+	_ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+				      _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+				      _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define CRI_USE_FS32			(1 << 5)
+
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1		0x16814C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1		0x16854C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2		0x16914C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT2		0x16954C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT3		0x16A14C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT3		0x16A54C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT4		0x16B14C
+#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT4		0x16B54C
+#define ICL_PORT_MG_TX1_PISO_READLOAD(port, ln) \
+	_ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+				      _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+				      _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1		0x1680CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1		0x1684CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2		0x1690CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT2		0x1694CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT3		0x16A0CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT3		0x16A4CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT4		0x16B0CC
+#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT4		0x16B4CC
+#define ICL_PORT_MG_TX2_PISO_READLOAD(port, ln) \
+	_ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+				      _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+				      _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define CRI_CALCINIT					(1 << 1)
+
+#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1		0x168148
+#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1		0x168548
+#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2		0x169148
+#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT2		0x169548
+#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT3		0x16A148
+#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT3		0x16A548
+#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT4		0x16B148
+#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT4		0x16B548
+#define ICL_PORT_MG_TX1_SWINGCTRL(port, ln) \
+	_ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+				      _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+				      _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1)
+
+#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1		0x1680C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1		0x1684C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2		0x1690C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT2		0x1694C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT3		0x16A0C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT3		0x16A4C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT4		0x16B0C8
+#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT4		0x16B4C8
+#define ICL_PORT_MG_TX2_SWINGCTRL(port, ln) \
+	_ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+				      _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+				      _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define CRI_TXDEEMPH_OVERRIDE_17_12(x)			((x) << 0)
+#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK		(0x3F << 0)
+
+#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1			0x168144
+#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1			0x168544
+#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2			0x169144
+#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT2			0x169544
+#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT3			0x16A144
+#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT3			0x16A544
+#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT4			0x16B144
+#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT4			0x16B544
+#define ICL_PORT_MG_TX1_DRVCTRL(port, ln) \
+	_ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1, \
+				      _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2, \
+				      _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1)
+
+#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1			0x1680C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1			0x1684C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2			0x1690C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT2			0x1694C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT3			0x16A0C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT3			0x16A4C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT4			0x16B0C4
+#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT4			0x16B4C4
+#define ICL_PORT_MG_TX2_DRVCTRL(port, ln) \
+	_ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1, \
+				      _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2, \
+				      _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define CRI_TXDEEMPH_OVERRIDE_11_6(x)			((x) << 24)
+#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK			(0x3F << 24)
+#define CRI_TXDEEMPH_OVERRIDE_EN			(1 << 22)
+#define CRI_TXDEEMPH_OVERRIDE_5_0(x)			((x) << 16)
+#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK			(0x3F << 16)
+
 /* The spec defines this only for BXT PHY0, but lets assume that this
  * would exist for PHY1 too if it had a second channel.
  */

From cd96bea7ba90c45c8d1d315433c78021e56ec8c7 Mon Sep 17 00:00:00 2001
From: Manasi Navare <manasi.d.navare@intel.com>
Date: Fri, 23 Mar 2018 10:24:16 -0700
Subject: [PATCH 0114/1286] drm/i915/icl: Add Voltage swing table for MG PHY
 DDI Buffer

This table is used in the voltage swing programming sequence during
DDI buffer initialization for MG PHY DDI buffers on Icelake.

v2 (from Paulo):
* Fix white space issues.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323172419.24911-5-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/intel_ddi.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 229b9d5250c4..359acbfec4b1 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -592,6 +592,26 @@ static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_e
 	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
 };
 
+struct icl_mg_phy_ddi_buf_trans {
+	u32 cri_txdeemph_override_5_0;
+	u32 cri_txdeemph_override_11_6;
+	u32 cri_txdeemph_override_17_12;
+};
+
+static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
+				/* Voltage swing  pre-emphasis */
+	{ 0x0, 0x1B, 0x00 },	/* 0              0   */
+	{ 0x0, 0x23, 0x08 },	/* 0              1   */
+	{ 0x0, 0x2D, 0x12 },	/* 0              2   */
+	{ 0x0, 0x00, 0x00 },	/* 0              3   */
+	{ 0x0, 0x23, 0x00 },	/* 1              0   */
+	{ 0x0, 0x2B, 0x09 },	/* 1              1   */
+	{ 0x0, 0x2E, 0x11 },	/* 1              2   */
+	{ 0x0, 0x2F, 0x00 },	/* 2              0   */
+	{ 0x0, 0x33, 0x0C },	/* 2              1   */
+	{ 0x0, 0x00, 0x00 },	/* 3              0   */
+};
+
 static const struct ddi_buf_trans *
 bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {

From 96ae48311ebc23cfe4f929754dc1e1cfb0d031b0 Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Fri, 23 Mar 2018 10:24:17 -0700
Subject: [PATCH 0115/1286] drm/i915/icl: HPD pin for port F

Extend enum hpd_pin to port F so that we can start using this for ICL.

v2: Rebase.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323172419.24911-6-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h      | 1 +
 drivers/gpu/drm/i915/intel_hotplug.c | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 299b24045003..800230ba1c3b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -262,6 +262,7 @@ enum hpd_pin {
 	HPD_PORT_C,
 	HPD_PORT_D,
 	HPD_PORT_E,
+	HPD_PORT_F,
 	HPD_NUM_PINS
 };
 
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 0e3d3e89d66a..43aa92beff2a 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -100,6 +100,8 @@ enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
 		if (IS_CNL_WITH_PORT_F(dev_priv))
 			return PORT_F;
 		return PORT_E;
+	case HPD_PORT_F:
+		return PORT_F;
 	default:
 		return PORT_NONE; /* no port for this pin */
 	}
@@ -132,6 +134,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
 	case PORT_F:
 		if (IS_CNL_WITH_PORT_F(dev_priv))
 			return HPD_PORT_E;
+		return HPD_PORT_F;
 	default:
 		MISSING_CASE(port);
 		return HPD_NONE;

From 323301af974cdd4b797e5b54f5c418554f39d1fa Mon Sep 17 00:00:00 2001
From: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Date: Fri, 23 Mar 2018 10:24:18 -0700
Subject: [PATCH 0116/1286] drm/i915/icl: Added 5k source scaling support for
 Gen11 platform

Gen11 supports up to 5k source scaling.

v2: Refactored code as per review
v3: Corrected max vertical size and indentation
v4: Added max vertical dst size in the same patch

Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323172419.24911-7-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 11 +++++++----
 drivers/gpu/drm/i915/intel_drv.h     |  4 ++++
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b31b80643f87..d5b3c7eb2353 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4756,10 +4756,13 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 
 	/* range checks */
 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
-		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
-
-		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
-		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
+	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+	    (IS_GEN11(dev_priv) &&
+	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
+	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
+	    (!IS_GEN11(dev_priv) &&
+	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
 			"size is out of scaler range\n",
 			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b79a01b7f008..d2935acfedb1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -548,6 +548,10 @@ struct intel_initial_plane_config {
 #define SKL_MAX_DST_W 4096
 #define SKL_MIN_DST_H 8
 #define SKL_MAX_DST_H 4096
+#define ICL_MAX_SRC_W 5120
+#define ICL_MAX_SRC_H 4096
+#define ICL_MAX_DST_W 5120
+#define ICL_MAX_DST_H 4096
 
 struct intel_scaler {
 	int in_use;

From 7487508eff1fe787573aa6e0f3daaa6b12bd4520 Mon Sep 17 00:00:00 2001
From: Paulo Zanoni <paulo.r.zanoni@intel.com>
Date: Fri, 23 Mar 2018 12:58:53 -0700
Subject: [PATCH 0117/1286] drm/i915: protect macro parameters in
 SWING_SEL_{UPP,LO}WER

Protect the macro parameters with parentheses in order to avoid operator
precedence issues in macro evaluation when the macro argument is not a
single operand.

This is not a problem today, but it could be in the future. I found
this while reviewing a patch that introduces new callers for the
macros.
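
A minimal, standalone sketch (not i915 code; illustrative values only) of
the precedence problem the added parentheses guard against:

  #include <stdio.h>

  #define SWING_SEL_UPPER_OLD(x)	((x >> 3) << 15)	/* unprotected */
  #define SWING_SEL_UPPER_NEW(x)	(((x) >> 3) << 15)	/* protected */

  int main(void)
  {
  	/* With an expression argument, '>>' binds tighter than '|', so the
  	 * old form expands to ((0x4 | (0x3 >> 3)) << 15) instead of
  	 * (((0x4 | 0x3) >> 3) << 15). */
  	printf("old: %#x\n", SWING_SEL_UPPER_OLD(0x4 | 0x3));	/* 0x20000 */
  	printf("new: %#x\n", SWING_SEL_UPPER_NEW(0x4 | 0x3));	/* 0 */
  	return 0;
  }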

v2: Rebase.

Reference: commit 04416108ccea ("drm/i915/cnl: Add registers related to voltage swing sequences.")
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323195853.4599-1-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index aa001dd98cc5..b0c55f9d401b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1749,9 +1749,9 @@ enum i915_power_well_id {
 #define ICL_PORT_TX_DW2_LN0(port)	_MMIO_PORT(port, \
 						   _ICL_PORT_TX_DW2_LN0_A, \
 						   _ICL_PORT_TX_DW2_LN0_B)
-#define   SWING_SEL_UPPER(x)		((x >> 3) << 15)
+#define   SWING_SEL_UPPER(x)		(((x) >> 3) << 15)
 #define   SWING_SEL_UPPER_MASK		(1 << 15)
-#define   SWING_SEL_LOWER(x)		((x & 0x7) << 11)
+#define   SWING_SEL_LOWER(x)		(((x) & 0x7) << 11)
 #define   SWING_SEL_LOWER_MASK		(0x7 << 11)
 #define   RCOMP_SCALAR(x)		((x) << 0)
 #define   RCOMP_SCALAR_MASK		(0xFF << 0)

From aec06c7606be1635d82c16dda66282c2010bcd39 Mon Sep 17 00:00:00 2001
From: Arushi Singhal <arushisinghal19971997@gmail.com>
Date: Sun, 25 Mar 2018 23:31:32 +0530
Subject: [PATCH 0118/1286] gpu: drm/lease: Use list_{next/prev}_entry instead
 of list_entry

It's better to use list_{next/prev}_entry instead of list_entry
as it makes the code clearer to read.
This patch replaces list_entry with list_{next/prev}_entry.
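
For reference, list_next_entry() (and its list_prev_entry() counterpart
for the .prev direction) is essentially the open-coded form with the
typeof/member plumbing hidden:

  #define list_next_entry(pos, member) \
  	list_entry((pos)->member.next, typeof(*(pos)), member)

so

  master = list_entry(master->lessee_list.next, struct drm_master, lessee_list);

becomes

  master = list_next_entry(master, lessee_list);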

Signed-off-by: Arushi Singhal <arushisinghal19971997@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/1522000893-5331-2-git-send-email-arushisinghal19971997@gmail.com
---
 drivers/gpu/drm/drm_lease.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d345563fdff3..50c73c0a20b9 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -340,7 +340,7 @@ static void _drm_lease_revoke(struct drm_master *top)
 				break;
 
 			/* Over */
-			master = list_entry(master->lessee_list.next, struct drm_master, lessee_list);
+			master = list_next_entry(master, lessee_list);
 		}
 	}
 }

From 3f07f28b9712605d1adb589344ced72e8397dc8a Mon Sep 17 00:00:00 2001
From: Arushi Singhal <arushisinghal19971997@gmail.com>
Date: Sun, 25 Mar 2018 23:31:33 +0530
Subject: [PATCH 0119/1286] gpu: drm: nouveau: Use list_{next/prev}_entry
 instead of list_entry

It's better to use list_{next/prev}_entry instead of list_entry
as it makes the code clearer to read.
This patch replaces list_entry with list_{next/prev}_entry.

Signed-off-by: Arushi Singhal <arushisinghal19971997@gmail.com>
Acked-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/1522000893-5331-3-git-send-email-arushisinghal19971997@gmail.com
---
 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index e4c8d310d870..81c3567d4e67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -134,7 +134,7 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
 			       nvkm_volt_map(volt, volt->max2_id, clk->temp));
 
 	for (cstate = start; &cstate->head != &pstate->list;
-	     cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
+	     cstate = list_prev_entry(cstate, head)) {
 		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
 			break;
 	}

From 3903117609f40a0bc21b8c48533ac07a574c4cdc Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 22 Mar 2018 09:02:33 +0100
Subject: [PATCH 0120/1286] drm/gem: Document that handle_create must be the
 last step

It publishes the gem object to userspace; from that point other threads
can guess the id and start using it. And gem IDs are _very_ easy to
guess (it's just an idr).

Since gem objects are the only thing we allow drivers to create
themselves (all the kms/prime/syncobj stuff is handled by the core), no
other functions seem to be in need of this clarification.

Motivated by reviewing the xen-front kms driver.
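
As a hedged sketch of what the new wording implies for a driver's buffer
object creation path (the foo_* helper is hypothetical; only the DRM core
calls are real):

  static int foo_dumb_create(struct drm_file *file_priv,
  			   struct drm_device *dev,
  			   struct drm_mode_create_dumb *args)
  {
  	struct drm_gem_object *obj;
  	u32 handle;
  	int ret;

  	obj = foo_gem_object_create(dev, args->size);	/* hypothetical */
  	if (IS_ERR(obj))
  		return PTR_ERR(obj);

  	/* ... finish *all* setup of obj here ... */

  	/* Publishes obj to userspace, so it must be the last step. */
  	ret = drm_gem_handle_create(file_priv, obj, &handle);
  	/* The handle now holds its own reference; drop ours. */
  	drm_gem_object_put_unlocked(obj);
  	if (ret)
  		return ret;

  	args->handle = handle;
  	return 0;
  }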

Cc: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322080233.17266-1-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/drm_gem.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4975ba9a7bc8..4a16d7b26c89 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -436,9 +436,12 @@ err_unref:
  * @obj: object to register
  * @handlep: pionter to return the created handle to the caller
  *
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * Create a handle for this object. This adds a handle reference to the object,
+ * which includes a regular reference count. Callers will likely want to
+ * dereference the object afterwards.
+ *
+ * Since this publishes @obj to userspace it must be fully set up by this point,
+ * drivers must call this last in their buffer object creation callbacks.
  */
 int drm_gem_handle_create(struct drm_file *file_priv,
 			  struct drm_gem_object *obj,

From 8017e422af3f5ab90210bfbd17f9a8d8d56db289 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Fri, 23 Mar 2018 20:25:37 +0200
Subject: [PATCH 0121/1286] drm/scdc-helper: Convert errors into debug messages
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since we may attempt to reconfigure SCDC when the sink has already been
disconnected, we probably shouldn't scare the user with errors in dmesg
that are 100% expected in that case. Leave it up to the caller whether
to print an error message or not, and only output debug messages from
the helper itself.
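
A small, hypothetical caller to illustrate the new split: the helper
returns false and only logs at debug level, so the caller decides how
loud the failure should be (the sink_connected flag is made up here):

  if (!drm_scdc_set_scrambling(adapter, enable)) {
  	if (sink_connected)
  		DRM_ERROR("Failed to %s scrambling\n",
  			  enable ? "enable" : "disable");
  	else
  		DRM_DEBUG_KMS("Sink gone, leaving scrambling alone\n");
  }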

Cc: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323182537.30784-1-ville.syrjala@linux.intel.com
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
---
 drivers/gpu/drm/drm_scdc_helper.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c
index 657ea5ab6c3f..870e25f1f788 100644
--- a/drivers/gpu/drm/drm_scdc_helper.c
+++ b/drivers/gpu/drm/drm_scdc_helper.c
@@ -141,7 +141,7 @@ bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
 
 	ret = drm_scdc_readb(adapter, SCDC_SCRAMBLER_STATUS, &status);
 	if (ret < 0) {
-		DRM_ERROR("Failed to read scrambling status: %d\n", ret);
+		DRM_DEBUG_KMS("Failed to read scrambling status: %d\n", ret);
 		return false;
 	}
 
@@ -168,7 +168,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
 
 	ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
 	if (ret < 0) {
-		DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+		DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
 		return false;
 	}
 
@@ -179,7 +179,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
 
 	ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
 	if (ret < 0) {
-		DRM_ERROR("Failed to enable scrambling: %d\n", ret);
+		DRM_DEBUG_KMS("Failed to enable scrambling: %d\n", ret);
 		return false;
 	}
 
@@ -223,7 +223,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
 
 	ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
 	if (ret < 0) {
-		DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+		DRM_DEBUG_KMS("Failed to read TMDS config: %d\n", ret);
 		return false;
 	}
 
@@ -234,7 +234,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
 
 	ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
 	if (ret < 0) {
-		DRM_ERROR("Failed to set TMDS clock ratio: %d\n", ret);
+		DRM_DEBUG_KMS("Failed to set TMDS clock ratio: %d\n", ret);
 		return false;
 	}
 

From a7d2a87e99deb5481b5dd723408c42f460de25a3 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 22 Mar 2018 11:51:28 +0100
Subject: [PATCH 0122/1286] drm/tinydrm: Use gem_free_object_unlocked
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

tinydrm doesn't use dev->struct_mutex and therefore has no need to use
gem_free_object.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: "Noralf Trønnes" <noralf@tronnes.org>
Acked-by: Noralf Trønnes <noralf@tronnes.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322105133.11211-2-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/tinydrm/core/tinydrm-core.c | 2 +-
 include/drm/tinydrm/tinydrm.h               | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 4c6616278c48..24a33bf862fa 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
  * GEM object state and frees the memory used to store the object itself using
  * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel
  * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
- * can use this as their &drm_driver->gem_free_object callback.
+ * can use this as their &drm_driver->gem_free_object_unlocked callback.
  */
 void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 {
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 07a9a11fe19d..77a93ec577fd 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -41,7 +41,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
  * the &drm_driver structure.
  */
 #define TINYDRM_GEM_DRIVER_OPS \
-	.gem_free_object	= tinydrm_gem_cma_free_object, \
+	.gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
 	.gem_print_info		= drm_gem_cma_print_info, \
 	.gem_vm_ops		= &drm_gem_cma_vm_ops, \
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \

From eed7ec52f214bac2f25395ccaad610fbeb842a6e Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 24 Mar 2018 12:58:29 +0000
Subject: [PATCH 0123/1286] drm/i915/execlists: Clear user-active flag on
 preemption completion
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When cancelling the requests and clearing out the ports following a
successful preemption completion, also clear the active flag. I had
assumed that all preemptions would be followed by an immediate dequeue
(preserving the active user flag), but under rare circumstances we may
be triggering a preemption for the second port only for it to have
completed before the preemption kicks in, leaving execlists->active set
even though the system is now idle.

We can clear the flag inside the common execlists_cancel_port_requests()
as the other users also expect the semantics of active being cleared.

Fixes: f6322eddaff7 ("drm/i915/preemption: Allow preemption between submission ports")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180324125829.27026-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b4ab06b05e58..9c84af53db94 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * know the next preemption status we see corresponds
 		 * to this ELSP update.
 		 */
+		GEM_BUG_ON(!execlists_is_active(execlists,
+						EXECLISTS_ACTIVE_USER));
 		GEM_BUG_ON(!port_count(&port[0]));
 		if (port_count(&port[0]) > 1)
 			goto unlock;
@@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 		memset(port, 0, sizeof(*port));
 		port++;
 	}
+
+	execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
 }
 
 static void clear_gtiir(struct intel_engine_cs *engine)
@@ -1042,6 +1046,11 @@ static void execlists_submission_tasklet(unsigned long data)
 
 	if (fw)
 		intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+
+	/* If the engine is now idle, so should be the flag; and vice versa. */
+	GEM_BUG_ON(execlists_is_active(&engine->execlists,
+				       EXECLISTS_ACTIVE_USER) ==
+		   !port_isset(engine->execlists.port));
 }
 
 static void queue_request(struct intel_engine_cs *engine,

From 9040871336db2c2e379402d7ba9c275d6495b9e0 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 26 Mar 2018 12:50:36 +0100
Subject: [PATCH 0124/1286] drm/i915: Include submission tasklet state in
 engine dump

For the off-chance we have an interrupt posted and haven't processed the
CSB.

v2: Include tasklet enable/disable state for good measure.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326115044.2505-4-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index de09fa42a509..12486d8f534b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1859,12 +1859,15 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 		read = GEN8_CSB_READ_PTR(ptr);
 		write = GEN8_CSB_WRITE_PTR(ptr);
-		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
 			   read, execlists->csb_head,
 			   write,
 			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
 			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
-					  &engine->irq_posted)));
+					  &engine->irq_posted)),
+			   yesno(test_bit(TASKLET_STATE_SCHED,
+					  &engine->execlists.tasklet.state)),
+			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
 		if (read >= GEN8_CSB_ENTRIES)
 			read = 0;
 		if (write >= GEN8_CSB_ENTRIES)

From ae2f5c009335f4d273d2431539de91c8f28d4736 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 26 Mar 2018 12:50:34 +0100
Subject: [PATCH 0125/1286] drm/i915/execlists: Avoid kicking the submission
 too early for rescheduling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If the request is still waiting on external fences, it has not yet been
submitted to the HW queue and so we can forgo kicking the submission
tasklet when re-evaluating its priority.

This should have no impact other than reducing the number of tasklet
wakeups under signal heavy workloads (e.g. switching between engines).

v2: Use prebaked container_of()

References: f6322eddaff7 ("drm/i915/preemption: Allow preemption between submission ports")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326115044.2505-2-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9c84af53db94..ba7f7831f934 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1060,12 +1060,16 @@ static void queue_request(struct intel_engine_cs *engine,
 	list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests);
 }
 
+static void __submit_queue(struct intel_engine_cs *engine, int prio)
+{
+	engine->execlists.queue_priority = prio;
+	tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
 static void submit_queue(struct intel_engine_cs *engine, int prio)
 {
-	if (prio > engine->execlists.queue_priority) {
-		engine->execlists.queue_priority = prio;
-		tasklet_hi_schedule(&engine->execlists.tasklet);
-	}
+	if (prio > engine->execlists.queue_priority)
+		__submit_queue(engine, prio);
 }
 
 static void execlists_submit_request(struct i915_request *request)
@@ -1198,7 +1202,10 @@ static void execlists_schedule(struct i915_request *request, int prio)
 			__list_del_entry(&pt->link);
 			queue_request(engine, pt, prio);
 		}
-		submit_queue(engine, prio);
+
+		if (prio > engine->execlists.queue_priority &&
+		    i915_sw_fence_done(&pt_to_request(pt)->submit))
+			__submit_queue(engine, prio);
 	}
 
 	spin_unlock_irq(&engine->timeline->lock);

From 57bdff48a0a553468f16a65149d51213b5f25fee Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Mon, 19 Mar 2018 10:37:20 -0700
Subject: [PATCH 0126/1286] drm/i915: Reword warning for missing cases
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In some places we end up converting switch statements to a series of
if/else, particularly when introducing helper functions to handle a
group of cases. It's tempting to either leave a wrong warning (since now
we don't have a switch case anymore) or to convert to WARN(1, ...),
but we can just provide a better message and avoid the doubt when such
conversions arise.

Introducing a warning inside i915_driver_load() just as a test, we get:

[ 4535.233717] Missing case (ret == 0)
[ 4535.233868] WARNING: CPU: 1 PID: 795 at drivers/gpu/drm/i915/i915_drv.c:1341 i915_driver_load+0x42/0x10e0 [i915]

which is clear enough.
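
A hypothetical conversion site, just to show where the reworded message
ends up being used (the if/else chain and helpers are illustrative, not
part of this patch):

  /* was: switch (ret) { case 1: ...; default: MISSING_CASE(ret); } */
  if (ret == 1)
  	intel_setup_foo(dev_priv);	/* hypothetical */
  else if (ret == -ENODEV)
  	intel_disable_foo(dev_priv);	/* hypothetical */
  else
  	MISSING_CASE(ret);	/* e.g. "Missing case (ret == 0)" */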

v2: remove __func__ since it is already in the warning.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319173720.6974-1-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/i915_utils.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 51dbfe5bb418..0695717522ea 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,8 +40,8 @@
 #undef WARN_ON_ONCE
 #define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
 
-#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
-			     (long)(x), __func__)
+#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
+			     __stringify(x), (long)(x))
 
 #if GCC_VERSION >= 70000
 #define add_overflows(A, B) \

From 47aa1e73e72e5e0f5a07e30b84478461195845f5 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Tue, 20 Mar 2018 15:06:31 -0700
Subject: [PATCH 0127/1286] drm/i915: move dpll_info to header
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This will allow the struct to be embedded in intel_shared_dpll.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320220637.21480-2-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/intel_dpll_mgr.c |  7 -------
 drivers/gpu/drm/i915/intel_dpll_mgr.h | 10 ++++++++++
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 51c5ae4e9116..52d6e731c3e9 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1877,13 +1877,6 @@ static void intel_ddi_pll_init(struct drm_device *dev)
 	}
 }
 
-struct dpll_info {
-	const char *name;
-	const int id;
-	const struct intel_shared_dpll_funcs *funcs;
-	uint32_t flags;
-};
-
 struct intel_dpll_mgr {
 	const struct dpll_info *dpll_info;
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index f24ccf443d25..e99d6385478a 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -205,6 +205,16 @@ struct intel_shared_dpll_funcs {
 			     struct intel_dpll_hw_state *hw_state);
 };
 
+/**
+ * struct dpll_info - display PLL platform specific info
+ */
+struct dpll_info {
+	const char *name;
+	const int id;
+	const struct intel_shared_dpll_funcs *funcs;
+	uint32_t flags;
+};
+
 /**
  * struct intel_shared_dpll - display PLL with tracked state and users
  */

From e30379637fc712991ec8fff1f79344f384ace0aa Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Tue, 20 Mar 2018 15:06:32 -0700
Subject: [PATCH 0128/1286] drm/i915: add dpll_info inside intel_shared_dpll
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This way we can stop copying fields from dpll_info to intel_shared_dpll
one by one. The migration of each field will come in separate patches.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320220637.21480-3-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 1 +
 drivers/gpu/drm/i915/intel_dpll_mgr.h | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 52d6e731c3e9..30a9ac5322fe 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2410,6 +2410,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
 
 	for (i = 0; dpll_info[i].id >= 0; i++) {
 		WARN_ON(i != dpll_info[i].id);
+		dev_priv->shared_dplls[i].info = &dpll_info[i];
 
 		dev_priv->shared_dplls[i].id = dpll_info[i].id;
 		dev_priv->shared_dplls[i].name = dpll_info[i].name;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index e99d6385478a..bd2d3652cec4 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -253,6 +253,11 @@ struct intel_shared_dpll {
 	 */
 	struct intel_shared_dpll_funcs funcs;
 
+	/**
+	 * @info: platform specific info
+	 */
+	const struct dpll_info *info;
+
 #define INTEL_DPLL_ALWAYS_ON	(1 << 0)
 	/**
 	 * @flags:

From ee1398ba01b308015e568d46fd62a38ceebd7abe Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Tue, 20 Mar 2018 15:06:33 -0700
Subject: [PATCH 0129/1286] drm/i915: use funcs from intel_shared_dpll.info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace all uses of pll->funcs.* with pll->info->funcs->*. The extra
indirection here is not on any critical path, and it lets us keep all
the const data together.
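
Taken together with the two previous patches, the access pattern changes
roughly like this (sketch only; the call site is illustrative):

  struct intel_shared_dpll *pll = crtc_state->shared_dpll;

  /* before: each pll carried its own copy of the const hooks */
  pll->funcs.enable(dev_priv, pll);

  /* after: one shared, const dpll_info descriptor per platform */
  pll->info->funcs->enable(dev_priv, pll);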

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320220637.21480-4-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/intel_display.c  | 16 ++++++++--------
 drivers/gpu/drm/i915/intel_dpll_mgr.c |  9 ++++-----
 drivers/gpu/drm/i915/intel_dpll_mgr.h |  8 +++-----
 3 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d5b3c7eb2353..cee9ea3b7eeb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8763,8 +8763,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
 		pll = pipe_config->shared_dpll;
 
-		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
-						 &pipe_config->dpll_hw_state));
+		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+						&pipe_config->dpll_hw_state));
 
 		tmp = pipe_config->dpll_hw_state.dpll;
 		pipe_config->pixel_multiplier =
@@ -9240,8 +9240,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 
 	pll = pipe_config->shared_dpll;
 	if (pll) {
-		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
-						 &pipe_config->dpll_hw_state));
+		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+						&pipe_config->dpll_hw_state));
 	}
 
 	/*
@@ -11655,7 +11655,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
 
 	DRM_DEBUG_KMS("%s\n", pll->name);
 
-	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
+	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
 	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
 		I915_STATE_WARN(!pll->on && pll->active_mask,
@@ -15128,8 +15128,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
-		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
-						  &pll->state.hw_state);
+		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
+							&pll->state.hw_state);
 		pll->state.crtc_mask = 0;
 		for_each_intel_crtc(dev, crtc) {
 			struct intel_crtc_state *crtc_state =
@@ -15318,7 +15318,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 
 		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
 
-		pll->funcs.disable(dev_priv, pll);
+		pll->info->funcs->disable(dev_priv, pll);
 		pll->on = false;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 30a9ac5322fe..24d9aa180e0c 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -118,7 +118,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
 		return;
 
-	cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
+	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
 	I915_STATE_WARN(cur_state != state,
 	     "%s assertion failure (expected %s, current %s)\n",
 			pll->name, onoff(state), onoff(cur_state));
@@ -147,7 +147,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 		WARN_ON(pll->on);
 		assert_shared_dpll_disabled(dev_priv, pll);
 
-		pll->funcs.prepare(dev_priv, pll);
+		pll->info->funcs->prepare(dev_priv, pll);
 	}
 	mutex_unlock(&dev_priv->dpll_lock);
 }
@@ -190,7 +190,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
 	WARN_ON(pll->on);
 
 	DRM_DEBUG_KMS("enabling %s\n", pll->name);
-	pll->funcs.enable(dev_priv, pll);
+	pll->info->funcs->enable(dev_priv, pll);
 	pll->on = true;
 
 out:
@@ -232,7 +232,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
 		goto out;
 
 	DRM_DEBUG_KMS("disabling %s\n", pll->name);
-	pll->funcs.disable(dev_priv, pll);
+	pll->info->funcs->disable(dev_priv, pll);
 	pll->on = false;
 
 out:
@@ -2414,7 +2414,6 @@ void intel_shared_dpll_init(struct drm_device *dev)
 
 		dev_priv->shared_dplls[i].id = dpll_info[i].id;
 		dev_priv->shared_dplls[i].name = dpll_info[i].name;
-		dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
 		dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index bd2d3652cec4..f49382207a0a 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -211,6 +211,9 @@ struct intel_shared_dpll_funcs {
 struct dpll_info {
 	const char *name;
 	const int id;
+	/**
+	 * @funcs: platform specific hooks
+	 */
 	const struct intel_shared_dpll_funcs *funcs;
 	uint32_t flags;
 };
@@ -248,11 +251,6 @@ struct intel_shared_dpll {
 	 */
 	enum intel_dpll_id id;
 
-	/**
-	 * @funcs: platform specific hooks
-	 */
-	struct intel_shared_dpll_funcs funcs;
-
 	/**
 	 * @info: platform specific info
 	 */

From 72f775fa284886893bec4a189ed38ac30e2535aa Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Tue, 20 Mar 2018 15:06:34 -0700
Subject: [PATCH 0130/1286] drm/i915: use name from intel_shared_dpll.info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace all uses of pll->name with pll->info->name.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320220637.21480-5-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c   |  3 ++-
 drivers/gpu/drm/i915/intel_display.c  |  7 ++++---
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 26 ++++++++++++++------------
 drivers/gpu/drm/i915/intel_dpll_mgr.h |  8 +++-----
 4 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7816cd53100a..057fe12124d8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3285,7 +3285,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
-		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
+			   pll->id);
 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
 		seq_printf(m, " tracked hardware state:\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cee9ea3b7eeb..5f79444d12c5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11653,7 +11653,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
 
 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
 
-	DRM_DEBUG_KMS("%s\n", pll->name);
+	DRM_DEBUG_KMS("%s\n", pll->info->name);
 
 	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
@@ -15142,7 +15142,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		pll->active_mask = pll->state.crtc_mask;
 
 		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
-			      pll->name, pll->state.crtc_mask, pll->on);
+			      pll->info->name, pll->state.crtc_mask, pll->on);
 	}
 
 	for_each_intel_encoder(dev, encoder) {
@@ -15316,7 +15316,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 		if (!pll->on || pll->active_mask)
 			continue;
 
-		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
+			      pll->info->name);
 
 		pll->info->funcs->disable(dev_priv, pll);
 		pll->on = false;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 24d9aa180e0c..ed46ade0efff 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -121,7 +121,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
 	I915_STATE_WARN(cur_state != state,
 	     "%s assertion failure (expected %s, current %s)\n",
-			pll->name, onoff(state), onoff(cur_state));
+			pll->info->name, onoff(state), onoff(cur_state));
 }
 
 /**
@@ -143,7 +143,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 	mutex_lock(&dev_priv->dpll_lock);
 	WARN_ON(!pll->state.crtc_mask);
 	if (!pll->active_mask) {
-		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
 		WARN_ON(pll->on);
 		assert_shared_dpll_disabled(dev_priv, pll);
 
@@ -179,7 +179,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
 	pll->active_mask |= crtc_mask;
 
 	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
-		      pll->name, pll->active_mask, pll->on,
+		      pll->info->name, pll->active_mask, pll->on,
 		      crtc->base.base.id);
 
 	if (old_mask) {
@@ -189,7 +189,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
 	}
 	WARN_ON(pll->on);
 
-	DRM_DEBUG_KMS("enabling %s\n", pll->name);
+	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
 	pll->info->funcs->enable(dev_priv, pll);
 	pll->on = true;
 
@@ -221,7 +221,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
 		goto out;
 
 	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
-		      pll->name, pll->active_mask, pll->on,
+		      pll->info->name, pll->active_mask, pll->on,
 		      crtc->base.base.id);
 
 	assert_shared_dpll_enabled(dev_priv, pll);
@@ -231,7 +231,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
 	if (pll->active_mask)
 		goto out;
 
-	DRM_DEBUG_KMS("disabling %s\n", pll->name);
+	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
 	pll->info->funcs->disable(dev_priv, pll);
 	pll->on = false;
 
@@ -263,7 +263,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
 			   &shared_dpll[i].hw_state,
 			   sizeof(crtc_state->dpll_hw_state)) == 0) {
 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
-				      crtc->base.base.id, crtc->base.name, pll->name,
+				      crtc->base.base.id, crtc->base.name,
+				      pll->info->name,
 				      shared_dpll[i].crtc_mask,
 				      pll->active_mask);
 			return pll;
@@ -275,7 +276,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
 		pll = &dev_priv->shared_dplls[i];
 		if (shared_dpll[i].crtc_mask == 0) {
 			DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
-				      crtc->base.base.id, crtc->base.name, pll->name);
+				      crtc->base.base.id, crtc->base.name,
+				      pll->info->name);
 			return pll;
 		}
 	}
@@ -298,7 +300,7 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
 			crtc_state->dpll_hw_state;
 
 	crtc_state->shared_dpll = pll;
-	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
+	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
 			 pipe_name(crtc->pipe));
 
 	shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
@@ -429,7 +431,8 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 		pll = &dev_priv->shared_dplls[i];
 
 		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
-			      crtc->base.base.id, crtc->base.name, pll->name);
+			      crtc->base.base.id, crtc->base.name,
+			      pll->info->name);
 	} else {
 		pll = intel_find_shared_dpll(crtc, crtc_state,
 					     DPLL_ID_PCH_PLL_A,
@@ -1824,7 +1827,7 @@ bxt_get_dpll(struct intel_crtc *crtc,
 	pll = intel_get_shared_dpll_by_id(dev_priv, i);
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
-		      crtc->base.base.id, crtc->base.name, pll->name);
+		      crtc->base.base.id, crtc->base.name, pll->info->name);
 
 	intel_reference_shared_dpll(pll, crtc_state);
 
@@ -2413,7 +2416,6 @@ void intel_shared_dpll_init(struct drm_device *dev)
 		dev_priv->shared_dplls[i].info = &dpll_info[i];
 
 		dev_priv->shared_dplls[i].id = dpll_info[i].id;
-		dev_priv->shared_dplls[i].name = dpll_info[i].name;
 		dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index f49382207a0a..e5ed3e0269e3 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -209,6 +209,9 @@ struct intel_shared_dpll_funcs {
  * struct dpll_info - display PLL platform specific info
  */
 struct dpll_info {
+	/**
+	 * @name: DPLL name; used for logging
+	 */
 	const char *name;
 	const int id;
 	/**
@@ -240,11 +243,6 @@ struct intel_shared_dpll {
 	 */
 	bool on;
 
-	/**
-	 * @name: DPLL name; used for logging
-	 */
-	const char *name;
-
 	/**
 	 * @id: unique indentifier for this DPLL; should match the index in the
 	 * dev_priv->shared_dplls array

From 0823eb9c52f9e47564277e5bdd6b78cd95cf9f9c Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Tue, 20 Mar 2018 15:06:35 -0700
Subject: [PATCH 0131/1286] drm/i915: use id from intel_shared_dpll.info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace all uses of pll->id with pll->info->id. In functions using it
more than once, a local id variable is added to make the code easier to
read.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320220637.21480-6-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c   |   2 +-
 drivers/gpu/drm/i915/intel_ddi.c      |   8 +-
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 160 ++++++++++++++------------
 drivers/gpu/drm/i915/intel_dpll_mgr.h |  10 +-
 4 files changed, 98 insertions(+), 82 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 057fe12124d8..ff90577da450 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3286,7 +3286,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
-			   pll->id);
+			   pll->info->id);
 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
 		seq_printf(m, " tracked hardware state:\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 359acbfec4b1..a6672a9abd85 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -994,7 +994,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 
 static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 {
-	switch (pll->id) {
+	switch (pll->info->id) {
 	case DPLL_ID_WRPLL1:
 		return PORT_CLK_SEL_WRPLL1;
 	case DPLL_ID_WRPLL2:
@@ -1008,7 +1008,7 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 	case DPLL_ID_LCPLL_2700:
 		return PORT_CLK_SEL_LCPLL_2700;
 	default:
-		MISSING_CASE(pll->id);
+		MISSING_CASE(pll->info->id);
 		return PORT_CLK_SEL_NONE;
 	}
 }
@@ -2250,7 +2250,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
 		val = I915_READ(DPCLKA_CFGCR0);
 		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
-		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
+		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
 		I915_WRITE(DPCLKA_CFGCR0, val);
 
 		/*
@@ -2267,7 +2267,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 
 		val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
 			 DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
-		val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
+		val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
 			DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
 
 		I915_WRITE(DPLL_CTRL2, val);
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index ed46ade0efff..48466b19d1f6 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -291,19 +291,19 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
 {
 	struct intel_shared_dpll_state *shared_dpll;
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	enum intel_dpll_id i = pll->id;
+	const enum intel_dpll_id id = pll->info->id;
 
 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 
-	if (shared_dpll[i].crtc_mask == 0)
-		shared_dpll[i].hw_state =
+	if (shared_dpll[id].crtc_mask == 0)
+		shared_dpll[id].hw_state =
 			crtc_state->dpll_hw_state;
 
 	crtc_state->shared_dpll = pll;
 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
 			 pipe_name(crtc->pipe));
 
-	shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
+	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
 }
 
 /**
@@ -343,15 +343,16 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 				      struct intel_shared_dpll *pll,
 				      struct intel_dpll_hw_state *hw_state)
 {
+	const enum intel_dpll_id id = pll->info->id;
 	uint32_t val;
 
 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;
 
-	val = I915_READ(PCH_DPLL(pll->id));
+	val = I915_READ(PCH_DPLL(id));
 	hw_state->dpll = val;
-	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
-	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+	hw_state->fp0 = I915_READ(PCH_FP0(id));
+	hw_state->fp1 = I915_READ(PCH_FP1(id));
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
 
@@ -361,8 +362,10 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
 				 struct intel_shared_dpll *pll)
 {
-	I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
-	I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
+	const enum intel_dpll_id id = pll->info->id;
+
+	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
+	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
 }
 
 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -381,13 +384,15 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
+	const enum intel_dpll_id id = pll->info->id;
+
 	/* PCH refclock must be enabled first */
 	ibx_assert_pch_refclk_enabled(dev_priv);
 
-	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
+	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
 
 	/* Wait for the clocks to stabilize. */
-	POSTING_READ(PCH_DPLL(pll->id));
+	POSTING_READ(PCH_DPLL(id));
 	udelay(150);
 
 	/* The pixel multiplier can only be updated once the
@@ -395,14 +400,15 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 	 *
 	 * So write it again.
 	 */
-	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
-	POSTING_READ(PCH_DPLL(pll->id));
+	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
+	POSTING_READ(PCH_DPLL(id));
 	udelay(200);
 }
 
 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 				 struct intel_shared_dpll *pll)
 {
+	const enum intel_dpll_id id = pll->info->id;
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 
@@ -412,8 +418,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
 	}
 
-	I915_WRITE(PCH_DPLL(pll->id), 0);
-	POSTING_READ(PCH_DPLL(pll->id));
+	I915_WRITE(PCH_DPLL(id), 0);
+	POSTING_READ(PCH_DPLL(id));
 	udelay(200);
 }
 
@@ -469,8 +475,10 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
-	I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
-	POSTING_READ(WRPLL_CTL(pll->id));
+	const enum intel_dpll_id id = pll->info->id;
+
+	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
+	POSTING_READ(WRPLL_CTL(id));
 	udelay(20);
 }
 
@@ -485,11 +493,12 @@ static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
 				  struct intel_shared_dpll *pll)
 {
+	const enum intel_dpll_id id = pll->info->id;
 	uint32_t val;
 
-	val = I915_READ(WRPLL_CTL(pll->id));
-	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
-	POSTING_READ(WRPLL_CTL(pll->id));
+	val = I915_READ(WRPLL_CTL(id));
+	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+	POSTING_READ(WRPLL_CTL(id));
 }
 
 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
@@ -506,12 +515,13 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
 				       struct intel_shared_dpll *pll,
 				       struct intel_dpll_hw_state *hw_state)
 {
+	const enum intel_dpll_id id = pll->info->id;
 	uint32_t val;
 
 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;
 
-	val = I915_READ(WRPLL_CTL(pll->id));
+	val = I915_READ(WRPLL_CTL(id));
 	hw_state->wrpll = val;
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
@@ -917,13 +927,15 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = {
 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
 				    struct intel_shared_dpll *pll)
 {
+	const enum intel_dpll_id id = pll->info->id;
 	uint32_t val;
 
 	val = I915_READ(DPLL_CTRL1);
 
-	val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
-		 DPLL_CTRL1_LINK_RATE_MASK(pll->id));
-	val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
+	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
+		 DPLL_CTRL1_SSC(id) |
+		 DPLL_CTRL1_LINK_RATE_MASK(id));
+	val |= pll->state.hw_state.ctrl1 << (id * 6);
 
 	I915_WRITE(DPLL_CTRL1, val);
 	POSTING_READ(DPLL_CTRL1);
@@ -933,24 +945,25 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
 	const struct skl_dpll_regs *regs = skl_dpll_regs;
+	const enum intel_dpll_id id = pll->info->id;
 
 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
 
-	I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
-	I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
-	POSTING_READ(regs[pll->id].cfgcr1);
-	POSTING_READ(regs[pll->id].cfgcr2);
+	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
+	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+	POSTING_READ(regs[id].cfgcr1);
+	POSTING_READ(regs[id].cfgcr2);
 
 	/* the enable bit is always bit 31 */
-	I915_WRITE(regs[pll->id].ctl,
-		   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+	I915_WRITE(regs[id].ctl,
+		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
 
 	if (intel_wait_for_register(dev_priv,
 				    DPLL_STATUS,
-				    DPLL_LOCK(pll->id),
-				    DPLL_LOCK(pll->id),
+				    DPLL_LOCK(id),
+				    DPLL_LOCK(id),
 				    5))
-		DRM_ERROR("DPLL %d not locked\n", pll->id);
+		DRM_ERROR("DPLL %d not locked\n", id);
 }
 
 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
@@ -963,11 +976,12 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
 	const struct skl_dpll_regs *regs = skl_dpll_regs;
+	const enum intel_dpll_id id = pll->info->id;
 
 	/* the enable bit is always bit 31 */
-	I915_WRITE(regs[pll->id].ctl,
-		   I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
-	POSTING_READ(regs[pll->id].ctl);
+	I915_WRITE(regs[id].ctl,
+		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+	POSTING_READ(regs[id].ctl);
 }
 
 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
@@ -981,6 +995,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 {
 	uint32_t val;
 	const struct skl_dpll_regs *regs = skl_dpll_regs;
+	const enum intel_dpll_id id = pll->info->id;
 	bool ret;
 
 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
@@ -988,17 +1003,17 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
 	ret = false;
 
-	val = I915_READ(regs[pll->id].ctl);
+	val = I915_READ(regs[id].ctl);
 	if (!(val & LCPLL_PLL_ENABLE))
 		goto out;
 
 	val = I915_READ(DPLL_CTRL1);
-	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
 
 	/* avoid reading back stale values if HDMI mode is not enabled */
-	if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
-		hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
-		hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
+		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
+		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
 	}
 	ret = true;
 
@@ -1014,6 +1029,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
 {
 	uint32_t val;
 	const struct skl_dpll_regs *regs = skl_dpll_regs;
+	const enum intel_dpll_id id = pll->info->id;
 	bool ret;
 
 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
@@ -1022,12 +1038,12 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
 	ret = false;
 
 	/* DPLL0 is always enabled since it drives CDCLK */
-	val = I915_READ(regs[pll->id].ctl);
+	val = I915_READ(regs[id].ctl);
 	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
 		goto out;
 
 	val = I915_READ(DPLL_CTRL1);
-	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
 
 	ret = true;
 
@@ -1427,7 +1443,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
 	uint32_t temp;
-	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
 	enum dpio_phy phy;
 	enum dpio_channel ch;
 
@@ -1546,7 +1562,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
 					struct intel_shared_dpll *pll)
 {
-	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
 	uint32_t temp;
 
 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
@@ -1569,7 +1585,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 					struct intel_shared_dpll *pll,
 					struct intel_dpll_hw_state *hw_state)
 {
-	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
+	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
 	uint32_t val;
 	bool ret;
 	enum dpio_phy phy;
@@ -1949,38 +1965,39 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
+	const enum intel_dpll_id id = pll->info->id;
 	uint32_t val;
 
 	/* 1. Enable DPLL power in DPLL_ENABLE. */
-	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+	val = I915_READ(CNL_DPLL_ENABLE(id));
 	val |= PLL_POWER_ENABLE;
-	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
+	I915_WRITE(CNL_DPLL_ENABLE(id), val);
 
 	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
 	if (intel_wait_for_register(dev_priv,
-				    CNL_DPLL_ENABLE(pll->id),
+				    CNL_DPLL_ENABLE(id),
 				    PLL_POWER_STATE,
 				    PLL_POWER_STATE,
 				    5))
-		DRM_ERROR("PLL %d Power not enabled\n", pll->id);
+		DRM_ERROR("PLL %d Power not enabled\n", id);
 
 	/*
 	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
 	 * select DP mode, and set DP link rate.
 	 */
 	val = pll->state.hw_state.cfgcr0;
-	I915_WRITE(CNL_DPLL_CFGCR0(pll->id), val);
+	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
 
 	/* 4. Reab back to ensure writes completed */
-	POSTING_READ(CNL_DPLL_CFGCR0(pll->id));
+	POSTING_READ(CNL_DPLL_CFGCR0(id));
 
 	/* 3. Configure DPLL_CFGCR0 */
 	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
 		val = pll->state.hw_state.cfgcr1;
-		I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
+		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
 		/* 4. Reab back to ensure writes completed */
-		POSTING_READ(CNL_DPLL_CFGCR1(pll->id));
+		POSTING_READ(CNL_DPLL_CFGCR1(id));
 	}
 
 	/*
@@ -1993,17 +2010,17 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	 */
 
 	/* 6. Enable DPLL in DPLL_ENABLE. */
-	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+	val = I915_READ(CNL_DPLL_ENABLE(id));
 	val |= PLL_ENABLE;
-	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
+	I915_WRITE(CNL_DPLL_ENABLE(id), val);
 
 	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
 	if (intel_wait_for_register(dev_priv,
-				    CNL_DPLL_ENABLE(pll->id),
+				    CNL_DPLL_ENABLE(id),
 				    PLL_LOCK,
 				    PLL_LOCK,
 				    5))
-		DRM_ERROR("PLL %d not locked\n", pll->id);
+		DRM_ERROR("PLL %d not locked\n", id);
 
 	/*
 	 * 8. If the frequency will result in a change to the voltage
@@ -2023,6 +2040,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
+	const enum intel_dpll_id id = pll->info->id;
 	uint32_t val;
 
 	/*
@@ -2040,17 +2058,17 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	 */
 
 	/* 3. Disable DPLL through DPLL_ENABLE. */
-	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+	val = I915_READ(CNL_DPLL_ENABLE(id));
 	val &= ~PLL_ENABLE;
-	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
+	I915_WRITE(CNL_DPLL_ENABLE(id), val);
 
 	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
 	if (intel_wait_for_register(dev_priv,
-				    CNL_DPLL_ENABLE(pll->id),
+				    CNL_DPLL_ENABLE(id),
 				    PLL_LOCK,
 				    0,
 				    5))
-		DRM_ERROR("PLL %d locked\n", pll->id);
+		DRM_ERROR("PLL %d locked\n", id);
 
 	/*
 	 * 5. If the frequency will result in a change to the voltage
@@ -2062,23 +2080,24 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	 */
 
 	/* 6. Disable DPLL power in DPLL_ENABLE. */
-	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+	val = I915_READ(CNL_DPLL_ENABLE(id));
 	val &= ~PLL_POWER_ENABLE;
-	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
+	I915_WRITE(CNL_DPLL_ENABLE(id), val);
 
 	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
 	if (intel_wait_for_register(dev_priv,
-				    CNL_DPLL_ENABLE(pll->id),
+				    CNL_DPLL_ENABLE(id),
 				    PLL_POWER_STATE,
 				    0,
 				    5))
-		DRM_ERROR("PLL %d Power not disabled\n", pll->id);
+		DRM_ERROR("PLL %d Power not disabled\n", id);
 }
 
 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 				     struct intel_shared_dpll *pll,
 				     struct intel_dpll_hw_state *hw_state)
 {
+	const enum intel_dpll_id id = pll->info->id;
 	uint32_t val;
 	bool ret;
 
@@ -2087,16 +2106,16 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
 	ret = false;
 
-	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
+	val = I915_READ(CNL_DPLL_ENABLE(id));
 	if (!(val & PLL_ENABLE))
 		goto out;
 
-	val = I915_READ(CNL_DPLL_CFGCR0(pll->id));
+	val = I915_READ(CNL_DPLL_CFGCR0(id));
 	hw_state->cfgcr0 = val;
 
 	/* avoid reading back stale values if HDMI mode is not enabled */
 	if (val & DPLL_CFGCR0_HDMI_MODE) {
-		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll->id));
+		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
 	}
 	ret = true;
 
@@ -2415,7 +2434,6 @@ void intel_shared_dpll_init(struct drm_device *dev)
 		WARN_ON(i != dpll_info[i].id);
 		dev_priv->shared_dplls[i].info = &dpll_info[i];
 
-		dev_priv->shared_dplls[i].id = dpll_info[i].id;
 		dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
 	}
 
@@ -2476,7 +2494,7 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
 	struct intel_shared_dpll_state *shared_dpll_state;
 
 	shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
-	shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
+	shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index e5ed3e0269e3..7c95ecce41ee 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -213,6 +213,10 @@ struct dpll_info {
 	 * @name: DPLL name; used for logging
 	 */
 	const char *name;
+	/**
+	 * @id: unique indentifier for this DPLL; should match the index in the
+	 * dev_priv->shared_dplls array
+	 */
 	const int id;
 	/**
 	 * @funcs: platform specific hooks
@@ -243,12 +247,6 @@ struct intel_shared_dpll {
 	 */
 	bool on;
 
-	/**
-	 * @id: unique indentifier for this DPLL; should match the index in the
-	 * dev_priv->shared_dplls array
-	 */
-	enum intel_dpll_id id;
-
 	/**
 	 * @info: platform specific info
 	 */

From 5cd281f679f3ccb517f87589fe4070a3f8014d08 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Tue, 20 Mar 2018 15:06:36 -0700
Subject: [PATCH 0132/1286] drm/i915: use flags from dpll_info embedded in
 intel_shared_dpll
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace all users of pll->flags with pll->info->flags.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320220637.21480-7-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/intel_display.c  |  2 +-
 drivers/gpu/drm/i915/intel_dpll_mgr.c |  2 --
 drivers/gpu/drm/i915/intel_dpll_mgr.h | 18 ++++++++----------
 3 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5f79444d12c5..64dd88e49bd9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11657,7 +11657,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
 
 	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
-	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
+	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
 		I915_STATE_WARN(!pll->on && pll->active_mask,
 		     "pll in active use but not on in sw tracking\n");
 		I915_STATE_WARN(pll->on && !pll->active_mask,
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 48466b19d1f6..bda69e1ccd76 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2433,8 +2433,6 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	for (i = 0; dpll_info[i].id >= 0; i++) {
 		WARN_ON(i != dpll_info[i].id);
 		dev_priv->shared_dplls[i].info = &dpll_info[i];
-
-		dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
 	}
 
 	dev_priv->dpll_mgr = dpll_mgr;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 7c95ecce41ee..e4c01e487be7 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -222,6 +222,14 @@ struct dpll_info {
 	 * @funcs: platform specific hooks
 	 */
 	const struct intel_shared_dpll_funcs *funcs;
+#define INTEL_DPLL_ALWAYS_ON	(1 << 0)
+	/**
+	 * @flags:
+	 *
+	 * INTEL_DPLL_ALWAYS_ON
+	 *     Inform the state checker that the DPLL is kept enabled even if
+	 *     not in use by any CRTC.
+	 */
 	uint32_t flags;
 };
 
@@ -251,16 +259,6 @@ struct intel_shared_dpll {
 	 * @info: platform specific info
 	 */
 	const struct dpll_info *info;
-
-#define INTEL_DPLL_ALWAYS_ON	(1 << 0)
-	/**
-	 * @flags:
-	 *
-	 * INTEL_DPLL_ALWAYS_ON
-	 *     Inform the state checker that the DPLL is kept enabled even if
-	 *     not in use by any CRTC.
-	 */
-	uint32_t flags;
 };
 
 #define SKL_DPLL0 0

From 7fd9e829931349ad417579a718213c2b8369bff4 Mon Sep 17 00:00:00 2001
From: Lucas De Marchi <lucas.demarchi@intel.com>
Date: Tue, 20 Mar 2018 15:06:37 -0700
Subject: [PATCH 0133/1286] drm/i915: reorder dpll_info members
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Remove the 4-byte hole in this struct and reorder the tables accordingly.
This also changes the last element of the tables to be more future-proof.
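
As a rough pahole-style sketch of why the reorder helps (offsets assume
64-bit pointers and a 4-byte enum; they are illustrative, not taken from
the patch):

	/* old order: the 8-byte funcs pointer forces a hole after the 4-byte id */
	struct dpll_info {
		const char *name;				/* offset  0, size 8 */
		const int id;					/* offset  8, size 4 */
		/* 4-byte hole */
		const struct intel_shared_dpll_funcs *funcs;	/* offset 16, size 8 */
		uint32_t flags;					/* offset 24, size 4 */
	};							/* 32 bytes incl. tail padding */

	/* new order: both pointers first, then the two 4-byte members pack tightly */
	struct dpll_info {
		const char *name;				/* offset  0, size 8 */
		const struct intel_shared_dpll_funcs *funcs;	/* offset  8, size 8 */
		enum intel_dpll_id id;				/* offset 16, size 4 */
		uint32_t flags;					/* offset 20, size 4 */
	};							/* 24 bytes, no holes */

The empty "{ }" sentinel at the end of each table also lets
intel_shared_dpll_init() stop on a NULL name rather than on a magic id of -1,
which is what the loop change below relies on.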

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180320220637.21480-8-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 48 +++++++++++++--------------
 drivers/gpu/drm/i915/intel_dpll_mgr.h | 13 +++++---
 2 files changed, 32 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index bda69e1ccd76..d5e114e9660b 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1908,9 +1908,9 @@ struct intel_dpll_mgr {
 };
 
 static const struct dpll_info pch_plls[] = {
-	{ "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
-	{ "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
-	{ NULL, -1, NULL, 0 },
+	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+	{ },
 };
 
 static const struct intel_dpll_mgr pch_pll_mgr = {
@@ -1920,13 +1920,13 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
 };
 
 static const struct dpll_info hsw_plls[] = {
-	{ "WRPLL 1",    DPLL_ID_WRPLL1,     &hsw_ddi_wrpll_funcs, 0 },
-	{ "WRPLL 2",    DPLL_ID_WRPLL2,     &hsw_ddi_wrpll_funcs, 0 },
-	{ "SPLL",       DPLL_ID_SPLL,       &hsw_ddi_spll_funcs,  0 },
-	{ "LCPLL 810",  DPLL_ID_LCPLL_810,  &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
-	{ "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
-	{ "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
-	{ NULL, -1, NULL, },
+	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
+	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
+	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
+	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
+	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+	{ },
 };
 
 static const struct intel_dpll_mgr hsw_pll_mgr = {
@@ -1936,11 +1936,11 @@ static const struct intel_dpll_mgr hsw_pll_mgr = {
 };
 
 static const struct dpll_info skl_plls[] = {
-	{ "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
-	{ "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs,   0 },
-	{ "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs,   0 },
-	{ "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs,   0 },
-	{ NULL, -1, NULL, },
+	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
+	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
+	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
+	{ },
 };
 
 static const struct intel_dpll_mgr skl_pll_mgr = {
@@ -1950,10 +1950,10 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
 };
 
 static const struct dpll_info bxt_plls[] = {
-	{ "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
-	{ "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
-	{ "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
-	{ NULL, -1, NULL, },
+	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
+	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+	{ },
 };
 
 static const struct intel_dpll_mgr bxt_pll_mgr = {
@@ -2387,10 +2387,10 @@ static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
 };
 
 static const struct dpll_info cnl_plls[] = {
-	{ "DPLL 0", DPLL_ID_SKL_DPLL0, &cnl_ddi_pll_funcs, 0 },
-	{ "DPLL 1", DPLL_ID_SKL_DPLL1, &cnl_ddi_pll_funcs, 0 },
-	{ "DPLL 2", DPLL_ID_SKL_DPLL2, &cnl_ddi_pll_funcs, 0 },
-	{ NULL, -1, NULL, },
+	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
+	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+	{ },
 };
 
 static const struct intel_dpll_mgr cnl_pll_mgr = {
@@ -2430,7 +2430,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
 
 	dpll_info = dpll_mgr->dpll_info;
 
-	for (i = 0; dpll_info[i].id >= 0; i++) {
+	for (i = 0; dpll_info[i].name; i++) {
 		WARN_ON(i != dpll_info[i].id);
 		dev_priv->shared_dplls[i].info = &dpll_info[i];
 	}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index e4c01e487be7..4febfaa90bde 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -213,15 +213,18 @@ struct dpll_info {
 	 * @name: DPLL name; used for logging
 	 */
 	const char *name;
-	/**
-	 * @id: unique indentifier for this DPLL; should match the index in the
-	 * dev_priv->shared_dplls array
-	 */
-	const int id;
+
 	/**
 	 * @funcs: platform specific hooks
 	 */
 	const struct intel_shared_dpll_funcs *funcs;
+
+	/**
+	 * @id: unique indentifier for this DPLL; should match the index in the
+	 * dev_priv->shared_dplls array
+	 */
+	enum intel_dpll_id id;
+
 #define INTEL_DPLL_ALWAYS_ON	(1 << 0)
 	/**
 	 * @flags:

From 7056a2bccc3b5afc51f9b35b30a46f0d9219968d Mon Sep 17 00:00:00 2001
From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Date: Mon, 19 Mar 2018 16:19:32 +0200
Subject: [PATCH 0134/1286] drm/dp/mst: Fix off-by-one typo when dumping
 payload table

It seems there has been a classic off-by-one typo ever since commit

  ad7f8a1f9ced ("drm/helper: add Displayport multi-stream helper (v0.6)")

introduced a new helper.

Fix the typo by introducing a macro constant.
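
For illustration only (not part of the patch): the kernel's "%*ph" format
extension hex-dumps the number of bytes given by the field width, so a
width of 63 silently drops the last byte of the 64-byte payload table even
though all 64 bytes were read. Deriving the loop bound, the buffer size and
the print width from one constant keeps them in sync:

	#define DP_PAYLOAD_TABLE_SIZE	64

	u8 buf[DP_PAYLOAD_TABLE_SIZE];
	/* filled 16 bytes at a time by drm_dp_dpcd_read() */
	seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);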

Cc: Dave Airlie <airlied@redhat.com>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319141932.37290-1-andriy.shevchenko@linux.intel.com
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6fac4129e6a2..658830620ca3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2941,12 +2941,14 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
 	}
 }
 
+#define DP_PAYLOAD_TABLE_SIZE		64
+
 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
 				  char *buf)
 {
 	int i;
 
-	for (i = 0; i < 64; i += 16) {
+	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
 		if (drm_dp_dpcd_read(mgr->aux,
 				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
 				     &buf[i], 16) != 16)
@@ -3015,7 +3017,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 
 	mutex_lock(&mgr->lock);
 	if (mgr->mst_primary) {
-		u8 buf[64];
+		u8 buf[DP_PAYLOAD_TABLE_SIZE];
 		int ret;
 
 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
@@ -3033,8 +3035,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
 			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
 		if (dump_dp_payload_table(mgr, buf))
-			seq_printf(m, "payload table: %*ph\n", 63, buf);
-
+			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
 	}
 
 	mutex_unlock(&mgr->lock);

From d1a9d710d12485710d424daaa2430fe93bc767f8 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Tue, 27 Mar 2018 23:47:20 +0300
Subject: [PATCH 0135/1286] drm: prefer inline over __inline__

Remove last users of __inline__.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327204722.31246-1-jani.nikula@intel.com
---
 include/drm/drmP.h       | 5 ++---
 include/drm/drm_legacy.h | 4 ++--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c6666cd09347..4bbef061c9c0 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -123,8 +123,7 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
 #define DRM_SWITCH_POWER_CHANGING 2
 #define DRM_SWITCH_POWER_DYNAMIC_OFF 3
 
-static __inline__ int drm_core_check_feature(struct drm_device *dev,
-					     int feature)
+static inline int drm_core_check_feature(struct drm_device *dev, int feature)
 {
 	return ((dev->driver->driver_features & feature) ? 1 : 0);
 }
@@ -143,7 +142,7 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
 /*@}*/
 
 /* returns true if currently okay to sleep */
-static __inline__ bool drm_can_sleep(void)
+static inline bool drm_can_sleep(void)
 {
 	if (in_atomic() || in_dbg_master() || irqs_disabled())
 		return false;
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index cf0e7d89bcdf..8fad66f88e4f 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -194,8 +194,8 @@ void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
 void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
 void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
 
-static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
-							   unsigned int token)
+static inline struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
+						       unsigned int token)
 {
 	struct drm_map_list *_entry;
 	list_for_each_entry(_entry, &dev->maplist, head)

From 885a31cb6c752d5403adc6389894c27560fc6e6c Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Tue, 27 Mar 2018 23:47:21 +0300
Subject: [PATCH 0136/1286] drm: remove old documentation comment cruft from
 drmP.h

Throw out the leftovers.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327204722.31246-2-jani.nikula@intel.com
---
 include/drm/drmP.h | 21 ---------------------
 1 file changed, 21 deletions(-)

diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 4bbef061c9c0..b5d52a3d7d19 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -95,14 +95,6 @@ struct dma_buf_attachment;
 struct pci_dev;
 struct pci_controller;
 
-/***********************************************************************/
-/** \name DRM template customization defaults */
-/*@{*/
-
-/***********************************************************************/
-/** \name Internal types and structures */
-/*@{*/
-
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
 
 /**
@@ -128,19 +120,6 @@ static inline int drm_core_check_feature(struct drm_device *dev, int feature)
 	return ((dev->driver->driver_features & feature) ? 1 : 0);
 }
 
-/******************************************************************/
-/** \name Internal function definitions */
-/*@{*/
-
-				/* Driver support (drm_drv.h) */
-
-/*
- * These are exported to drivers so that they can implement fencing using
- * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
- */
-
-/*@}*/
-
 /* returns true if currently okay to sleep */
 static inline bool drm_can_sleep(void)
 {

From f4392860b4fe55d7d7cadaa64743c9b2466e4fd8 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Tue, 27 Mar 2018 23:47:22 +0300
Subject: [PATCH 0137/1286] drm: make drm_core_check_feature() return the bool that it is

Bool is the more appropriate return type here, use it.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327204722.31246-3-jani.nikula@intel.com
---
 include/drm/drmP.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b5d52a3d7d19..f5099c12c6a6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -115,9 +115,9 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
 #define DRM_SWITCH_POWER_CHANGING 2
 #define DRM_SWITCH_POWER_DYNAMIC_OFF 3
 
-static inline int drm_core_check_feature(struct drm_device *dev, int feature)
+static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
 {
-	return ((dev->driver->driver_features & feature) ? 1 : 0);
+	return dev->driver->driver_features & feature;
 }
 
 /* returns true if currently okay to sleep */

From 49efffc7fbd48d5ea3d0dd60c218c7502d4a179d Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi <peter.ujfalusi@ti.com>
Date: Wed, 21 Mar 2018 12:20:24 +0200
Subject: [PATCH 0138/1286] drm: Add drm_mode_config->normalize_zpos boolean

Instead of drivers duplicating the drm_atomic_helper_check() code just to be
able to normalize the zpos, they can set the normalize_zpos flag and let the
drm core do it.
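
A minimal sketch of how a driver opts in (this is exactly what the follow-up
patches in this series do; "foo" is a placeholder driver name):

	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
		.fb_create = drm_gem_fb_create,
		.atomic_check = drm_atomic_helper_check,
		.atomic_commit = drm_atomic_helper_commit,
	};

	/* in the driver's modeset init */
	dev->mode_config.normalize_zpos = true;
	dev->mode_config.funcs = &foo_mode_config_funcs;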

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321102029.15248-2-peter.ujfalusi@ti.com
---
 drivers/gpu/drm/drm_atomic_helper.c | 11 +++++++++++
 include/drm/drm_mode_config.h       |  8 ++++++++
 include/drm/drm_plane.h             |  4 ++--
 3 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c35654591c12..d63c806e7d38 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -875,6 +875,11 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
  * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
  * watermarks.
  *
+ * Note that zpos normalization will add all enabled planes to the state,
+ * which might not be desired for some drivers.
+ * For example, enabling/disabling a cursor plane with a fixed zpos value
+ * would force all other enabled planes into the state change.
+ *
  * RETURNS:
  * Zero for success or -errno
  */
@@ -887,6 +892,12 @@ int drm_atomic_helper_check(struct drm_device *dev,
 	if (ret)
 		return ret;
 
+	if (dev->mode_config.normalize_zpos) {
+		ret = drm_atomic_normalize_zpos(dev, state);
+		if (ret)
+			return ret;
+	}
+
 	ret = drm_atomic_helper_check_planes(dev, state);
 	if (ret)
 		return ret;
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 7569f22ffef6..33b3a96d66d0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -795,6 +795,14 @@ struct drm_mode_config {
 	 */
 	bool allow_fb_modifiers;
 
+	/**
+	 * @normalize_zpos:
+	 *
+	 * If true the drm core will call drm_atomic_normalize_zpos() as part of
+	 * atomic mode checking from drm_atomic_helper_check()
+	 */
+	bool normalize_zpos;
+
 	/**
 	 * @modifiers_property: Plane property to list support modifier/format
 	 * combination.
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index f7bf4a48b1c3..d6da26d66a4b 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -51,8 +51,8 @@ struct drm_modeset_acquire_ctx;
  *	plane with a lower ID.
  * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
  *	where N is the number of active planes for given crtc. Note that
- *	the driver must call drm_atomic_normalize_zpos() to update this before
- *	it can be trusted.
+ *	the driver must set drm_mode_config.normalize_zpos or call
+ *	drm_atomic_normalize_zpos() to update this before it can be trusted.
  * @src: clipped source coordinates of the plane (in 16.16)
  * @dst: clipped destination coordinates of the plane
  * @state: backpointer to global drm_atomic_state

From a7da5cfe0cd6d36af6dc05ee4aa3e506c88e8f0e Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi <peter.ujfalusi@ti.com>
Date: Wed, 21 Mar 2018 12:20:25 +0200
Subject: [PATCH 0139/1286] drm/exynos: Let core take care of normalizing the
 zpos

Instead of re-implementing drm_atomic_helper_check() locally just to add
drm_atomic_normalize_zpos() into it, set drm_mode_config->normalize_zpos.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
CC: Inki Dae <inki.dae@samsung.com>
CC: Joonyoung Shim <jy0922.shim@samsung.com>
CC: Seung-Woo Kim <sw0312.kim@samsung.com>
CC: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321102029.15248-3-peter.ujfalusi@ti.com
---
 drivers/gpu/drm/exynos/exynos_drm_drv.c | 20 --------------------
 drivers/gpu/drm/exynos/exynos_drm_drv.h |  1 -
 drivers/gpu/drm/exynos/exynos_drm_fb.c  |  4 +++-
 3 files changed, 3 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a518e9c6d6cc..39284bb7c2c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -37,26 +37,6 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-int exynos_atomic_check(struct drm_device *dev,
-			struct drm_atomic_state *state)
-{
-	int ret;
-
-	ret = drm_atomic_helper_check_modeset(dev, state);
-	if (ret)
-		return ret;
-
-	ret = drm_atomic_normalize_zpos(dev, state);
-	if (ret)
-		return ret;
-
-	ret = drm_atomic_helper_check_planes(dev, state);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index df2262f70d91..075957cb6ba1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -275,7 +275,6 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
 
 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
 			 bool nonblock);
-int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
 
 
 extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 0faaf829f5bf..2379d732da67 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -206,7 +206,7 @@ static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
 	.fb_create = exynos_user_fb_create,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
-	.atomic_check = exynos_atomic_check,
+	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
 
@@ -227,4 +227,6 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
 	dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
 
 	dev->mode_config.allow_fb_modifiers = true;
+
+	dev->mode_config.normalize_zpos = true;
 }

From a18301b9f556101b4b87cd83b050c553652e91e4 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi <peter.ujfalusi@ti.com>
Date: Wed, 21 Mar 2018 12:20:26 +0200
Subject: [PATCH 0140/1286] drm/tegra: Let core take care of normalizing the
 zpos

Set the drm_mode_config->normalize_zpos and call the generic
drm_atomic_helper_check() instead of duplicating it within
tegra_atomic_check().

Call tegra_display_hub_atomic_check() after drm_atomic_helper_check() has
returned without error.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
CC: Thierry Reding <thierry.reding@gmail.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321102029.15248-4-peter.ujfalusi@ti.com
---
 drivers/gpu/drm/tegra/drm.c | 21 ++++-----------------
 1 file changed, 4 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e20e013151f0..ac1121172dc9 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -38,26 +38,11 @@ static int tegra_atomic_check(struct drm_device *drm,
 {
 	int err;
 
-	err = drm_atomic_helper_check_modeset(drm, state);
+	err = drm_atomic_helper_check(drm, state);
 	if (err < 0)
 		return err;
 
-	err = tegra_display_hub_atomic_check(drm, state);
-	if (err < 0)
-		return err;
-
-	err = drm_atomic_normalize_zpos(drm, state);
-	if (err < 0)
-		return err;
-
-	err = drm_atomic_helper_check_planes(drm, state);
-	if (err < 0)
-		return err;
-
-	if (state->legacy_cursor_update)
-		state->async_update = !drm_atomic_helper_async_check(drm, state);
-
-	return 0;
+	return tegra_display_hub_atomic_check(drm, state);
 }
 
 static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
@@ -151,6 +136,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
 	drm->mode_config.allow_fb_modifiers = true;
 
+	drm->mode_config.normalize_zpos = true;
+
 	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
 	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
 

From 352f9a8419df87b925ccc7fc56f1a75aa2290e93 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi <peter.ujfalusi@ti.com>
Date: Wed, 21 Mar 2018 12:20:27 +0200
Subject: [PATCH 0141/1286] drm/sti: Let core take care of normalizing the zpos

Instead of re-implementing drm_atomic_helper_check() locally just to add
drm_atomic_normalize_zpos() into it, set drm_mode_config->normalize_zpos.

Note: the drm_atomic_helper_check() now includes

if (state->legacy_cursor_update)
	state->async_update = !drm_atomic_helper_async_check(drm, state);

which was added after the driver moved away from using it
(38d868e41c4b9250d5a115c049dc2d48f4909581 drm: Don't force all planes to
be added to the state due to zpos)

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
CC: Benjamin Gaignard <benjamin.gaignard@linaro.org>
CC: Vincent Abriou <vincent.abriou@st.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321102029.15248-5-peter.ujfalusi@ti.com
---
 drivers/gpu/drm/sti/sti_drv.c | 24 +++---------------------
 1 file changed, 3 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 55b6967d27e1..90c46b49c931 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -119,30 +119,10 @@ err:
 	return ret;
 }
 
-static int sti_atomic_check(struct drm_device *dev,
-			    struct drm_atomic_state *state)
-{
-	int ret;
-
-	ret = drm_atomic_helper_check_modeset(dev, state);
-	if (ret)
-		return ret;
-
-	ret = drm_atomic_normalize_zpos(dev, state);
-	if (ret)
-		return ret;
-
-	ret = drm_atomic_helper_check_planes(dev, state);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
 	.fb_create = drm_gem_fb_create,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
-	.atomic_check = sti_atomic_check,
+	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
 
@@ -160,6 +140,8 @@ static void sti_mode_config_init(struct drm_device *dev)
 	dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
 
 	dev->mode_config.funcs = &sti_mode_config_funcs;
+
+	dev->mode_config.normalize_zpos = true;
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);

From 75def7785f4901b65a89dc99ea9506b1395242fa Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi <peter.ujfalusi@ti.com>
Date: Wed, 21 Mar 2018 12:20:28 +0200
Subject: [PATCH 0142/1286] drm: rcar-du: Let core take care of normalizing the
 zpos

Set the drm_mode_config->normalize_zpos and call drm_atomic_helper_check()
from rcar_du_atomic_check() instead of re-implementing the function locally.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
CC: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321102029.15248-6-peter.ujfalusi@ti.com
---
 drivers/gpu/drm/rcar-du/rcar_du_kms.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 0329b354bfa0..ab59d2061e06 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -233,15 +233,7 @@ static int rcar_du_atomic_check(struct drm_device *dev,
 	struct rcar_du_device *rcdu = dev->dev_private;
 	int ret;
 
-	ret = drm_atomic_helper_check_modeset(dev, state);
-	if (ret)
-		return ret;
-
-	ret = drm_atomic_normalize_zpos(dev, state);
-	if (ret)
-		return ret;
-
-	ret = drm_atomic_helper_check_planes(dev, state);
+	ret = drm_atomic_helper_check(dev, state);
 	if (ret)
 		return ret;
 
@@ -529,6 +521,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	dev->mode_config.min_height = 0;
 	dev->mode_config.max_width = 4095;
 	dev->mode_config.max_height = 2047;
+	dev->mode_config.normalize_zpos = true;
 	dev->mode_config.funcs = &rcar_du_mode_config_funcs;
 	dev->mode_config.helper_private = &rcar_du_mode_config_helper;
 

From 23936ba940fbccf08f9f61d7c6d39ba0feb383bc Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi <peter.ujfalusi@ti.com>
Date: Wed, 21 Mar 2018 12:20:29 +0200
Subject: [PATCH 0143/1286] drm/omap: Use normalized zpos for plane placement

Planes with identical zpos values result in undefined behavior: disappearing
planes and screen flickering; such a configuration is not supported by the
hardware.

Use the normalized zpos to make sure that we don't encounter an invalid
configuration.
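
For reference, drm_atomic_normalize_zpos() gives each active plane on a crtc
a unique normalized_zpos in [0, N-1], breaking ties between equal zpos values
by plane ID (lower ID below). An illustrative example:

	plane   zpos   normalized_zpos
	A       1      0
	B       1      1	(same zpos as A, higher plane ID)
	C       3      2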

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
CC: Tomi Valkeinen <tomi.valkeinen@ti.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321102029.15248-7-peter.ujfalusi@ti.com
---
 drivers/gpu/drm/omapdrm/omap_drv.c   | 3 +++
 drivers/gpu/drm/omapdrm/omap_plane.c | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 3632854c2b91..ef3b0e3571ec 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -319,6 +319,9 @@ static int omap_modeset_init(struct drm_device *dev)
 	dev->mode_config.max_width = 8192;
 	dev->mode_config.max_height = 8192;
 
+	/* We want the zpos to be normalized */
+	dev->mode_config.normalize_zpos = true;
+
 	dev->mode_config.funcs = &omap_mode_config_funcs;
 	dev->mode_config.helper_private = &omap_mode_config_helper_funcs;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 2899435cad6e..161233cbc9a0 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -65,7 +65,7 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
 	info.rotation_type = OMAP_DSS_ROT_NONE;
 	info.rotation = DRM_MODE_ROTATE_0;
 	info.global_alpha = 0xff;
-	info.zorder = state->zpos;
+	info.zorder = state->normalized_zpos;
 
 	/* update scanout: */
 	omap_framebuffer_update_scanout(state->fb, state, &info);

From fbe6f8f2a648584b97beeaaaeff75b795fb3c6cb Mon Sep 17 00:00:00 2001
From: Yaodong Li <yaodong.li@intel.com>
Date: Thu, 22 Mar 2018 16:59:22 -0700
Subject: [PATCH 0144/1286] drm/i915: Use correct reST syntax for WOPCM and GuC
 kernel-doc diagrams

The GuC Address Space and WOPCM Layout diagrams won't be generated correctly
by the Sphinx build unless proper reST syntax is used.

This patch uses reST literal blocks to make sure the GuC Address Space and
WOPCM Layout diagrams are generated correctly, and it also corrects some
errors in the diagram descriptions.
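
A minimal kernel-doc sketch (not taken from the patch) of the pattern being
adopted: the "::" marker followed by an indented block tells Sphinx to render
the ASCII diagram as a literal block instead of reflowing it as prose:

	/**
	 * DOC: Example Layout
	 *
	 * The layout is shown below:
	 *
	 * ::
	 *
	 *    +==========+ <== Region Top
	 *    |  Region  |
	 *    +==========+ <== Region Base
	 */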

v2:
 - Fixed errors in diagram description

v3:
 - Updated GuC Address Space kernel-doc based on Michal's suggestion

v4:
 - Added WOPCM layout and GuC address space docs into i915.rst (Joonas)

Signed-off-by: Jackie Li <yaodong.li@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1521763162-11424-1-git-send-email-yaodong.li@intel.com
---
 Documentation/gpu/i915.rst         | 15 ++++++++
 drivers/gpu/drm/i915/intel_guc.c   | 56 ++++++++++++++++--------------
 drivers/gpu/drm/i915/intel_wopcm.c | 44 ++++++++++++-----------
 3 files changed, 67 insertions(+), 48 deletions(-)

diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 41dc881b00dc..7ecad7134677 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -335,6 +335,15 @@ objects, which has the goal to make space in gpu virtual address spaces.
 .. kernel-doc:: drivers/gpu/drm/i915/i915_gem_shrinker.c
    :internal:
 
+WOPCM
+=====
+
+WOPCM Layout
+------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_wopcm.c
+   :doc: WOPCM Layout
+
 GuC
 ===
 
@@ -359,6 +368,12 @@ GuC Firmware Layout
 .. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fwif.h
    :doc: GuC Firmware Layout
 
+GuC Address Space
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc.c
+   :doc: GuC Address Space
+
 Tracing
 =======
 
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 8f93f5bef8fd..c5f64c762f0a 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -494,35 +494,37 @@ int intel_guc_resume(struct intel_guc *guc)
 /**
  * DOC: GuC Address Space
  *
- * The layout of GuC address space is shown as below:
+ * The layout of GuC address space is shown below:
  *
- *    +==============> +====================+ <== GUC_GGTT_TOP
- *    ^                |                    |
- *    |                |                    |
- *    |                |        DRAM        |
- *    |                |       Memory       |
- *    |                |                    |
- *   GuC               |                    |
- * Address  +========> +====================+ <== WOPCM Top
- *  Space   ^          |   HW contexts RSVD |
- *    |     |          |        WOPCM       |
- *    |     |     +==> +--------------------+ <== GuC WOPCM Top
- *    |    GuC    ^    |                    |
- *    |    GGTT   |    |                    |
- *    |    Pin   GuC   |        GuC         |
- *    |    Bias WOPCM  |       WOPCM        |
- *    |     |    Size  |                    |
- *    |     |     |    |                    |
- *    v     v     v    |                    |
- *    +=====+=====+==> +====================+ <== GuC WOPCM Base
- *                     |   Non-GuC WOPCM    |
- *                     |   (HuC/Reserved)   |
- *                     +====================+ <== WOPCM Base
+ * ::
  *
- * The lower part [0, GuC ggtt_pin_bias) is mapped to WOPCM which consists of
- * GuC WOPCM and WOPCM reserved for other usage (e.g.RC6 context). The value of
- * the GuC ggtt_pin_bias is determined by the actually GuC WOPCM size which is
- * set in GUC_WOPCM_SIZE register.
+ *     +==============> +====================+ <== GUC_GGTT_TOP
+ *     ^                |                    |
+ *     |                |                    |
+ *     |                |        DRAM        |
+ *     |                |       Memory       |
+ *     |                |                    |
+ *    GuC               |                    |
+ *  Address  +========> +====================+ <== WOPCM Top
+ *   Space   ^          |   HW contexts RSVD |
+ *     |     |          |        WOPCM       |
+ *     |     |     +==> +--------------------+ <== GuC WOPCM Top
+ *     |    GuC    ^    |                    |
+ *     |    GGTT   |    |                    |
+ *     |    Pin   GuC   |        GuC         |
+ *     |    Bias WOPCM  |       WOPCM        |
+ *     |     |    Size  |                    |
+ *     |     |     |    |                    |
+ *     v     v     v    |                    |
+ *     +=====+=====+==> +====================+ <== GuC WOPCM Base
+ *                      |   Non-GuC WOPCM    |
+ *                      |   (HuC/Reserved)   |
+ *                      +====================+ <== WOPCM Base
+ *
+ * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to WOPCM
+ * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
+ * to DRAM. The value of the GuC ggtt_pin_bias is determined by WOPCM size and
+ * actual GuC WOPCM size.
  */
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 4117886bfb05..74bf76f3fddc 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -11,28 +11,30 @@
  * DOC: WOPCM Layout
  *
  * The layout of the WOPCM will be fixed after writing to GuC WOPCM size and
- * offset registers whose are calculated are determined by size of HuC/GuC
- * firmware size and set of hw requirements/restrictions as shown below:
+ * offset registers whose values are calculated and determined by HuC/GuC
+ * firmware size and set of hardware requirements/restrictions as shown below:
  *
- *   +=========> +====================+ <== WOPCM Top
- *   ^           |  HW contexts RSVD  |
- *   |     +===> +====================+ <== GuC WOPCM Top
- *   |     ^     |                    |
- *   |     |     |                    |
- *   |     |     |                    |
- *   |    GuC    |                    |
- *   |   WOPCM   |                    |
- *   |    Size   +--------------------+
- * WOPCM   |     |    GuC FW RSVD     |
- *   |     |     +--------------------+
- *   |     |     |   GuC Stack RSVD   |
- *   |     |     +------------------- +
- *   |     v     |   GuC WOPCM RSVD   |
- *   |     +===> +====================+ <== GuC WOPCM base
- *   |           |     WOPCM RSVD     |
- *   |           +------------------- + <== HuC Firmware Top
- *   v           |      HuC FW        |
- *   +=========> +====================+ <== WOPCM Base
+ * ::
+ *
+ *    +=========> +====================+ <== WOPCM Top
+ *    ^           |  HW contexts RSVD  |
+ *    |     +===> +====================+ <== GuC WOPCM Top
+ *    |     ^     |                    |
+ *    |     |     |                    |
+ *    |     |     |                    |
+ *    |    GuC    |                    |
+ *    |   WOPCM   |                    |
+ *    |    Size   +--------------------+
+ *  WOPCM   |     |    GuC FW RSVD     |
+ *    |     |     +--------------------+
+ *    |     |     |   GuC Stack RSVD   |
+ *    |     |     +------------------- +
+ *    |     v     |   GuC WOPCM RSVD   |
+ *    |     +===> +====================+ <== GuC WOPCM base
+ *    |           |     WOPCM RSVD     |
+ *    |           +------------------- + <== HuC Firmware Top
+ *    v           |      HuC FW        |
+ *    +=========> +====================+ <== WOPCM Base
  *
  * GuC accessible WOPCM starts at GuC WOPCM base and ends at GuC WOPCM top.
  * The top part of the WOPCM is reserved for hardware contexts (e.g. RC6

From 606f1fc5cf2c27e86f2ea03d293e77fac789fae5 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Tue, 27 Mar 2018 10:23:52 +0200
Subject: [PATCH 0145/1286] staging/vboxvideo: Use gem_free_object_unlocked

vboxvideo doesn't use dev->struct_mutex and therefore has no need to use
gem_free_object.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hans de Goede <hdegoede@redhat.com>
Cc: Michael Thayer <michael.thayer@oracle.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327082356.24516-1-daniel.vetter@ffwll.ch
---
 drivers/staging/vboxvideo/vbox_drv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index e18642e5027e..f6d26beffa54 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -242,7 +242,7 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_free_object = vbox_gem_free_object,
+	.gem_free_object_unlocked = vbox_gem_free_object,
 	.dumb_create = vbox_dumb_create,
 	.dumb_map_offset = vbox_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,

From fcb1e57f79c0ecf4c85fecd2294a469367cbddd0 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Tue, 27 Mar 2018 10:23:53 +0200
Subject: [PATCH 0146/1286] drm/rockchip: fixup comment for
 gem_free_object_unlocked
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Just want to clean out all grep hits. gem_free_object is deprecated.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Sandy Huang <hjc@rock-chips.com>
Cc: "Heiko Stübner" <heiko@sntech.de>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-rockchip@lists.infradead.org
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327082356.24516-2-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 074db7a92809..a8db758d523e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -357,8 +357,8 @@ err_free_rk_obj:
 }
 
 /*
- * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
- * function
+ * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
+ * callback function
  */
 void rockchip_gem_free_object(struct drm_gem_object *obj)
 {

From ae358dacd217370cc362f1674712c4e9246ace8d Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Tue, 27 Mar 2018 10:23:54 +0200
Subject: [PATCH 0147/1286] drm/udl: Get rid of dev->struct_mutex usage

It's only used to protect our page list, and only when we know we have
a full reference. This means none of these code paths can ever race
with the final unref, and hence we do not need dev->struct_mutex
serialization and can simply switch to our own locking.

For more context, the only magic the locked gem_free_object provides is
that it prevents concurrent final unref (and destruction) of gem
objects while anyone is holding dev->struct_mutex. This was used by
i915 (and other drivers) to implement eviction handling with fewer
headaches.
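
Condensed from the diff below, the replacement pattern is a driver-private
mutex that only guards udl's own lists:

	struct udl_device {
		/* ... */
		struct mutex gem_lock;	/* protects udl's page/object lists */
	};

	/* at load time */
	mutex_init(&udl->gem_lock);

	/* in paths that previously took dev->struct_mutex */
	mutex_lock(&udl->gem_lock);
	/* ... walk or update the page list ... */
	mutex_unlock(&udl->gem_lock);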

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Dave Airlie <airlied@redhat.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327082356.24516-3-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/udl/udl_dmabuf.c | 5 +++--
 drivers/gpu/drm/udl/udl_drv.c    | 2 +-
 drivers/gpu/drm/udl/udl_drv.h    | 2 ++
 drivers/gpu/drm/udl/udl_gem.c    | 5 +++--
 drivers/gpu/drm/udl/udl_main.c   | 2 ++
 5 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index 2867ed155ff6..0a20695eb120 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -76,6 +76,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
 	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
 	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
 	struct drm_device *dev = obj->base.dev;
+	struct udl_device *udl = dev->dev_private;
 	struct scatterlist *rd, *wr;
 	struct sg_table *sgt = NULL;
 	unsigned int i;
@@ -112,7 +113,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&udl->gem_lock);
 
 	rd = obj->sg->sgl;
 	wr = sgt->sgl;
@@ -137,7 +138,7 @@ static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
 	attach->priv = udl_attach;
 
 err_unlock:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&udl->gem_lock);
 	return sgt;
 }
 
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 3c45a3064726..9ef515df724b 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -53,7 +53,7 @@ static struct drm_driver driver = {
 	.unload = udl_driver_unload,
 
 	/* gem hooks */
-	.gem_free_object = udl_gem_free_object,
+	.gem_free_object_unlocked = udl_gem_free_object,
 	.gem_vm_ops = &udl_gem_vm_ops,
 
 	.dumb_create = udl_dumb_create,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 2a75ab80527a..55c0cc309198 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -54,6 +54,8 @@ struct udl_device {
 	struct usb_device *udev;
 	struct drm_crtc *crtc;
 
+	struct mutex gem_lock;
+
 	int sku_pixel_limit;
 
 	struct urb_list urbs;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index dee6bd9a3dd1..9a15cce22cce 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -214,9 +214,10 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 {
 	struct udl_gem_object *gobj;
 	struct drm_gem_object *obj;
+	struct udl_device *udl = dev->dev_private;
 	int ret = 0;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&udl->gem_lock);
 	obj = drm_gem_object_lookup(file, handle);
 	if (obj == NULL) {
 		ret = -ENOENT;
@@ -236,6 +237,6 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 out:
 	drm_gem_object_put(&gobj->base);
 unlock:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&udl->gem_lock);
 	return ret;
 }
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index f1ec4528a73e..d518de8f496b 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -324,6 +324,8 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
 	udl->ddev = dev;
 	dev->dev_private = udl;
 
+	mutex_init(&udl->gem_lock);
+
 	if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
 		ret = -ENODEV;
 		DRM_ERROR("firmware not recognized. Assume incompatible device\n");

From 0c9c7fd00e17907efb35697ecb9f2df39a0b536c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 22:27:37 +0200
Subject: [PATCH 0148/1286] drm/simple-kms-helper: Plumb plane state to the
 enable hook
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The tinydrm enable hook wants to play around with the new fb in
.atomic_enable(), so we'll need access to the plane state there.

Performed with coccinelle:
@r1@
identifier F =~ ".*enable$";
identifier P, CS;
@@
F(
	struct drm_simple_display_pipe *P
	,struct drm_crtc_state *CS
+	,struct drm_plane_state *plane_state
	)
{
...
}

@@
struct drm_simple_display_pipe *P;
expression E;
@@
{
+ struct drm_plane *plane;
...
+ plane = &P->plane;
P->funcs->enable(P
		,E
+		,plane->state
	);
...
}

@@
identifier P, CS;
@@
struct drm_simple_display_pipe_funcs {
...
        void (*enable)(struct drm_simple_display_pipe *P
	     		,struct drm_crtc_state *CS
+			,struct drm_plane_state *plane_state
		);
...
};
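
With the extra argument plumbed through, a pipe enable hook can reach the new
framebuffer directly; a hedged sketch with a placeholder driver name:

	static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
				    struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
	{
		/* the framebuffer being enabled, without poking pipe->plane.state */
		struct drm_framebuffer *fb = plane_state->fb;

		/* program the hardware to scan out / flush fb here */
	}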

v2: Pimp the commit message (David)

Cc: Marek Vasut <marex@denx.de>
Cc: Eric Anholt <eric@anholt.net>
Cc: David Lechner <david@lechnology.com>
Cc: "Noralf Trønnes" <noralf@tronnes.org>
Cc: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322202738.25817-1-ville.syrjala@linux.intel.com
Reviewed-by: Noralf Trønnes <noralf@tronnes.org>
---
 drivers/gpu/drm/drm_simple_kms_helper.c | 4 +++-
 drivers/gpu/drm/mxsfb/mxsfb_drv.c       | 3 ++-
 drivers/gpu/drm/pl111/pl111_display.c   | 3 ++-
 drivers/gpu/drm/tinydrm/ili9225.c       | 3 ++-
 drivers/gpu/drm/tinydrm/mi0283qt.c      | 3 ++-
 drivers/gpu/drm/tinydrm/repaper.c       | 3 ++-
 drivers/gpu/drm/tinydrm/st7586.c        | 3 ++-
 drivers/gpu/drm/tinydrm/st7735r.c       | 3 ++-
 drivers/gpu/drm/tve200/tve200_display.c | 3 ++-
 include/drm/drm_simple_kms_helper.h     | 3 ++-
 10 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 987a353c7f72..7a00455ca568 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -64,13 +64,15 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
 static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
 				       struct drm_crtc_state *old_state)
 {
+	struct drm_plane *plane;
 	struct drm_simple_display_pipe *pipe;
 
 	pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
 	if (!pipe->funcs || !pipe->funcs->enable)
 		return;
 
-	pipe->funcs->enable(pipe, crtc->state);
+	plane = &pipe->plane;
+	pipe->funcs->enable(pipe, crtc->state, plane->state);
 }
 
 static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 5cae8db9dcd4..b9c7507813db 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -99,7 +99,8 @@ static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
 };
 
 static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
-			      struct drm_crtc_state *crtc_state)
+			      struct drm_crtc_state *crtc_state,
+			      struct drm_plane_state *plane_state)
 {
 	struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
 
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 310646427907..1fee578e05b0 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -120,7 +120,8 @@ static int pl111_display_check(struct drm_simple_display_pipe *pipe,
 }
 
 static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
-				 struct drm_crtc_state *cstate)
+				 struct drm_crtc_state *cstate,
+				 struct drm_plane_state *plane_state)
 {
 	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_plane *plane = &pipe->plane;
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index a0759502b81a..089d22798c8b 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -176,7 +176,8 @@ static const struct drm_framebuffer_funcs ili9225_fb_funcs = {
 };
 
 static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
-				struct drm_crtc_state *crtc_state)
+				struct drm_crtc_state *crtc_state,
+				struct drm_plane_state *plane_state)
 {
 	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
 	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index d8ed6e6f8e05..82ad9b61898e 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -49,7 +49,8 @@
 #define ILI9341_MADCTL_MY	BIT(7)
 
 static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
-			    struct drm_crtc_state *crtc_state)
+			    struct drm_crtc_state *crtc_state,
+			    struct drm_plane_state *plane_state)
 {
 	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
 	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 75740630c410..33b4a71916e4 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -659,7 +659,8 @@ static void power_off(struct repaper_epd *epd)
 }
 
 static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
-				struct drm_crtc_state *crtc_state)
+				struct drm_crtc_state *crtc_state,
+				struct drm_plane_state *plane_state)
 {
 	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
 	struct repaper_epd *epd = epd_from_tinydrm(tdev);
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index a6396ef9cc4a..bb08b293c8ce 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -175,7 +175,8 @@ static const struct drm_framebuffer_funcs st7586_fb_funcs = {
 };
 
 static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
-			       struct drm_crtc_state *crtc_state)
+			       struct drm_crtc_state *crtc_state,
+			       struct drm_plane_state *plane_state)
 {
 	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
 	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 67d197ecfc4b..19b28f8c78db 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -37,7 +37,8 @@
 #define ST7735R_MV	BIT(5)
 
 static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
-				      struct drm_crtc_state *crtc_state)
+				      struct drm_crtc_state *crtc_state,
+				      struct drm_plane_state *plane_state)
 {
 	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
 	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index db397fcb345a..108f3b2b5d25 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -120,7 +120,8 @@ static int tve200_display_check(struct drm_simple_display_pipe *pipe,
 }
 
 static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
-				 struct drm_crtc_state *cstate)
+				 struct drm_crtc_state *cstate,
+				 struct drm_plane_state *plane_state)
 {
 	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_plane *plane = &pipe->plane;
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 1b4e352143fd..b02793742317 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -64,7 +64,8 @@ struct drm_simple_display_pipe_funcs {
 	 * This hook is optional.
 	 */
 	void (*enable)(struct drm_simple_display_pipe *pipe,
-		       struct drm_crtc_state *crtc_state);
+		       struct drm_crtc_state *crtc_state,
+		       struct drm_plane_state *plane_state);
 	/**
 	 * @disable:
 	 *

From e85d30060eddccfcfbf7fdbd61a23cfbda05cc59 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Fri, 23 Mar 2018 17:35:09 +0200
Subject: [PATCH 0149/1286] drm/tinydrm: Make fb_dirty into a lower level hook
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

mipi_dbi_enable_flush() wants to call the fb->dirty() hook from the
bowels of the .atomic_enable() hook. That prevents us from taking the
plane mutex in fb->dirty() unless we also plumb down the acquire
context.

Instead it seems simpler to split the fb->dirty() into a tinydrm
specific lower level hook that can be called from
mipi_dbi_enable_flush() and from a generic higher level
tinydrm_fb_dirty() helper. As we don't have a tinydrm specific
vfuncs table we'll just stick it into tinydrm_device directly
for now.

v2: Deal with the fb->dirty() in tinydrm_display_pipe_update() as well (Noralf)
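
Purely as an illustrative sketch (not part of this patch; the "foo"
names are hypothetical), a driver is expected to wire up the split
roughly as follows, mirroring what the hunks below do for ili9225,
mipi-dbi, repaper and st7586:

  /* device specific bottom half; tinydrm_fb_dirty() calls this with
   * tdev->dirty_lock held and only for the fb bound to the plane */
  static int foo_fb_dirty(struct drm_framebuffer *fb,
                          struct drm_file *file_priv, unsigned int flags,
                          unsigned int color, struct drm_clip_rect *clips,
                          unsigned int num_clips)
  {
          return 0;
  }

  static const struct drm_framebuffer_funcs foo_fb_funcs = {
          .destroy        = drm_gem_fb_destroy,
          .create_handle  = drm_gem_fb_create_handle,
          .dirty          = tinydrm_fb_dirty,     /* generic top half */
  };

  /* in the driver init path, before tinydrm_display_pipe_init(): */
  tdev->fb_dirty = foo_fb_dirty;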

Cc: "Noralf Trønnes" <noralf@tronnes.org>
Cc: David Lechner <david@lechnology.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Noralf Trønnes <noralf@tronnes.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323153509.15287-1-ville.syrjala@linux.intel.com
Reviewed-by: Noralf Trønnes <noralf@tronnes.org>
Tested-by: Noralf Trønnes <noralf@tronnes.org>
---
 .../gpu/drm/tinydrm/core/tinydrm-helpers.c    | 30 +++++++++++++++++++
 drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c   |  5 ++--
 drivers/gpu/drm/tinydrm/ili9225.c             | 23 ++++----------
 drivers/gpu/drm/tinydrm/mi0283qt.c            |  2 +-
 drivers/gpu/drm/tinydrm/mipi-dbi.c            | 30 +++++++------------
 drivers/gpu/drm/tinydrm/repaper.c             | 28 ++++++-----------
 drivers/gpu/drm/tinydrm/st7586.c              | 23 ++++----------
 drivers/gpu/drm/tinydrm/st7735r.c             |  2 +-
 include/drm/tinydrm/mipi-dbi.h                |  4 ++-
 include/drm/tinydrm/tinydrm-helpers.h         |  5 ++++
 include/drm/tinydrm/tinydrm.h                 |  4 +++
 11 files changed, 78 insertions(+), 78 deletions(-)

diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index d1c3ce9ab294..dcd390163a4a 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -78,6 +78,36 @@ bool tinydrm_merge_clips(struct drm_clip_rect *dst,
 }
 EXPORT_SYMBOL(tinydrm_merge_clips);
 
+int tinydrm_fb_dirty(struct drm_framebuffer *fb,
+		     struct drm_file *file_priv,
+		     unsigned int flags, unsigned int color,
+		     struct drm_clip_rect *clips,
+		     unsigned int num_clips)
+{
+	struct tinydrm_device *tdev = fb->dev->dev_private;
+	struct drm_plane *plane = &tdev->pipe.plane;
+	int ret = 0;
+
+	drm_modeset_lock(&plane->mutex, NULL);
+
+	/* fbdev can flush even when we're not interested */
+	if (plane->state->fb == fb) {
+		mutex_lock(&tdev->dirty_lock);
+		ret = tdev->fb_dirty(fb, file_priv, flags,
+				     color, clips, num_clips);
+		mutex_unlock(&tdev->dirty_lock);
+	}
+
+	drm_modeset_unlock(&plane->mutex);
+
+	if (ret)
+		dev_err_once(fb->dev->dev,
+			     "Failed to update display %d\n", ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(tinydrm_fb_dirty);
+
 /**
  * tinydrm_memcpy - Copy clip buffer
  * @dst: Destination buffer
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index 11ae950b0fc9..e68b528ae64d 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -125,9 +125,8 @@ void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
 	struct drm_crtc *crtc = &tdev->pipe.crtc;
 
 	if (fb && (fb != old_state->fb)) {
-		pipe->plane.fb = fb;
-		if (fb->funcs->dirty)
-			fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+		if (tdev->fb_dirty)
+			tdev->fb_dirty(fb, NULL, 0, 0, NULL, 0);
 	}
 
 	if (crtc->state->event) {
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 089d22798c8b..0874e877b111 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -88,14 +88,8 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
 	bool full;
 	void *tr;
 
-	mutex_lock(&tdev->dirty_lock);
-
 	if (!mipi->enabled)
-		goto out_unlock;
-
-	/* fbdev can flush even when we're not interested */
-	if (tdev->pipe.plane.fb != fb)
-		goto out_unlock;
+		return 0;
 
 	full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
 				   fb->width, fb->height);
@@ -108,7 +102,7 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
 		tr = mipi->tx_buf;
 		ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
 		if (ret)
-			goto out_unlock;
+			return ret;
 	} else {
 		tr = cma_obj->vaddr;
 	}
@@ -159,20 +153,13 @@ static int ili9225_fb_dirty(struct drm_framebuffer *fb,
 	ret = mipi_dbi_command_buf(mipi, ILI9225_WRITE_DATA_TO_GRAM, tr,
 				(clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
 
-out_unlock:
-	mutex_unlock(&tdev->dirty_lock);
-
-	if (ret)
-		dev_err_once(fb->dev->dev, "Failed to update display %d\n",
-			     ret);
-
 	return ret;
 }
 
 static const struct drm_framebuffer_funcs ili9225_fb_funcs = {
 	.destroy	= drm_gem_fb_destroy,
 	.create_handle	= drm_gem_fb_create_handle,
-	.dirty		= ili9225_fb_dirty,
+	.dirty		= tinydrm_fb_dirty,
 };
 
 static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
@@ -269,7 +256,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
 
 	ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
 
-	mipi_dbi_enable_flush(mipi);
+	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 
 static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -342,6 +329,8 @@ static int ili9225_init(struct device *dev, struct mipi_dbi *mipi,
 	if (ret)
 		return ret;
 
+	tdev->fb_dirty = ili9225_fb_dirty;
+
 	ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
 					DRM_MODE_CONNECTOR_VIRTUAL,
 					ili9225_formats,
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 82ad9b61898e..4e6d2ee94e55 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -127,7 +127,7 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
 	msleep(100);
 
 out_enable:
-	mipi_dbi_enable_flush(mipi);
+	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 
 static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 9e903812b573..4d1fb31a781f 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -219,14 +219,8 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
 	bool full;
 	void *tr;
 
-	mutex_lock(&tdev->dirty_lock);
-
 	if (!mipi->enabled)
-		goto out_unlock;
-
-	/* fbdev can flush even when we're not interested */
-	if (tdev->pipe.plane.fb != fb)
-		goto out_unlock;
+		return 0;
 
 	full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
 				   fb->width, fb->height);
@@ -239,7 +233,7 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
 		tr = mipi->tx_buf;
 		ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
 		if (ret)
-			goto out_unlock;
+			return ret;
 	} else {
 		tr = cma_obj->vaddr;
 	}
@@ -254,20 +248,13 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
 	ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
 				(clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
 
-out_unlock:
-	mutex_unlock(&tdev->dirty_lock);
-
-	if (ret)
-		dev_err_once(fb->dev->dev, "Failed to update display %d\n",
-			     ret);
-
 	return ret;
 }
 
 static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
 	.destroy	= drm_gem_fb_destroy,
 	.create_handle	= drm_gem_fb_create_handle,
-	.dirty		= mipi_dbi_fb_dirty,
+	.dirty		= tinydrm_fb_dirty,
 };
 
 /**
@@ -278,13 +265,16 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
  * enables the backlight. Drivers can use this in their
  * &drm_simple_display_pipe_funcs->enable callback.
  */
-void mipi_dbi_enable_flush(struct mipi_dbi *mipi)
+void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
+			   struct drm_crtc_state *crtc_state,
+			   struct drm_plane_state *plane_state)
 {
-	struct drm_framebuffer *fb = mipi->tinydrm.pipe.plane.fb;
+	struct tinydrm_device *tdev = &mipi->tinydrm;
+	struct drm_framebuffer *fb = plane_state->fb;
 
 	mipi->enabled = true;
 	if (fb)
-		fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+		tdev->fb_dirty(fb, NULL, 0, 0, NULL, 0);
 
 	backlight_enable(mipi->backlight);
 }
@@ -381,6 +371,8 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
 	if (ret)
 		return ret;
 
+	tdev->fb_dirty = mipi_dbi_fb_dirty;
+
 	/* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
 	ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
 					DRM_MODE_CONNECTOR_VIRTUAL,
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 33b4a71916e4..bb6f80a81899 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -540,14 +540,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
 	clip.y1 = 0;
 	clip.y2 = fb->height;
 
-	mutex_lock(&tdev->dirty_lock);
-
 	if (!epd->enabled)
-		goto out_unlock;
-
-	/* fbdev can flush even when we're not interested */
-	if (tdev->pipe.plane.fb != fb)
-		goto out_unlock;
+		return 0;
 
 	repaper_get_temperature(epd);
 
@@ -555,16 +549,14 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
 		  epd->factored_stage_time);
 
 	buf = kmalloc(fb->width * fb->height, GFP_KERNEL);
-	if (!buf) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (!buf)
+		return -ENOMEM;
 
 	if (import_attach) {
 		ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
 					       DMA_FROM_DEVICE);
 		if (ret)
-			goto out_unlock;
+			goto out_free;
 	}
 
 	tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
@@ -573,7 +565,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
 		ret = dma_buf_end_cpu_access(import_attach->dmabuf,
 					     DMA_FROM_DEVICE);
 		if (ret)
-			goto out_unlock;
+			goto out_free;
 	}
 
 	repaper_gray8_to_mono_reversed(buf, fb->width, fb->height);
@@ -625,11 +617,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
 			}
 	}
 
-out_unlock:
-	mutex_unlock(&tdev->dirty_lock);
-
-	if (ret)
-		DRM_DEV_ERROR(fb->dev->dev, "Failed to update display (%d)\n", ret);
+out_free:
 	kfree(buf);
 
 	return ret;
@@ -638,7 +626,7 @@ out_unlock:
 static const struct drm_framebuffer_funcs repaper_fb_funcs = {
 	.destroy	= drm_gem_fb_destroy,
 	.create_handle	= drm_gem_fb_create_handle,
-	.dirty		= repaper_fb_dirty,
+	.dirty		= tinydrm_fb_dirty,
 };
 
 static void power_off(struct repaper_epd *epd)
@@ -1070,6 +1058,8 @@ static int repaper_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
 
+	tdev->fb_dirty = repaper_fb_dirty;
+
 	ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
 					DRM_MODE_CONNECTOR_VIRTUAL,
 					repaper_formats,
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index bb08b293c8ce..22644b88199a 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -120,14 +120,8 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
 	int start, end;
 	int ret = 0;
 
-	mutex_lock(&tdev->dirty_lock);
-
 	if (!mipi->enabled)
-		goto out_unlock;
-
-	/* fbdev can flush even when we're not interested */
-	if (tdev->pipe.plane.fb != fb)
-		goto out_unlock;
+		return 0;
 
 	tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width,
 			    fb->height);
@@ -141,7 +135,7 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
 
 	ret = st7586_buf_copy(mipi->tx_buf, fb, &clip);
 	if (ret)
-		goto out_unlock;
+		return ret;
 
 	/* Pixels are packed 3 per byte */
 	start = clip.x1 / 3;
@@ -158,20 +152,13 @@ static int st7586_fb_dirty(struct drm_framebuffer *fb,
 				   (u8 *)mipi->tx_buf,
 				   (end - start) * (clip.y2 - clip.y1));
 
-out_unlock:
-	mutex_unlock(&tdev->dirty_lock);
-
-	if (ret)
-		dev_err_once(fb->dev->dev, "Failed to update display %d\n",
-			     ret);
-
 	return ret;
 }
 
 static const struct drm_framebuffer_funcs st7586_fb_funcs = {
 	.destroy	= drm_gem_fb_destroy,
 	.create_handle	= drm_gem_fb_create_handle,
-	.dirty		= st7586_fb_dirty,
+	.dirty		= tinydrm_fb_dirty,
 };
 
 static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
@@ -238,7 +225,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
 
 	mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
 
-	mipi_dbi_enable_flush(mipi);
+	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 
 static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -278,6 +265,8 @@ static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
 	if (ret)
 		return ret;
 
+	tdev->fb_dirty = st7586_fb_dirty;
+
 	ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
 					DRM_MODE_CONNECTOR_VIRTUAL,
 					st7586_formats,
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 19b28f8c78db..189a07894d36 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -99,7 +99,7 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
 
 	msleep(20);
 
-	mipi_dbi_enable_flush(mipi);
+	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 
 static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index 44e824af2ef6..b8ba58861986 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -67,7 +67,9 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
 		  const struct drm_simple_display_pipe_funcs *pipe_funcs,
 		  struct drm_driver *driver,
 		  const struct drm_display_mode *mode, unsigned int rotation);
-void mipi_dbi_enable_flush(struct mipi_dbi *mipi);
+void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
+			   struct drm_crtc_state *crtc_state,
+			   struct drm_plane_state *plane_state);
 void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
 void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
 bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
index 0a4ddbc04c60..5b96f0b12c8c 100644
--- a/include/drm/tinydrm/tinydrm-helpers.h
+++ b/include/drm/tinydrm/tinydrm-helpers.h
@@ -36,6 +36,11 @@ static inline bool tinydrm_machine_little_endian(void)
 bool tinydrm_merge_clips(struct drm_clip_rect *dst,
 			 struct drm_clip_rect *src, unsigned int num_clips,
 			 unsigned int flags, u32 max_width, u32 max_height);
+int tinydrm_fb_dirty(struct drm_framebuffer *fb,
+		     struct drm_file *file_priv,
+		     unsigned int flags, unsigned int color,
+		     struct drm_clip_rect *clips,
+		     unsigned int num_clips);
 void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
 		    struct drm_clip_rect *clip);
 void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 77a93ec577fd..6e2b960e25eb 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -26,6 +26,10 @@ struct tinydrm_device {
 	struct drm_simple_display_pipe pipe;
 	struct mutex dirty_lock;
 	const struct drm_framebuffer_funcs *fb_funcs;
+	int (*fb_dirty)(struct drm_framebuffer *framebuffer,
+			struct drm_file *file_priv, unsigned flags,
+			unsigned color, struct drm_clip_rect *clips,
+			unsigned num_clips);
 };
 
 static inline struct tinydrm_device *

From d775a7b1840ddc96e7f25af20989ff43f2809436 Mon Sep 17 00:00:00 2001
From: Paulo Zanoni <paulo.r.zanoni@intel.com>
Date: Tue, 9 Jan 2018 21:28:35 -0200
Subject: [PATCH 0150/1286] drm/i915/gen11: add support for reading the
 timestamp frequency

The only thing that differs here is that the crystal clock freq now
has four possible values.

This patch gets rid of the "Unknown gen, unable to compute..." message
at boot for gen11.
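
For reference, a minimal sketch of the new decode (this mirrors the
gen11_get_crystal_clock_freq() helper added below, it is not extra
code):

  u32 rpm_config_reg = I915_READ(RPM_CONFIG0);
  u32 crystal_clock = (rpm_config_reg &
                       GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
                      GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

  /* 0 -> 24000 kHz, 1 -> 19200 kHz, 2 -> 38400 kHz, 3 -> 25000 kHz */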

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180109232835.11478-18-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          |  6 +++
 drivers/gpu/drm/i915/intel_device_info.c | 69 ++++++++++++++++++------
 2 files changed, 60 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b0c55f9d401b..5a53d0e1583c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -861,6 +861,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK	(1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
 #define  GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ	0
 #define  GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ	1
+#define  GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT	3
+#define  GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK	(0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define  GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ	0
+#define  GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ	1
+#define  GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ	2
+#define  GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ	3
 #define  GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT	1
 #define  GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK	(0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
 
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 0d1509e25db8..a32ba72c514e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -596,6 +596,52 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
 	return base_freq + frac_freq;
 }
 
+static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
+					u32 rpm_config_reg)
+{
+	u32 f19_2_mhz = 19200;
+	u32 f24_mhz = 24000;
+	u32 crystal_clock = (rpm_config_reg &
+			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+	switch (crystal_clock) {
+	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+		return f19_2_mhz;
+	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+		return f24_mhz;
+	default:
+		MISSING_CASE(crystal_clock);
+		return 0;
+	}
+}
+
+static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
+					u32 rpm_config_reg)
+{
+	u32 f19_2_mhz = 19200;
+	u32 f24_mhz = 24000;
+	u32 f25_mhz = 25000;
+	u32 f38_4_mhz = 38400;
+	u32 crystal_clock = (rpm_config_reg &
+			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+
+	switch (crystal_clock) {
+	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+		return f24_mhz;
+	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+		return f19_2_mhz;
+	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+		return f38_4_mhz;
+	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+		return f25_mhz;
+	default:
+		MISSING_CASE(crystal_clock);
+		return 0;
+	}
+}
+
 static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 {
 	u32 f12_5_mhz = 12500;
@@ -636,10 +682,9 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 		}
 
 		return freq;
-	} else if (INTEL_GEN(dev_priv) <= 10) {
+	} else if (INTEL_GEN(dev_priv) <= 11) {
 		u32 ctc_reg = I915_READ(CTC_MODE);
 		u32 freq = 0;
-		u32 rpm_config_reg = 0;
 
 		/* First figure out the reference frequency. There are 2 ways
 		 * we can compute the frequency, either through the
@@ -649,20 +694,14 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
 			freq = read_reference_ts_freq(dev_priv);
 		} else {
-			u32 crystal_clock;
+			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);
 
-			rpm_config_reg = I915_READ(RPM_CONFIG0);
-			crystal_clock = (rpm_config_reg &
-					 GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
-				GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-			switch (crystal_clock) {
-			case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
-				freq = f19_2_mhz;
-				break;
-			case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
-				freq = f24_mhz;
-				break;
-			}
+			if (INTEL_GEN(dev_priv) <= 10)
+				freq = gen10_get_crystal_clock_freq(dev_priv,
+								rpm_config_reg);
+			else
+				freq = gen11_get_crystal_clock_freq(dev_priv,
+								rpm_config_reg);
 
 			/* Now figure out how the command stream's timestamp
 			 * register increments from this frequency (it might

From c216e90686105e5b9fdbb22f6cfcc38334e432cc Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 27 Mar 2018 22:01:36 +0100
Subject: [PATCH 0151/1286] drm/i915/execlists: Reset ring registers on
 rebinding contexts

Tvrtko uncovered a fun issue with recovering from a wedged device. In his
tests, he wedged the driver by injecting an unrecoverable hang whilst a
batch was spinning. As we reset the GPU in the middle of the spinner,
when resumed it would continue on from the next instruction in the ring
and write its breadcrumb. However, on wedging we updated our
bookkeeping to indicate that the GPU had completed executing and would
restart from after the breadcrumb; so the emission of the stale
breadcrumb from before the reset came as a bit of a surprise.

A simple fix is that when rebinding the context into the GPU, we update
the ring register state in the context image to match our bookkeeping.
We already have to update the RING_START and RING_TAIL, so updating
RING_HEAD as well is trivial. This works because whenever we unbind the
context, we keep the bookkeeping in check; and on wedging we unbind all
contexts.
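
Sketched out (this is just the one-line hunk below shown in context,
not additional code), the pinned context image is now kept in sync
like so:

  ce->lrc_reg_state[CTX_RING_BUFFER_START + 1] =
          i915_ggtt_offset(ce->ring->vma);        /* RING_START */
  ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring->head;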

Testcase: igt/gem_eio
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327210136.16750-1-chris@chris-wilson.co.uk
Tested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ba7f7831f934..654634254b64 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1272,6 +1272,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
 		i915_ggtt_offset(ce->ring->vma);
+	ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
 
 	ce->state->obj->pin_global++;
 	i915_gem_context_get(ctx);

From 4d82a174847b1b2d5e541ca7040398ac56efed7b Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:18 +0000
Subject: [PATCH 0152/1286] drm/i915/guc: Add documentation for MMIO based
 communication

As we are going to extend our use of MMIO based communication,
try to explain its mechanics and update corresponding definitions.
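
For illustration only (a sketch built on the helpers defined in the
hunk below, inside intel_guc.c where guc_send_reg() is available),
decoding a status dword read back from scratch register 0 looks like:

  u32 status = I915_READ(guc_send_reg(guc, 0));
  u32 data = 0;

  /* header layout: [31:28] type, [27:16] data, [15:0] code */
  if (INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
          data = INTEL_GUC_MSG_TO_DATA(status);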

v2: fix checkpatch MACRO_ARG_REUSE

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Kelvin Gardiner <kelvin.gardiner@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com> #1
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-2-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c      | 20 +++---
 drivers/gpu/drm/i915/intel_guc_ct.c   |  2 +-
 drivers/gpu/drm/i915/intel_guc_fwif.h | 88 +++++++++++++++++++++------
 3 files changed, 82 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index c5f64c762f0a..78e68b169cf2 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -329,6 +329,9 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
 	GEM_BUG_ON(!len);
 	GEM_BUG_ON(len > guc->send_regs.count);
 
+	/* We expect only action code */
+	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);
+
 	/* If CT is available, we expect to use MMIO only during init/fini */
 	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
 		*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
@@ -350,18 +353,15 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
 	 */
 	ret = __intel_wait_for_register_fw(dev_priv,
 					   guc_send_reg(guc, 0),
-					   INTEL_GUC_RECV_MASK,
-					   INTEL_GUC_RECV_MASK,
+					   INTEL_GUC_MSG_TYPE_MASK,
+					   INTEL_GUC_MSG_TYPE_RESPONSE <<
+					   INTEL_GUC_MSG_TYPE_SHIFT,
 					   10, 10, &status);
-	if (status != INTEL_GUC_STATUS_SUCCESS) {
-		/*
-		 * Either the GuC explicitly returned an error (which
-		 * we convert to -EIO here) or no response at all was
-		 * received within the timeout limit (-ETIMEDOUT)
-		 */
-		if (ret != -ETIMEDOUT)
-			ret = -EIO;
+	/* If GuC explicitly returned an error, convert it to -EIO */
+	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
+		ret = -EIO;
 
+	if (ret) {
 		DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
 				 " ret=%d status=0x%08X response=0x%08X\n",
 				 action[0], ret, status,
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index a726283489d1..1dafa7a20d89 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -398,7 +398,7 @@ static int ctch_send(struct intel_guc *guc,
 	err = wait_for_response(desc, fence, status);
 	if (unlikely(err))
 		return err;
-	if (*status != INTEL_GUC_STATUS_SUCCESS)
+	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status))
 		return -EIO;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 72941bd704fd..83143e8a0730 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -560,7 +560,68 @@ struct guc_shared_ctx_data {
 	struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
 } __packed;
 
-/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
+/**
+ * DOC: MMIO based communication
+ *
+ * The MMIO based communication between Host and GuC uses software scratch
+ * registers, where first register holds data treated as message header,
+ * and other registers are used to hold message payload.
+ *
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8
+ *
+ *      +-----------+---------+---------+---------+
+ *      |  MMIO[0]  | MMIO[1] |   ...   | MMIO[n] |
+ *      +-----------+---------+---------+---------+
+ *      | header    |      optional payload       |
+ *      +======+====+=========+=========+=========+
+ *      | 31:28|type|         |         |         |
+ *      +------+----+         |         |         |
+ *      | 27:16|data|         |         |         |
+ *      +------+----+         |         |         |
+ *      |  15:0|code|         |         |         |
+ *      +------+----+---------+---------+---------+
+ *
+ * The message header consists of:
+ *
+ * - **type**, indicates message type
+ * - **code**, indicates message code, is specific for **type**
+ * - **data**, indicates message data, optional, depends on **code**
+ *
+ * The following message **types** are supported:
+ *
+ * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code
+ *   must be provided in **code** field. Optional action specific parameters
+ *   can be provided in remaining payload registers or **data** field.
+ *
+ * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request,
+ *   action response status will be provided in **code** field. Optional
+ *   response data can be returned in remaining payload registers or **data**
+ *   field.
+ */
+
+#define INTEL_GUC_MSG_TYPE_SHIFT	28
+#define INTEL_GUC_MSG_TYPE_MASK		(0xF << INTEL_GUC_MSG_TYPE_SHIFT)
+#define INTEL_GUC_MSG_DATA_SHIFT	16
+#define INTEL_GUC_MSG_DATA_MASK		(0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
+#define INTEL_GUC_MSG_CODE_SHIFT	0
+#define INTEL_GUC_MSG_CODE_MASK		(0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
+
+#define __INTEL_GUC_MSG_GET(T, m) \
+	(((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT)
+#define INTEL_GUC_MSG_TO_TYPE(m)	__INTEL_GUC_MSG_GET(TYPE, m)
+#define INTEL_GUC_MSG_TO_DATA(m)	__INTEL_GUC_MSG_GET(DATA, m)
+#define INTEL_GUC_MSG_TO_CODE(m)	__INTEL_GUC_MSG_GET(CODE, m)
+
+enum intel_guc_msg_type {
+	INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
+	INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
+};
+
+#define __INTEL_GUC_MSG_TYPE_IS(T, m) \
+	(INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T)
+#define INTEL_GUC_MSG_IS_REQUEST(m)	__INTEL_GUC_MSG_TYPE_IS(REQUEST, m)
+#define INTEL_GUC_MSG_IS_RESPONSE(m)	__INTEL_GUC_MSG_TYPE_IS(RESPONSE, m)
+
 enum intel_guc_action {
 	INTEL_GUC_ACTION_DEFAULT = 0x0,
 	INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
@@ -597,24 +658,17 @@ enum intel_guc_report_status {
 #define GUC_LOG_CONTROL_VERBOSITY_MASK	(0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
 #define GUC_LOG_CONTROL_DEFAULT_LOGGING	(1 << 8)
 
-/*
- * The GuC sends its response to a command by overwriting the
- * command in SS0. The response is distinguishable from a command
- * by the fact that all the MASK bits are set. The remaining bits
- * give more detail.
- */
-#define	INTEL_GUC_RECV_MASK	((u32)0xF0000000)
-#define	INTEL_GUC_RECV_IS_RESPONSE(x)	((u32)(x) >= INTEL_GUC_RECV_MASK)
-#define	INTEL_GUC_RECV_STATUS(x)	(INTEL_GUC_RECV_MASK | (x))
-
-/* GUC will return status back to SOFT_SCRATCH_O_REG */
-enum intel_guc_status {
-	INTEL_GUC_STATUS_SUCCESS = INTEL_GUC_RECV_STATUS(0x0),
-	INTEL_GUC_STATUS_ALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x10),
-	INTEL_GUC_STATUS_DEALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x20),
-	INTEL_GUC_STATUS_GENERIC_FAIL = INTEL_GUC_RECV_STATUS(0x0000F000)
+enum intel_guc_response_status {
+	INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+	INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
 };
 
+#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \
+	 (typecheck(u32, (m)) && \
+	  ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \
+	  ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \
+	   (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT)))
+
 /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
 enum intel_guc_recv_message {
 	INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),

From b839a869dfc9f01aab72c5dd26cb7a7f2e264201 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:19 +0000
Subject: [PATCH 0153/1286] drm/i915/guc: Add support for data reporting in GuC
 responses

GuC may return additional data in the response message.
The format and meaning of this data are action specific. We will
use this non-negative data as a new success return value.
Currently used actions don't return data that way yet.
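
From a caller's point of view the contract becomes (illustrative only,
assuming `action` is an already built action array):

  int ret = intel_guc_send(guc, action, ARRAY_SIZE(action));

  if (ret < 0)
          return ret;     /* -EIO, -ETIMEDOUT, ... */

  /* ret >= 0: action specific data decoded from the status dword */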

v2: fix prohibited space after '~' (Michel)
    update commit message (Daniele)
v3: rebase

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-3-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c    |  3 +++
 drivers/gpu/drm/i915/intel_guc_ct.c | 14 ++++++++------
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 78e68b169cf2..1af32a0648db 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -366,6 +366,9 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
 				 " ret=%d status=0x%08X response=0x%08X\n",
 				 action[0], ret, status,
 				 I915_READ(SOFT_SCRATCH(15)));
+	} else {
+		/* Use data from the GuC response as our return value */
+		ret = INTEL_GUC_MSG_TO_DATA(status);
 	}
 
 	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 1dafa7a20d89..fa522594d716 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -400,7 +400,9 @@ static int ctch_send(struct intel_guc *guc,
 		return err;
 	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status))
 		return -EIO;
-	return 0;
+
+	/* Use data from the GuC status as our return value */
+	return INTEL_GUC_MSG_TO_DATA(*status);
 }
 
 /*
@@ -410,18 +412,18 @@ static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len)
 {
 	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
 	u32 status = ~0; /* undefined */
-	int err;
+	int ret;
 
 	mutex_lock(&guc->send_mutex);
 
-	err = ctch_send(guc, ctch, action, len, &status);
-	if (unlikely(err)) {
+	ret = ctch_send(guc, ctch, action, len, &status);
+	if (unlikely(ret < 0)) {
 		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
-			  action[0], err, status);
+			  action[0], ret, status);
 	}
 
 	mutex_unlock(&guc->send_mutex);
-	return err;
+	return ret;
 }
 
 /**

From e09af3a6a62dfe0a9c4bc931c81447a3969177fb Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:20 +0000
Subject: [PATCH 0154/1286] drm/i915/guc: Prepare send() function to accept
 bigger response

This is a preparation step for the upcoming patches.
We can already return some small data decoded from the command
status, but we will need more in the future.

v2: add explicit response buf size
v3: squash with helper patch
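
A sketch of the resulting call sites (illustrative; filling the buffer
is only wired up in the following patches, and `action`/`response` are
hypothetical):

  u32 response[8];        /* size chosen by the caller, in dwords */
  int ret;

  /* most callers keep the short form, which passes NULL/0 */
  ret = intel_guc_send(guc, action, ARRAY_SIZE(action));

  /* callers expecting response payload use the extended form */
  ret = intel_guc_send_and_receive(guc, action, ARRAY_SIZE(action),
                                   response, ARRAY_SIZE(response));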

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-4-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c    |  6 ++++--
 drivers/gpu/drm/i915/intel_guc.h    | 18 ++++++++++++++----
 drivers/gpu/drm/i915/intel_guc_ct.c |  7 ++++---
 3 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 1af32a0648db..ba5a962ca3d4 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -310,7 +310,8 @@ void intel_guc_init_params(struct intel_guc *guc)
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
 }
 
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
+		       u32 *response_buf, u32 response_buf_size)
 {
 	WARN(1, "Unexpected send: action=%#x\n", *action);
 	return -ENODEV;
@@ -319,7 +320,8 @@ int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
 /*
  * This function implements the MMIO based host to GuC interface.
  */
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
+			u32 *response_buf, u32 response_buf_size)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	u32 status;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 13f3d1dbf38d..7ee0732d8b79 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -88,7 +88,8 @@ struct intel_guc {
 	struct mutex send_mutex;
 
 	/* GuC's FW specific send function */
-	int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
+	int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
+		    u32 *response_buf, u32 response_buf_size);
 
 	/* GuC's FW specific notify function */
 	void (*notify)(struct intel_guc *guc);
@@ -97,7 +98,14 @@ struct intel_guc {
 static
 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 {
-	return guc->send(guc, action, len);
+	return guc->send(guc, action, len, NULL, 0);
+}
+
+static inline int
+intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
+			   u32 *response_buf, u32 response_buf_size)
+{
+	return guc->send(guc, action, len, response_buf, response_buf_size);
 }
 
 static inline void intel_guc_notify(struct intel_guc *guc)
@@ -140,8 +148,10 @@ int intel_guc_init_wq(struct intel_guc *guc);
 void intel_guc_fini_wq(struct intel_guc *guc);
 int intel_guc_init(struct intel_guc *guc);
 void intel_guc_fini(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
+		       u32 *response_buf, u32 response_buf_size);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
+			u32 *response_buf, u32 response_buf_size);
 void intel_guc_to_host_event_handler(struct intel_guc *guc);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index fa522594d716..a54bf58c64ab 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -88,7 +88,7 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
 	int err;
 
 	/* Can't use generic send(), CT registration must go over MMIO */
-	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
+	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
 	if (err)
 		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
 			  guc_ct_buffer_type_to_str(type), err);
@@ -107,7 +107,7 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
 	int err;
 
 	/* Can't use generic send(), CT deregistration must go over MMIO */
-	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
+	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
 	if (err)
 		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
 			  guc_ct_buffer_type_to_str(type), owner, err);
@@ -408,7 +408,8 @@ static int ctch_send(struct intel_guc *guc,
 /*
  * Command Transport (CT) buffer based GuC send function.
  */
-static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len)
+static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+			     u32 *response_buf, u32 response_buf_size)
 {
 	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
 	u32 status = ~0; /* undefined */

From f6a70b59705109edb935674d36a5065fe2a5b4ad Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:21 +0000
Subject: [PATCH 0155/1286] drm/i915/guc: Implement response handling in
 send_mmio()

We're using the data encoded in the status MMIO as the return value of
the send function, but GuC may also write more data in the remaining
MMIO regs. Let's copy the content of these registers to the buffer
provided by the caller.

v2: new line (Michel)
v3: updated commit message

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-5-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index ba5a962ca3d4..b83a5ad9cb79 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -368,11 +368,20 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 				 " ret=%d status=0x%08X response=0x%08X\n",
 				 action[0], ret, status,
 				 I915_READ(SOFT_SCRATCH(15)));
-	} else {
-		/* Use data from the GuC response as our return value */
-		ret = INTEL_GUC_MSG_TO_DATA(status);
+		goto out;
 	}
 
+	if (response_buf) {
+		int count = min(response_buf_size, guc->send_regs.count - 1);
+
+		for (i = 0; i < count; i++)
+			response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
+	}
+
+	/* Use data from the GuC response as our return value */
+	ret = INTEL_GUC_MSG_TO_DATA(status);
+
+out:
 	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
 	mutex_unlock(&guc->send_mutex);
 

From 769bfbf943235c8c54e189a0d12bf8d3195af0c5 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:22 +0000
Subject: [PATCH 0156/1286] drm/i915/guc: Make event handler a virtual function

On platforms with CTB based GuC communication, we will handle
GuC events in a different way. Let's make the event handler a virtual
function to allow easy switching between those variants.
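
The resulting indirection (sketch, matching the hunks below):

  /* installed whenever communication is enabled or disabled */
  guc->handler = intel_guc_to_host_event_handler_mmio;   /* or _nop */

  /* the interrupt path simply calls through the vfunc */
  intel_guc_to_host_event_handler(guc);   /* -> guc->handler(guc) */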

Credits-to: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-6-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c |  8 +++++++-
 drivers/gpu/drm/i915/intel_guc.h | 10 ++++++++++
 drivers/gpu/drm/i915/intel_uc.c  |  2 ++
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index b83a5ad9cb79..411c8e910583 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -69,6 +69,7 @@ void intel_guc_init_early(struct intel_guc *guc)
 	mutex_init(&guc->send_mutex);
 	spin_lock_init(&guc->irq_lock);
 	guc->send = intel_guc_send_nop;
+	guc->handler = intel_guc_to_host_event_handler_nop;
 	guc->notify = gen8_guc_raise_irq;
 }
 
@@ -317,6 +318,11 @@ int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
 	return -ENODEV;
 }
 
+void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
+{
+	WARN(1, "Unexpected event: no suitable handler\n");
+}
+
 /*
  * This function implements the MMIO based host to GuC interface.
  */
@@ -388,7 +394,7 @@ out:
 	return ret;
 }
 
-void intel_guc_to_host_event_handler(struct intel_guc *guc)
+void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	u32 msg, val;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 7ee0732d8b79..6dc109ab61bc 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -91,6 +91,9 @@ struct intel_guc {
 	int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
 		    u32 *response_buf, u32 response_buf_size);
 
+	/* GuC's FW specific event handler function */
+	void (*handler)(struct intel_guc *guc);
+
 	/* GuC's FW specific notify function */
 	void (*notify)(struct intel_guc *guc);
 };
@@ -113,6 +116,11 @@ static inline void intel_guc_notify(struct intel_guc *guc)
 	guc->notify(guc);
 }
 
+static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
+{
+	guc->handler(guc);
+}
+
 /* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
 #define GUC_GGTT_TOP	0xFEE00000
 
@@ -153,6 +161,8 @@ int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 			u32 *response_buf, u32 response_buf_size);
 void intel_guc_to_host_event_handler(struct intel_guc *guc);
+void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
+void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 4aad8442e789..081e42462aad 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -233,6 +233,7 @@ static int guc_enable_communication(struct intel_guc *guc)
 		return intel_guc_ct_enable(&guc->ct);
 
 	guc->send = intel_guc_send_mmio;
+	guc->handler = intel_guc_to_host_event_handler_mmio;
 	return 0;
 }
 
@@ -246,6 +247,7 @@ static void guc_disable_communication(struct intel_guc *guc)
 	gen9_disable_guc_interrupts(dev_priv);
 
 	guc->send = intel_guc_send_nop;
+	guc->handler = intel_guc_to_host_event_handler_nop;
 }
 
 int intel_uc_init_misc(struct drm_i915_private *dev_priv)

From 1d407096002becab2fd5b19253cee0de65aab668 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:23 +0000
Subject: [PATCH 0157/1286] drm/i915/guc: Prepare to handle messages from CT
 RECV buffer

GuC can respond to our commands not only by updating the SEND buffer
descriptor, but also by sending a response message over the RECV buffer.
GuC can also send unsolicited request messages over the RECV buffer.
Let's start reading those messages and add placeholders
for the actual response/request handlers.
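
Dispatching a message read out of the RECV buffer then looks roughly
like this (sketch using the helpers introduced below):

  u32 header = msg[0];
  unsigned int len = ct_header_get_len(header) + 1;   /* incl. header;
                                  bounds what ctb_read() copies out */
  int err;

  if (ct_header_is_response(header))
          err = ct_handle_response(ct, msg);   /* fence, status, data */
  else
          err = ct_handle_request(ct, msg);    /* unsolicited request */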

v2: misc improvements (Michal)
v3: change response detection (Michal)
    invalid status is protocol error (Michal)
v4: rebase
v5: fix checkpatch (Michel)
    don't use fields before check (Jani)
    add some documentation (Michal)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com> # 4.5
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-7-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc_ct.c | 184 +++++++++++++++++++++++++++-
 1 file changed, 183 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index a54bf58c64ab..14f55de342e7 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -273,6 +273,24 @@ static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
 	return ++ctch->next_fence;
 }
 
+/**
+ * DOC: CTB Host to GuC request
+ *
+ * Format of the CTB Host to GuC request message is as follows::
+ *
+ *      +------------+---------+---------+---------+---------+
+ *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
+ *      +------------+---------+---------+---------+---------+
+ *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
+ *      +   HEADER   +---------+---------+---------+---------+
+ *      |            |    0    |    1    |   ...   |    n    |
+ *      +============+=========+=========+=========+=========+
+ *      |  len >= 1  |  FENCE  |     request specific data   |
+ *      +------+-----+---------+---------+---------+---------+
+ *
+ *                   ^-----------------len-------------------^
+ */
+
 static int ctb_write(struct intel_guc_ct_buffer *ctb,
 		     const u32 *action,
 		     u32 len /* in dwords */,
@@ -305,7 +323,8 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
 	if (unlikely(used + len + 1 >= size))
 		return -ENOSPC;
 
-	/* Write the message. The format is the following:
+	/*
+	 * Write the message. The format is the following:
 	 * DW0: header (including action code)
 	 * DW1: fence
 	 * DW2+: action data
@@ -427,6 +446,167 @@ static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
 	return ret;
 }
 
+static inline unsigned int ct_header_get_len(u32 header)
+{
+	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
+}
+
+static inline unsigned int ct_header_get_action(u32 header)
+{
+	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
+}
+
+static inline bool ct_header_is_response(u32 header)
+{
+	return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
+}
+
+static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
+{
+	struct guc_ct_buffer_desc *desc = ctb->desc;
+	u32 head = desc->head / 4;	/* in dwords */
+	u32 tail = desc->tail / 4;	/* in dwords */
+	u32 size = desc->size / 4;	/* in dwords */
+	u32 *cmds = ctb->cmds;
+	s32 available;			/* in dwords */
+	unsigned int len;
+	unsigned int i;
+
+	GEM_BUG_ON(desc->size % 4);
+	GEM_BUG_ON(desc->head % 4);
+	GEM_BUG_ON(desc->tail % 4);
+	GEM_BUG_ON(tail >= size);
+	GEM_BUG_ON(head >= size);
+
+	/* tail == head condition indicates empty */
+	available = tail - head;
+	if (unlikely(available == 0))
+		return -ENODATA;
+
+	/* beware of buffer wrap case */
+	if (unlikely(available < 0))
+		available += size;
+	GEM_BUG_ON(available < 0);
+
+	data[0] = cmds[head];
+	head = (head + 1) % size;
+
+	/* message len with header */
+	len = ct_header_get_len(data[0]) + 1;
+	if (unlikely(len > (u32)available)) {
+		DRM_ERROR("CT: incomplete message %*phn %*phn %*phn\n",
+			  4, data,
+			  4 * (head + available - 1 > size ?
+			       size - head : available - 1), &cmds[head],
+			  4 * (head + available - 1 > size ?
+			       available - 1 - size + head : 0), &cmds[0]);
+		return -EPROTO;
+	}
+
+	for (i = 1; i < len; i++) {
+		data[i] = cmds[head];
+		head = (head + 1) % size;
+	}
+
+	desc->head = head * 4;
+	return 0;
+}
+
+/**
+ * DOC: CTB GuC to Host response
+ *
+ * Format of the CTB GuC to Host response message is as follows::
+ *
+ *      +------------+---------+---------+---------+---------+---------+
+ *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
+ *      +------------+---------+---------+---------+---------+---------+
+ *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
+ *      +   HEADER   +---------+---------+---------+---------+---------+
+ *      |            |    0    |    1    |    2    |   ...   |    n    |
+ *      +============+=========+=========+=========+=========+=========+
+ *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
+ *      +------+-----+---------+---------+---------+---------+---------+
+ *
+ *                   ^-----------------------len-----------------------^
+ */
+
+static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
+{
+	u32 header = msg[0];
+	u32 len = ct_header_get_len(header);
+	u32 msglen = len + 1; /* total message length including header */
+	u32 fence;
+	u32 status;
+
+	GEM_BUG_ON(!ct_header_is_response(header));
+
+	/* Response payload shall at least include fence and status */
+	if (unlikely(len < 2)) {
+		DRM_ERROR("CT: corrupted response %*phn\n", 4 * msglen, msg);
+		return -EPROTO;
+	}
+
+	fence = msg[1];
+	status = msg[2];
+
+	/* Format of the status follows RESPONSE message */
+	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
+		DRM_ERROR("CT: corrupted response %*phn\n", 4 * msglen, msg);
+		return -EPROTO;
+	}
+
+	/* XXX */
+	return 0;
+}
+
+static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
+{
+	u32 header = msg[0];
+
+	GEM_BUG_ON(ct_header_is_response(header));
+
+	/* XXX */
+	return 0;
+}
+
+static void ct_process_host_channel(struct intel_guc_ct *ct)
+{
+	struct intel_guc_ct_channel *ctch = &ct->host_channel;
+	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
+	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+	int err = 0;
+
+	if (!ctch_is_open(ctch))
+		return;
+
+	do {
+		err = ctb_read(ctb, msg);
+		if (err)
+			break;
+
+		if (ct_header_is_response(msg[0]))
+			err = ct_handle_response(ct, msg);
+		else
+			err = ct_handle_request(ct, msg);
+	} while (!err);
+
+	if (GEM_WARN_ON(err == -EPROTO)) {
+		DRM_ERROR("CT: corrupted message detected!\n");
+		ctb->desc->is_in_error = 1;
+	}
+}
+
+/*
+ * When we're communicating with the GuC over CT, GuC uses events
+ * to notify us about new messages being posted on the RECV buffer.
+ */
+static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
+{
+	struct intel_guc_ct *ct = &guc->ct;
+
+	ct_process_host_channel(ct);
+}
+
 /**
  * intel_guc_ct_enable - Enable buffer based command transport.
  * @ct: pointer to CT struct
@@ -450,6 +630,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 
 	/* Switch into cmd transport buffer based send() */
 	guc->send = intel_guc_send_ct;
+	guc->handler = intel_guc_to_host_event_handler_ct;
 	DRM_INFO("CT: %s\n", enableddisabled(true));
 	return 0;
 }
@@ -475,5 +656,6 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
 
 	/* Disable send */
 	guc->send = intel_guc_send_nop;
+	guc->handler = intel_guc_to_host_event_handler_nop;
 	DRM_INFO("CT: %s\n", enableddisabled(false));
 }

From 24827cd0dd86a3158abc90ee64dcaf18aa843970 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:24 +0000
Subject: [PATCH 0158/1286] drm/i915/guc: Use better name for helper wait
 function

In the next patch we will introduce another way of waiting for the
response that will use the RECV buffer. To avoid misleading names,
rename the old wait function to reflect the fact that it is based on a
descriptor update.

v2: fix comment style (Michal)
v3: use more specific name (Michel)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-8-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc_ct.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 14f55de342e7..2d805a2fd1f0 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -351,16 +351,25 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
 	return 0;
 }
 
-/* Wait for the response from the GuC.
+/**
+ * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
+ * @desc:	buffer descriptor
  * @fence:	response fence
  * @status:	placeholder for status
- * return:	0 response received (status is valid)
- *		-ETIMEDOUT no response within hardcoded timeout
- *		-EPROTO no response, ct buffer was in error
+ *
+ * GuC will update the CT buffer descriptor with a new fence and status
+ * after processing the command identified by the fence. Wait for the
+ * specified fence and then read the status of the command from the
+ * descriptor.
+ *
+ * Return:
+ * *	0 response received (status is valid)
+ * *	-ETIMEDOUT no response within hardcoded timeout
+ * *	-EPROTO no response, CT buffer is in error
  */
-static int wait_for_response(struct guc_ct_buffer_desc *desc,
-			     u32 fence,
-			     u32 *status)
+static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
+				    u32 fence,
+				    u32 *status)
 {
 	int err;
 
@@ -414,7 +423,7 @@ static int ctch_send(struct intel_guc *guc,
 
 	intel_guc_notify(guc);
 
-	err = wait_for_response(desc, fence, status);
+	err = wait_for_ctb_desc_update(desc, fence, status);
 	if (unlikely(err))
 		return err;
 	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status))

From 9ef4c75e06006eb041c0bcc578548e3cdb8f9e03 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Tue, 27 Mar 2018 12:14:39 +0000
Subject: [PATCH 0159/1286] drm/i915/guc: Implement response handling in
 send_ct()

Instead of returning small data in the response status dword,
GuC may append longer data as the response message payload.
If the caller provides a response buffer, we will copy the received
data and use the number of received data dwords as the new success
return value. We will WARN if the response from GuC does not
match the caller's expectation.
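
The bookkeeping this builds on (copied from the hunk below, with
comments added for context):

  struct ct_request {
          struct list_head link;  /* on ct->pending_requests, under ct->lock */
          u32 fence;              /* matches the fence sent with the request */
          u32 status;             /* response status, set by the msg handler */
          u32 response_len;       /* dwords copied into response_buf */
          u32 *response_buf;      /* caller supplied, may be NULL */
  };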

v2: fix timeout and checkpatch warnings (Michal)
v3: fix checkpatch again (Michel)
    update wait function name (Michal)
    no need for spinlock_irqsave (MichalWi)
    no magic numbers (MichalWi)
    must check before use (Jani)
    add some more documentation (Michal)
v4: update documentation (Michal)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com> #2.5
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327121439.70096-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc_ct.c   | 142 +++++++++++++++++++++++---
 drivers/gpu/drm/i915/intel_guc_ct.h   |   6 ++
 drivers/gpu/drm/i915/intel_guc_fwif.h |  52 ++++++++++
 3 files changed, 186 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 2d805a2fd1f0..41b071c6d757 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -24,6 +24,14 @@
 #include "i915_drv.h"
 #include "intel_guc_ct.h"
 
+struct ct_request {
+	struct list_head link;
+	u32 fence;
+	u32 status;
+	u32 response_len;
+	u32 *response_buf;
+};
+
 enum { CTB_SEND = 0, CTB_RECV = 1 };
 
 enum { CTB_OWNER_HOST = 0 };
@@ -36,6 +44,9 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 {
 	/* we're using static channel owners */
 	ct->host_channel.owner = CTB_OWNER_HOST;
+
+	spin_lock_init(&ct->lock);
+	INIT_LIST_HEAD(&ct->pending_requests);
 }
 
 static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
@@ -294,7 +305,8 @@ static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
 static int ctb_write(struct intel_guc_ct_buffer *ctb,
 		     const u32 *action,
 		     u32 len /* in dwords */,
-		     u32 fence)
+		     u32 fence,
+		     bool want_response)
 {
 	struct guc_ct_buffer_desc *desc = ctb->desc;
 	u32 head = desc->head / 4;	/* in dwords */
@@ -331,6 +343,7 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
 	 */
 	header = (len << GUC_CT_MSG_LEN_SHIFT) |
 		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
+		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
 		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);
 
 	cmds[tail] = header;
@@ -401,36 +414,108 @@ static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
 	return err;
 }
 
-static int ctch_send(struct intel_guc *guc,
+/**
+ * wait_for_ct_request_update - Wait for CT request state update.
+ * @req:	pointer to pending request
+ * @status:	placeholder for status
+ *
+ * For each sent request, the GuC shall send back a CT response message.
+ * Our message handler will update the status of the tracked request
+ * once a response message with the given fence is received. Wait here
+ * and check for a valid response status value.
+ *
+ * Return:
+ * *	0 response received (status is valid)
+ * *	-ETIMEDOUT no response within hardcoded timeout
+ */
+static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
+{
+	int err;
+
+	/*
+	 * Fast commands should complete in less than 10us, so sample quickly
+	 * up to that length of time, then switch to a slower sleep-wait loop.
+	 * No GuC command should ever take longer than 10ms.
+	 */
+#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
+	err = wait_for_us(done, 10);
+	if (err)
+		err = wait_for(done, 10);
+#undef done
+
+	if (unlikely(err))
+		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);
+
+	*status = req->status;
+	return err;
+}
+
+static int ctch_send(struct intel_guc_ct *ct,
 		     struct intel_guc_ct_channel *ctch,
 		     const u32 *action,
 		     u32 len,
+		     u32 *response_buf,
+		     u32 response_buf_size,
 		     u32 *status)
 {
 	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
 	struct guc_ct_buffer_desc *desc = ctb->desc;
+	struct ct_request request;
+	unsigned long flags;
 	u32 fence;
 	int err;
 
 	GEM_BUG_ON(!ctch_is_open(ctch));
 	GEM_BUG_ON(!len);
 	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
+	GEM_BUG_ON(!response_buf && response_buf_size);
 
 	fence = ctch_get_next_fence(ctch);
-	err = ctb_write(ctb, action, len, fence);
+	request.fence = fence;
+	request.status = 0;
+	request.response_len = response_buf_size;
+	request.response_buf = response_buf;
+
+	spin_lock_irqsave(&ct->lock, flags);
+	list_add_tail(&request.link, &ct->pending_requests);
+	spin_unlock_irqrestore(&ct->lock, flags);
+
+	err = ctb_write(ctb, action, len, fence, !!response_buf);
 	if (unlikely(err))
-		return err;
+		goto unlink;
 
-	intel_guc_notify(guc);
+	intel_guc_notify(ct_to_guc(ct));
 
-	err = wait_for_ctb_desc_update(desc, fence, status);
+	if (response_buf)
+		err = wait_for_ct_request_update(&request, status);
+	else
+		err = wait_for_ctb_desc_update(desc, fence, status);
 	if (unlikely(err))
-		return err;
-	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status))
-		return -EIO;
+		goto unlink;
 
-	/* Use data from the GuC status as our return value */
-	return INTEL_GUC_MSG_TO_DATA(*status);
+	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
+		err = -EIO;
+		goto unlink;
+	}
+
+	if (response_buf) {
+		/* There shall be no data in the status */
+		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
+		/* Return actual response len */
+		err = request.response_len;
+	} else {
+		/* There shall be no response payload */
+		WARN_ON(request.response_len);
+		/* Return data decoded from the status dword */
+		err = INTEL_GUC_MSG_TO_DATA(*status);
+	}
+
+unlink:
+	spin_lock_irqsave(&ct->lock, flags);
+	list_del(&request.link);
+	spin_unlock_irqrestore(&ct->lock, flags);
+
+	return err;
 }
 
 /*
@@ -439,13 +524,15 @@ static int ctch_send(struct intel_guc *guc,
 static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
 			     u32 *response_buf, u32 response_buf_size)
 {
-	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+	struct intel_guc_ct *ct = &guc->ct;
+	struct intel_guc_ct_channel *ctch = &ct->host_channel;
 	u32 status = ~0; /* undefined */
 	int ret;
 
 	mutex_lock(&guc->send_mutex);
 
-	ret = ctch_send(guc, ctch, action, len, &status);
+	ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
+			&status);
 	if (unlikely(ret < 0)) {
 		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
 			  action[0], ret, status);
@@ -546,8 +633,12 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 	u32 msglen = len + 1; /* total message length including header */
 	u32 fence;
 	u32 status;
+	u32 datalen;
+	struct ct_request *req;
+	bool found = false;
 
 	GEM_BUG_ON(!ct_header_is_response(header));
+	GEM_BUG_ON(!in_irq());
 
 	/* Response payload shall at least include fence and status */
 	if (unlikely(len < 2)) {
@@ -557,6 +648,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 
 	fence = msg[1];
 	status = msg[2];
+	datalen = len - 2;
 
 	/* Format of the status follows RESPONSE message */
 	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
@@ -564,7 +656,29 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 		return -EPROTO;
 	}
 
-	/* XXX */
+	spin_lock(&ct->lock);
+	list_for_each_entry(req, &ct->pending_requests, link) {
+		if (unlikely(fence != req->fence)) {
+			DRM_DEBUG_DRIVER("CT: request %u awaits response\n",
+					 req->fence);
+			continue;
+		}
+		if (unlikely(datalen > req->response_len)) {
+			DRM_ERROR("CT: response %u too long %*phn\n",
+				  req->fence, 4 * msglen, msg);
+			datalen = 0;
+		}
+		if (datalen)
+			memcpy(req->response_buf, msg + 3, 4 * datalen);
+		req->response_len = datalen;
+		WRITE_ONCE(req->status, status);
+		found = true;
+		break;
+	}
+	spin_unlock(&ct->lock);
+
+	if (!found)
+		DRM_ERROR("CT: unsolicited response %*phn\n", 4 * msglen, msg);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index 595c8ad5bd4a..fac6e53194c6 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -75,6 +75,12 @@ struct intel_guc_ct_channel {
 struct intel_guc_ct {
 	struct intel_guc_ct_channel host_channel;
 	/* other channels are tbd */
+
+	/** @lock: protects pending requests list */
+	spinlock_t lock;
+
+	/** @pending_requests: list of requests waiting for response */
+	struct list_head pending_requests;
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 83143e8a0730..d73673f5d30c 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -327,6 +327,58 @@ struct guc_stage_desc {
 	u64 desc_private;
 } __packed;
 
+/**
+ * DOC: CTB based communication
+ *
+ * The CTB (command transport buffer) communication between Host and GuC
+ * is based on a u32 data stream written to a shared buffer. One buffer can
+ * be used to transmit data only in one direction (one-directional channel).
+ *
+ * The current status of each buffer is stored in the buffer descriptor.
+ * The buffer descriptor holds tail and head fields that represent the active
+ * data stream. The tail field is updated by the data producer (sender), and
+ * the head field is updated by the data consumer (receiver)::
+ *
+ *      +------------+
+ *      | DESCRIPTOR |          +=================+============+========+
+ *      +============+          |                 | MESSAGE(s) |        |
+ *      | address    |--------->+=================+============+========+
+ *      +------------+
+ *      | head       |          ^-----head--------^
+ *      +------------+
+ *      | tail       |          ^---------tail-----------------^
+ *      +------------+
+ *      | size       |          ^---------------size--------------------^
+ *      +------------+
+ *
+ * Each message in the data stream starts with a single u32 treated as a header,
+ * followed by an optional set of u32 data forming the message-specific payload::
+ *
+ *      +------------+---------+---------+---------+
+ *      |         MESSAGE                          |
+ *      +------------+---------+---------+---------+
+ *      |   msg[0]   |   [1]   |   ...   |  [n-1]  |
+ *      +------------+---------+---------+---------+
+ *      |   MESSAGE  |       MESSAGE PAYLOAD       |
+ *      +   HEADER   +---------+---------+---------+
+ *      |            |    0    |   ...   |    n    |
+ *      +======+=====+=========+=========+=========+
+ *      | 31:16| code|         |         |         |
+ *      +------+-----+         |         |         |
+ *      |  15:5|flags|         |         |         |
+ *      +------+-----+         |         |         |
+ *      |   4:0|  len|         |         |         |
+ *      +------+-----+---------+---------+---------+
+ *
+ *                   ^-------------len-------------^
+ *
+ * The message header consists of:
+ *
+ * - **len**, indicates length of the message payload (in u32)
+ * - **code**, indicates message code
+ * - **flags**, holds various bits to control message handling
+ */
+
 /*
  * Describes single command transport buffer.
  * Used by both guc-master and clients.

From 6c77a2b05841a4082f746bf58345209df57175c9 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:26 +0000
Subject: [PATCH 0160/1286] drm/i915/guc: Prepare to process incoming requests
 from CT

Requests are read from the CT in the irq handler, but the actual
processing will be done in the work thread. Processing of specific
actions will be added in upcoming patches.
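
As a minimal sketch of the irq-producer/worker-consumer shape used here
(all names below are invented and kernel headers are omitted; this is
not driver code):

    struct demo_ct {
        spinlock_t lock;
        struct list_head incoming_requests;
        struct work_struct worker;
    };

    struct incoming {
        struct list_head link;
        u32 msg[];                  /* copied message: header + payload */
    };

    /* irq context: copy the message and defer the real processing */
    static void queue_incoming(struct demo_ct *ct, const u32 *msg, u32 msglen)
    {
        struct incoming *in;
        unsigned long flags;

        in = kmalloc(sizeof(*in) + sizeof(u32) * msglen, GFP_ATOMIC);
        if (unlikely(!in))
            return;                 /* the message is dropped */
        memcpy(in->msg, msg, sizeof(u32) * msglen);

        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&in->link, &ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        queue_work(system_unbound_wq, &ct->worker);
    }

The work item then pops one entry at a time under the same lock and
requeues itself while the list is not empty.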

v2: don't use GEM_BUG_ON (Chris)
    don't kmalloc too large buffer (Michal)
v3: rebased
v4: don't name it 'dispatch' (Michel) and fix checkpatch
    add some documentation (Michal)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-10-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc_ct.c | 95 ++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_guc_ct.h |  6 ++
 2 files changed, 100 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 41b071c6d757..aa810aded442 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -32,10 +32,17 @@ struct ct_request {
 	u32 *response_buf;
 };
 
+struct ct_incoming_request {
+	struct list_head link;
+	u32 msg[];
+};
+
 enum { CTB_SEND = 0, CTB_RECV = 1 };
 
 enum { CTB_OWNER_HOST = 0 };
 
+static void ct_incoming_request_worker_func(struct work_struct *w);
+
 /**
  * intel_guc_ct_init_early - Initialize CT state without requiring device access
  * @ct: pointer to CT struct
@@ -47,6 +54,8 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 
 	spin_lock_init(&ct->lock);
 	INIT_LIST_HEAD(&ct->pending_requests);
+	INIT_LIST_HEAD(&ct->incoming_requests);
+	INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
 }
 
 static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
@@ -682,13 +691,97 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 	return 0;
 }
 
+static void ct_process_request(struct intel_guc_ct *ct,
+			       u32 action, u32 len, const u32 *payload)
+{
+	switch (action) {
+	default:
+		DRM_ERROR("CT: unexpected request %x %*phn\n",
+			  action, 4 * len, payload);
+		break;
+	}
+}
+
+static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
+{
+	unsigned long flags;
+	struct ct_incoming_request *request;
+	u32 header;
+	u32 *payload;
+	bool done;
+
+	spin_lock_irqsave(&ct->lock, flags);
+	request = list_first_entry_or_null(&ct->incoming_requests,
+					   struct ct_incoming_request, link);
+	if (request)
+		list_del(&request->link);
+	done = !!list_empty(&ct->incoming_requests);
+	spin_unlock_irqrestore(&ct->lock, flags);
+
+	if (!request)
+		return true;
+
+	header = request->msg[0];
+	payload = &request->msg[1];
+	ct_process_request(ct,
+			   ct_header_get_action(header),
+			   ct_header_get_len(header),
+			   payload);
+
+	kfree(request);
+	return done;
+}
+
+static void ct_incoming_request_worker_func(struct work_struct *w)
+{
+	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
+	bool done;
+
+	done = ct_process_incoming_requests(ct);
+	if (!done)
+		queue_work(system_unbound_wq, &ct->worker);
+}
+
+/**
+ * DOC: CTB GuC to Host request
+ *
+ * Format of the CTB GuC to Host request message is as follows::
+ *
+ *      +------------+---------+---------+---------+---------+---------+
+ *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
+ *      +------------+---------+---------+---------+---------+---------+
+ *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
+ *      +   HEADER   +---------+---------+---------+---------+---------+
+ *      |            |    0    |    1    |    2    |   ...   |    n    |
+ *      +============+=========+=========+=========+=========+=========+
+ *      |     len    |            request specific data                |
+ *      +------+-----+---------+---------+---------+---------+---------+
+ *
+ *                   ^-----------------------len-----------------------^
+ */
+
 static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 {
 	u32 header = msg[0];
+	u32 len = ct_header_get_len(header);
+	u32 msglen = len + 1; /* total message length including header */
+	struct ct_incoming_request *request;
+	unsigned long flags;
 
 	GEM_BUG_ON(ct_header_is_response(header));
 
-	/* XXX */
+	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
+	if (unlikely(!request)) {
+		DRM_ERROR("CT: dropping request %*phn\n", 4 * msglen, msg);
+		return 0; /* XXX: -ENOMEM ? */
+	}
+	memcpy(request->msg, msg, 4 * msglen);
+
+	spin_lock_irqsave(&ct->lock, flags);
+	list_add_tail(&request->link, &ct->incoming_requests);
+	spin_unlock_irqrestore(&ct->lock, flags);
+
+	queue_work(system_unbound_wq, &ct->worker);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index fac6e53194c6..d774895ab143 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -81,6 +81,12 @@ struct intel_guc_ct {
 
 	/** @pending_requests: list of requests waiting for response */
 	struct list_head pending_requests;
+
+	/** @incoming_requests: list of incoming requests */
+	struct list_head incoming_requests;
+
+	/** @worker: worker for handling incoming requests */
+	struct work_struct worker;
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);

From b6b0166d49f04ace13885a7ef42cb4a63ecf1f02 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Tue, 27 Mar 2018 21:41:24 +0000
Subject: [PATCH 0161/1286] drm/i915/guc: Handle default action received over
 CT

When running on a platform with CTB-based GuC communication enabled,
GuC-to-Host event data will be delivered as a CT request message.
However, the content of data[1] of this CT message follows the format
of the scratch register used in MMIO-based communication, so some
code reuse is still possible.
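
Conceptually both delivery paths now funnel into the same helper; a
simplified sketch (the real MMIO handler also masks and clears the
handled scratch bits, omitted here):

    /* MMIO path: event data read from the scratch register */
    intel_guc_to_host_process_recv_msg(guc, I915_READ(SOFT_SCRATCH(15)));

    /* CT path: the same bits arrive as payload[0] of an
     * INTEL_GUC_ACTION_DEFAULT request
     */
    intel_guc_to_host_process_recv_msg(guc, payload[0]);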

v2:  filter disabled messages (Daniele)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com> #1
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327214124.70680-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c    | 8 ++++++++
 drivers/gpu/drm/i915/intel_guc.h    | 1 +
 drivers/gpu/drm/i915/intel_guc_ct.c | 9 +++++++++
 3 files changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 411c8e910583..a00a59a7d9ec 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -416,6 +416,14 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
 	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
 	spin_unlock(&guc->irq_lock);
 
+	intel_guc_to_host_process_recv_msg(guc, msg);
+}
+
+void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
+{
+	/* Make sure to handle only enabled messages */
+	msg &= guc->msg_enabled_mask;
+
 	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
 		intel_guc_log_handle_flush_event(&guc->log);
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 6dc109ab61bc..f1265e122d30 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -163,6 +163,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 void intel_guc_to_host_event_handler(struct intel_guc *guc);
 void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
 void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
+void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index aa810aded442..e8370846c2c9 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -694,8 +694,17 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 static void ct_process_request(struct intel_guc_ct *ct,
 			       u32 action, u32 len, const u32 *payload)
 {
+	struct intel_guc *guc = ct_to_guc(ct);
+
 	switch (action) {
+	case INTEL_GUC_ACTION_DEFAULT:
+		if (unlikely(len < 1))
+			goto fail_unexpected;
+		intel_guc_to_host_process_recv_msg(guc, *payload);
+		break;
+
 	default:
+fail_unexpected:
 		DRM_ERROR("CT: unexpected request %x %*phn\n",
 			  action, 4 * len, payload);
 		break;

From 0a015ff9730c169aa0d581e3f7727752ba3ff5b3 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Mon, 26 Mar 2018 19:48:28 +0000
Subject: [PATCH 0162/1286] drm/i915/guc: Trace messages from CT while in debug

During debug we may want to investigate all communication
from the GuC. Add proper tracing macros gated by a debug Kconfig option.
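
The gist is a wrapper macro that compiles away unless the new Kconfig
option is selected; consolidated from the diff below as a sketch:

    #ifdef CONFIG_DRM_I915_DEBUG_GUC
    #define CT_DEBUG_DRIVER(...)    DRM_DEBUG_DRIVER(__VA_ARGS__)
    #else
    #define CT_DEBUG_DRIVER(...)    do { } while (0)
    #endif

    /* compiles to nothing without CONFIG_DRM_I915_DEBUG_GUC,
     * otherwise behaves like any other DRM_DEBUG_DRIVER message
     */
    CT_DEBUG_DRIVER("CT: received %*phn\n", 4 * len, data);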

v2: convert remaining DRM_DEBUG into new CT_DEBUG (Michal)
v3: use dedicated Kconfig (Daniele)
v4: checkpatch

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326194829.58836-12-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/i915/Kconfig.debug  | 12 ++++++++
 drivers/gpu/drm/i915/intel_guc_ct.c | 43 +++++++++++++++++++++--------
 2 files changed, 43 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index dd5bf6389ead..80efee1ff7f3 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -90,6 +90,18 @@ config DRM_I915_SW_FENCE_CHECK_DAG
 
           If in doubt, say "N".
 
+config DRM_I915_DEBUG_GUC
+        bool "Enable additional driver debugging for GuC"
+        depends on DRM_I915
+        default n
+        help
+          Choose this option to turn on extra driver debugging that may affect
+          performance but will help resolve GuC related issues.
+
+          Recommended for driver developers only.
+
+          If in doubt, say "N".
+
 config DRM_I915_SELFTEST
 	bool "Enable selftests upon driver load"
 	depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index e8370846c2c9..990141d5f195 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -24,6 +24,12 @@
 #include "i915_drv.h"
 #include "intel_guc_ct.h"
 
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
+#else
+#define CT_DEBUG_DRIVER(...)	do { } while (0)
+#endif
+
 struct ct_request {
 	struct list_head link;
 	u32 fence;
@@ -78,8 +84,8 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
 				    u32 cmds_addr, u32 size, u32 owner)
 {
-	DRM_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
-			 desc, cmds_addr, size, owner);
+	CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
+			desc, cmds_addr, size, owner);
 	memset(desc, 0, sizeof(*desc));
 	desc->addr = cmds_addr;
 	desc->size = size;
@@ -88,8 +94,8 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
 
 static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
 {
-	DRM_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
-			 desc, desc->head, desc->tail);
+	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
+			desc, desc->head, desc->tail);
 	desc->head = 0;
 	desc->tail = 0;
 	desc->is_in_error = 0;
@@ -185,8 +191,8 @@ static int ctch_init(struct intel_guc *guc,
 		err = PTR_ERR(blob);
 		goto err_vma;
 	}
-	DRM_DEBUG_DRIVER("CT: vma base=%#x\n",
-			 intel_guc_ggtt_offset(guc, ctch->vma));
+	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
+			intel_guc_ggtt_offset(guc, ctch->vma));
 
 	/* store pointers to desc and cmds */
 	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
@@ -200,8 +206,8 @@ static int ctch_init(struct intel_guc *guc,
 err_vma:
 	i915_vma_unpin_and_release(&ctch->vma);
 err_out:
-	DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
-			 ctch->owner, err);
+	CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
+			ctch->owner, err);
 	return err;
 }
 
@@ -221,8 +227,8 @@ static int ctch_open(struct intel_guc *guc,
 	int err;
 	int i;
 
-	DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
-			 ctch->owner, yesno(ctch_is_open(ctch)));
+	CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
+			ctch->owner, yesno(ctch_is_open(ctch)));
 
 	if (!ctch->vma) {
 		err = ctch_init(guc, ctch);
@@ -355,6 +361,10 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
 		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
 		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);
 
+	CT_DEBUG_DRIVER("CT: writing %*phn %*phn %*phn\n",
+			4, &header, 4, &fence,
+			4 * (len - 1), &action[1]);
+
 	cmds[tail] = header;
 	tail = (tail + 1) % size;
 
@@ -545,6 +555,9 @@ static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
 	if (unlikely(ret < 0)) {
 		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
 			  action[0], ret, status);
+	} else if (unlikely(ret)) {
+		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
+				action[0], ret, ret);
 	}
 
 	mutex_unlock(&guc->send_mutex);
@@ -591,6 +604,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
 	/* beware of buffer wrap case */
 	if (unlikely(available < 0))
 		available += size;
+	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
 	GEM_BUG_ON(available < 0);
 
 	data[0] = cmds[head];
@@ -612,6 +626,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
 		data[i] = cmds[head];
 		head = (head + 1) % size;
 	}
+	CT_DEBUG_DRIVER("CT: received %*phn\n", 4 * len, data);
 
 	desc->head = head * 4;
 	return 0;
@@ -665,11 +680,13 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 		return -EPROTO;
 	}
 
+	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
+
 	spin_lock(&ct->lock);
 	list_for_each_entry(req, &ct->pending_requests, link) {
 		if (unlikely(fence != req->fence)) {
-			DRM_DEBUG_DRIVER("CT: request %u awaits response\n",
-					 req->fence);
+			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
+					req->fence);
 			continue;
 		}
 		if (unlikely(datalen > req->response_len)) {
@@ -696,6 +713,8 @@ static void ct_process_request(struct intel_guc_ct *ct,
 {
 	struct intel_guc *guc = ct_to_guc(ct);
 
+	CT_DEBUG_DRIVER("CT: request %x %*phn\n", action, 4 * len, payload);
+
 	switch (action) {
 	case INTEL_GUC_ACTION_DEFAULT:
 		if (unlikely(len < 1))

From 790861cc34f872015806cef311d5c64cc3167a0d Mon Sep 17 00:00:00 2001
From: Samuel Li <Samuel.Li@amd.com>
Date: Mon, 26 Mar 2018 15:22:25 -0400
Subject: [PATCH 0163/1286] drm: add parameter explanation for some gem
 dmabuf_ops

To reduce some warnings.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Samuel Li <Samuel.Li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1522092145-12645-1-git-send-email-Samuel.Li@amd.com
---
 drivers/gpu/drm/drm_prime.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7856a9b3f8a8..caf675e3e692 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -331,6 +331,9 @@ EXPORT_SYMBOL(drm_gem_map_dma_buf);
 
 /**
  * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ * @attach: attachment to unmap buffer from
+ * @sgt: scatterlist info of the buffer to unmap
+ * @dir: direction of DMA transfer
  *
  * Not implemented. The unmap is done at drm_gem_map_detach().  This can be
  * used as the &dma_buf_ops.unmap_dma_buf callback.
@@ -429,6 +432,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
 /**
  * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @page_num: page number within the buffer
  *
  * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
  */
@@ -441,6 +446,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
 
 /**
  * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @page_num: page number within the buffer
+ * @addr: virtual address of the buffer
  *
  * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
  */
@@ -453,6 +461,8 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
 
 /**
  * drm_gem_dmabuf_kmap - map implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @page_num: page number within the buffer
  *
  * Not implemented. This can be used as the &dma_buf_ops.map callback.
  */
@@ -464,6 +474,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
 
 /**
  * drm_gem_dmabuf_kunmap - unmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @page_num: page number within the buffer
+ * @addr: virtual address of the buffer
  *
  * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
  */

From bee330f3d67273a68dcb99f59480d59553c008b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Noralf=20Tr=C3=B8nnes?= <noralf@tronnes.org>
Date: Wed, 28 Mar 2018 10:38:35 +0300
Subject: [PATCH 0164/1286] drm: Use srcu to protect drm_device.unplugged
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use srcu to protect drm_device.unplugged in a race-free manner.
Drivers can use drm_dev_enter()/drm_dev_exit() to mark and protect
sections that access device resources which are no longer available
after the device has been unplugged.
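
A minimal usage sketch for a driver entry point (illustrative; only
drm_dev_enter()/drm_dev_exit() come from this patch, the helper is
hypothetical):

    int ret, idx;

    if (!drm_dev_enter(dev, &idx))
        return -ENODEV;         /* device already unplugged */

    ret = do_hw_work(dev);      /* hypothetical device access */

    drm_dev_exit(idx);
    return ret;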

Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Tested-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Cc: intel-gfx@lists.freedesktop.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/1522222715-11814-1-git-send-email-andr2000@gmail.com
---
 drivers/gpu/drm/drm_drv.c | 54 +++++++++++++++++++++++++++++++++++----
 include/drm/drm_device.h  |  9 ++++++-
 include/drm/drm_drv.h     | 15 ++++++++---
 3 files changed, 68 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a1b9338736e3..32a83b41ab61 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -32,6 +32,7 @@
 #include <linux/moduleparam.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drmP.h>
@@ -75,6 +76,8 @@ static bool drm_core_init_complete = false;
 
 static struct dentry *drm_debugfs_root;
 
+DEFINE_STATIC_SRCU(drm_unplug_srcu);
+
 /*
  * DRM Minors
  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -318,18 +321,51 @@ void drm_put_dev(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_put_dev);
 
-static void drm_device_set_unplugged(struct drm_device *dev)
+/**
+ * drm_dev_enter - Enter device critical section
+ * @dev: DRM device
+ * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
+ *
+ * This function marks and protects the beginning of a section that should not
+ * be entered after the device has been unplugged. The section end is marked
+ * with drm_dev_exit(). Calls to this function can be nested.
+ *
+ * Returns:
+ * True if it is OK to enter the section, false otherwise.
+ */
+bool drm_dev_enter(struct drm_device *dev, int *idx)
 {
-	smp_wmb();
-	atomic_set(&dev->unplugged, 1);
+	*idx = srcu_read_lock(&drm_unplug_srcu);
+
+	if (dev->unplugged) {
+		srcu_read_unlock(&drm_unplug_srcu, *idx);
+		return false;
+	}
+
+	return true;
 }
+EXPORT_SYMBOL(drm_dev_enter);
+
+/**
+ * drm_dev_exit - Exit device critical section
+ * @idx: index returned from drm_dev_enter()
+ *
+ * This function marks the end of a section that should not be entered after
+ * the device has been unplugged.
+ */
+void drm_dev_exit(int idx)
+{
+	srcu_read_unlock(&drm_unplug_srcu, idx);
+}
+EXPORT_SYMBOL(drm_dev_exit);
 
 /**
  * drm_dev_unplug - unplug a DRM device
  * @dev: DRM device
  *
  * This unplugs a hotpluggable DRM device, which makes it inaccessible to
- * userspace operations. Entry-points can use drm_dev_is_unplugged(). This
+ * userspace operations. Entry-points can use drm_dev_enter() and
+ * drm_dev_exit() to protect device resources in a race free manner. This
  * essentially unregisters the device like drm_dev_unregister(), but can be
  * called while there are still open users of @dev.
  */
@@ -338,10 +374,18 @@ void drm_dev_unplug(struct drm_device *dev)
 	drm_dev_unregister(dev);
 
 	mutex_lock(&drm_global_mutex);
-	drm_device_set_unplugged(dev);
 	if (dev->open_count == 0)
 		drm_dev_put(dev);
 	mutex_unlock(&drm_global_mutex);
+
+	/*
+	 * After synchronizing any critical read section is guaranteed to see
+	 * the new value of ->unplugged, and any critical section which might
+	 * still have seen the old value of ->unplugged is guaranteed to have
+	 * finished.
+	 */
+	dev->unplugged = true;
+	synchronize_srcu(&drm_unplug_srcu);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 7c4fa32f3fc6..3a0eac2885b7 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -46,7 +46,14 @@ struct drm_device {
 	/* currently active master for this device. Protected by master_mutex */
 	struct drm_master *master;
 
-	atomic_t unplugged;			/**< Flag whether dev is dead */
+	/**
+	 * @unplugged:
+	 *
+	 * Flag to tell if the device has been unplugged.
+	 * See drm_dev_enter() and drm_dev_is_unplugged().
+	 */
+	bool unplugged;
+
 	struct inode *anon_inode;		/**< inode for private address-space */
 	char *unique;				/**< unique name of the device */
 	/*@} */
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index d32b688eb346..ff7312c40cd8 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -623,6 +623,8 @@ void drm_dev_get(struct drm_device *dev);
 void drm_dev_put(struct drm_device *dev);
 void drm_dev_unref(struct drm_device *dev);
 void drm_put_dev(struct drm_device *dev);
+bool drm_dev_enter(struct drm_device *dev, int *idx);
+void drm_dev_exit(int idx);
 void drm_dev_unplug(struct drm_device *dev);
 
 /**
@@ -634,11 +636,16 @@ void drm_dev_unplug(struct drm_device *dev);
  * unplugged, these two functions guarantee that any store before calling
  * drm_dev_unplug() is visible to callers of this function after it completes
  */
-static inline int drm_dev_is_unplugged(struct drm_device *dev)
+static inline bool drm_dev_is_unplugged(struct drm_device *dev)
 {
-	int ret = atomic_read(&dev->unplugged);
-	smp_rmb();
-	return ret;
+	int idx;
+
+	if (drm_dev_enter(dev, &idx)) {
+		drm_dev_exit(idx);
+		return false;
+	}
+
+	return true;
 }
 
 

From 4d07f6c40a764748f3c30c65d9f8e75a36fd1ad9 Mon Sep 17 00:00:00 2001
From: Michel Thierry <michel.thierry@intel.com>
Date: Wed, 28 Mar 2018 13:58:50 -0700
Subject: [PATCH 0165/1286] drm/i915/guc: enable guc interrupts unconditionally
 in uc_resume
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Probably lost while rebasing commit eacd8391f977 ("drm/i915/guc: Keep GuC
interrupts enabled when using GuC").

Not really needed since i915_gem_init_hw is called before uc_resume, but
it brings symmetry to uc_suspend.

Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328205851.16188-1-michel.thierry@intel.com
---
 drivers/gpu/drm/i915/intel_uc.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 081e42462aad..1cffaf7b5dbe 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -480,8 +480,7 @@ int intel_uc_resume(struct drm_i915_private *i915)
 	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
 		return 0;
 
-	if (i915_modparams.guc_log_level)
-		gen9_enable_guc_interrupts(i915);
+	gen9_enable_guc_interrupts(i915);
 
 	err = intel_guc_resume(guc);
 	if (err) {

From e770276079fd6e1088a255dee182a3c09a2d7aa9 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 27 Mar 2018 22:01:57 +0100
Subject: [PATCH 0166/1286] drm/i915: Include the HW breadcrumb whenever we
 trace the global_seqno

When we include a request's global_seqno in a GEM_TRACE it often helps
to know how that relates to the current breadcrumb as seen by the
hardware.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180327210157.16896-3-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_request.c | 28 +++++++++++++++++-----------
 drivers/gpu/drm/i915/intel_lrc.c    |  6 ++++--
 2 files changed, 21 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 2314a26cd7f8..585242831974 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -214,8 +214,11 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 		struct i915_gem_timeline *timeline;
 		struct intel_timeline *tl = engine->timeline;
 
-		GEM_TRACE("%s seqno %d -> %d\n",
-			  engine->name, tl->seqno, seqno);
+		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
+			  engine->name,
+			  tl->seqno,
+			  intel_engine_get_seqno(engine),
+			  seqno);
 
 		if (!i915_seqno_passed(seqno, tl->seqno)) {
 			/* Flush any waiters before we reuse the seqno */
@@ -386,10 +389,11 @@ static void i915_request_retire(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_active *active, *next;
 
-	GEM_TRACE("%s(%d) fence %llx:%d, global_seqno %d\n",
-		  engine->name, intel_engine_get_seqno(engine),
+	GEM_TRACE("%s fence %llx:%d, global_seqno %d, current %d\n",
+		  engine->name,
 		  request->fence.context, request->fence.seqno,
-		  request->global_seqno);
+		  request->global_seqno,
+		  intel_engine_get_seqno(engine));
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
@@ -508,10 +512,11 @@ void __i915_request_submit(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	u32 seqno;
 
-	GEM_TRACE("%s fence %llx:%d -> global_seqno %d\n",
-		  request->engine->name,
+	GEM_TRACE("%s fence %llx:%d -> global_seqno %d, current %d\n",
+		  engine->name,
 		  request->fence.context, request->fence.seqno,
-		  engine->timeline->seqno + 1);
+		  engine->timeline->seqno + 1,
+		  intel_engine_get_seqno(engine));
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);
@@ -557,10 +562,11 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
-	GEM_TRACE("%s fence %llx:%d <- global_seqno %d\n",
-		  request->engine->name,
+	GEM_TRACE("%s fence %llx:%d <- global_seqno %d, current %d\n",
+		  engine->name,
 		  request->fence.context, request->fence.seqno,
-		  request->global_seqno);
+		  request->global_seqno,
+		  intel_engine_get_seqno(engine));
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 654634254b64..f60b61bf8b3b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -454,10 +454,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-			GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%x, prio=%d\n",
+			GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%d (current %d), prio=%d\n",
 				  engine->name, n,
 				  port[n].context_id, count,
 				  rq->global_seqno,
+				  intel_engine_get_seqno(engine),
 				  rq_prio(rq));
 		} else {
 			GEM_BUG_ON(!n);
@@ -999,10 +1000,11 @@ static void execlists_submission_tasklet(unsigned long data)
 							EXECLISTS_ACTIVE_USER));
 
 			rq = port_unpack(port, &count);
-			GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n",
+			GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%d (current %d), prio=%d\n",
 				  engine->name,
 				  port->context_id, count,
 				  rq ? rq->global_seqno : 0,
+				  intel_engine_get_seqno(engine),
 				  rq ? rq_prio(rq) : 0);
 
 			/* Check the context/desc id for this event matches */

From 1de401c08fa805f3ac34604af1d43f48aeb17eb4 Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Date: Mon, 26 Mar 2018 14:39:48 +0100
Subject: [PATCH 0167/1286] drm/i915/perf: enable perf support on ICL

No significant changes in the context offsets, the report formats,
or the register whitelist.

v2: Also drop slice/unslice clock ratio changes (Matt)

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326133949.12469-3-lionel.g.landwerlin@intel.com
---
 drivers/gpu/drm/i915/Makefile      |   3 +-
 drivers/gpu/drm/i915/i915_oa_icl.c | 118 +++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_oa_icl.h |  34 +++++++++
 drivers/gpu/drm/i915/i915_perf.c   |   7 +-
 4 files changed, 159 insertions(+), 3 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_oa_icl.c
 create mode 100644 drivers/gpu/drm/i915/i915_oa_icl.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 552e43e9663f..0c79c19223af 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -172,7 +172,8 @@ i915-y += i915_perf.o \
 	  i915_oa_glk.o \
 	  i915_oa_cflgt2.o \
 	  i915_oa_cflgt3.o \
-	  i915_oa_cnl.o
+	  i915_oa_cnl.o \
+	  i915_oa_icl.o
 
 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
new file mode 100644
index 000000000000..a5667926e3de
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -0,0 +1,118 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_icl.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00000004 },
+	{ _MMIO(0x2774), 0x0000ffff },
+	{ _MMIO(0x2778), 0x00000003 },
+	{ _MMIO(0x277c), 0x0000ffff },
+	{ _MMIO(0x2780), 0x00000007 },
+	{ _MMIO(0x2784), 0x0000ffff },
+	{ _MMIO(0x2788), 0x00100002 },
+	{ _MMIO(0x278c), 0x0000fff7 },
+	{ _MMIO(0x2790), 0x00100002 },
+	{ _MMIO(0x2794), 0x0000ffcf },
+	{ _MMIO(0x2798), 0x00100082 },
+	{ _MMIO(0x279c), 0x0000ffef },
+	{ _MMIO(0x27a0), 0x001000c2 },
+	{ _MMIO(0x27a4), 0x0000ffe7 },
+	{ _MMIO(0x27a8), 0x00100001 },
+	{ _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+	{ _MMIO(0xd04), 0x00000200 },
+	{ _MMIO(0x9840), 0x00000000 },
+	{ _MMIO(0x9884), 0x00000000 },
+	{ _MMIO(0x9888), 0x10060000 },
+	{ _MMIO(0x9888), 0x22060000 },
+	{ _MMIO(0x9888), 0x16060000 },
+	{ _MMIO(0x9888), 0x24060000 },
+	{ _MMIO(0x9888), 0x18060000 },
+	{ _MMIO(0x9888), 0x1a060000 },
+	{ _MMIO(0x9888), 0x12060000 },
+	{ _MMIO(0x9888), 0x14060000 },
+	{ _MMIO(0x9888), 0x10060000 },
+	{ _MMIO(0x9888), 0x22060000 },
+	{ _MMIO(0x9884), 0x00000003 },
+	{ _MMIO(0x9888), 0x16130000 },
+	{ _MMIO(0x9888), 0x24000001 },
+	{ _MMIO(0x9888), 0x0e130056 },
+	{ _MMIO(0x9888), 0x10130000 },
+	{ _MMIO(0x9888), 0x1a130000 },
+	{ _MMIO(0x9888), 0x541f0001 },
+	{ _MMIO(0x9888), 0x181f0000 },
+	{ _MMIO(0x9888), 0x4c1f0000 },
+	{ _MMIO(0x9888), 0x301f0000 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
+{
+	strlcpy(dev_priv->perf.oa.test_config.uuid,
+		"a291665e-244b-4b76-9b9a-01de9d3c8068",
+		sizeof(dev_priv->perf.oa.test_config.uuid));
+	dev_priv->perf.oa.test_config.id = 1;
+
+	dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+	dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+	dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+	dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
+	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+	dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+
+	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
new file mode 100644
index 000000000000..ae1c24aafe4f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -0,0 +1,34 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_ICL_H__
+#define __I915_OA_ICL_H__
+
+extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index abaca6edeb71..30444bb3aaa1 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -209,6 +209,7 @@
 #include "i915_oa_cflgt2.h"
 #include "i915_oa_cflgt3.h"
 #include "i915_oa_cnl.h"
+#include "i915_oa_icl.h"
 
 /* HW requires this to be a power of two, between 128k and 16M, though driver
  * is currently generally designed assuming the largest 16M size is used such
@@ -1840,7 +1841,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
 	 * be read back from automatically triggered reports, as part of the
 	 * RPT_ID field.
 	 */
-	if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
+	if (IS_GEN(dev_priv, 9, 11)) {
 		I915_WRITE(GEN8_OA_DEBUG,
 			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
 					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2935,6 +2936,8 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
 			i915_perf_load_test_config_cflgt3(dev_priv);
 	} else if (IS_CANNONLAKE(dev_priv)) {
 		i915_perf_load_test_config_cnl(dev_priv);
+	} else if (IS_ICELAKE(dev_priv)) {
+		i915_perf_load_test_config_icl(dev_priv);
 	}
 
 	if (dev_priv->perf.oa.test_config.id == 0)
@@ -3467,7 +3470,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 
 				dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
 			}
-		} else if (IS_GEN10(dev_priv)) {
+		} else if (IS_GEN(dev_priv, 10, 11)) {
 			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
 				gen7_is_valid_b_counter_addr;
 			dev_priv->perf.oa.ops.is_valid_mux_reg =

From b6dd47b9c82d619195370c38b7579fe18a8f6055 Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Date: Mon, 26 Mar 2018 10:08:22 +0100
Subject: [PATCH 0168/1286] drm/i915/perf: check the value of PROP_SAMPLE_OA
 uapi parameter

We've been a bit loose about validating this opening parameter. We
should only add the flag for writing OA reports when the value of this
parameter is non-zero.
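
From the uapi side the OA sample flag should therefore be requested
with an explicit non-zero value, e.g. (illustrative userspace sketch;
drm_fd and metrics_set_id are assumed to exist):

    uint64_t properties[] = {
        DRM_I915_PERF_PROP_SAMPLE_OA, 1,    /* must be non-zero now */
        DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
        DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
    };
    struct drm_i915_perf_open_param param = {
        .flags = I915_PERF_FLAG_FD_CLOEXEC,
        .num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
        .properties_ptr = (uintptr_t)properties,
    };
    int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);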

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326090831.22686-3-lionel.g.landwerlin@intel.com
---
 drivers/gpu/drm/i915/i915_perf.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 30444bb3aaa1..21a985bd4413 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2746,7 +2746,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
 			props->ctx_handle = value;
 			break;
 		case DRM_I915_PERF_PROP_SAMPLE_OA:
-			props->sample_flags |= SAMPLE_OA_REPORT;
+			if (value)
+				props->sample_flags |= SAMPLE_OA_REPORT;
 			break;
 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
 			if (value == 0) {

From 11051303344bd4b3334a7b456f6152f4d6032c61 Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Date: Mon, 26 Mar 2018 10:08:23 +0100
Subject: [PATCH 0169/1286] drm/i915/perf: simplify OA unit enabling on gen7

In commit d79651522e89c ("drm/i915: Enable i915 perf stream for
Haswell OA unit") the enable/disable vfuncs hadn't appeared yet and the
same function would deal with enabling/disabling the OA unit.

This was split later on for gen8, but the gen7 path retained some code
that isn't actually useful anymore.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326090831.22686-4-lionel.g.landwerlin@intel.com
---
 drivers/gpu/drm/i915/i915_perf.c | 34 ++++++++++++++------------------
 1 file changed, 15 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 21a985bd4413..f1af58a1cc5a 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1886,6 +1886,13 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
 
 static void gen7_oa_enable(struct drm_i915_private *dev_priv)
 {
+	struct i915_gem_context *ctx =
+			dev_priv->perf.oa.exclusive_stream->ctx;
+	u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
+	bool periodic = dev_priv->perf.oa.periodic;
+	u32 period_exponent = dev_priv->perf.oa.period_exponent;
+	u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+
 	/*
 	 * Reset buf pointers so we don't forward reports from before now.
 	 *
@@ -1897,25 +1904,14 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
 	 */
 	gen7_init_oa_buffer(dev_priv);
 
-	if (dev_priv->perf.oa.exclusive_stream->enabled) {
-		struct i915_gem_context *ctx =
-			dev_priv->perf.oa.exclusive_stream->ctx;
-		u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
-
-		bool periodic = dev_priv->perf.oa.periodic;
-		u32 period_exponent = dev_priv->perf.oa.period_exponent;
-		u32 report_format = dev_priv->perf.oa.oa_buffer.format;
-
-		I915_WRITE(GEN7_OACONTROL,
-			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
-			   (period_exponent <<
-			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
-			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
-			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
-			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
-			   GEN7_OACONTROL_ENABLE);
-	} else
-		I915_WRITE(GEN7_OACONTROL, 0);
+	I915_WRITE(GEN7_OACONTROL,
+		   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+		   (period_exponent <<
+		    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+		   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+		   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+		   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+		   GEN7_OACONTROL_ENABLE);
 }
 
 static void gen8_oa_enable(struct drm_i915_private *dev_priv)

From 53744104bec47a3357622f9d6815fda1c0d0f842 Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Date: Mon, 26 Mar 2018 10:08:25 +0100
Subject: [PATCH 0170/1286] drm/i915/perf: remove empty line

This was added by mistake in commit 28964cf25ee67 ("drm/i915/perf:
disable NOA logic when not used").

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326090831.22686-6-lionel.g.landwerlin@intel.com
---
 drivers/gpu/drm/i915/i915_perf.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index f1af58a1cc5a..b5c65ab2615e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1871,7 +1871,6 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
 
 	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
 				      ~GT_NOA_ENABLE));
-
 }
 
 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)

From b82ed43de5c01e41a7e5d756da3287fc66e7600b Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Date: Mon, 26 Mar 2018 10:08:26 +0100
Subject: [PATCH 0171/1286] drm/i915: rename PPGTT/GGTT fields OA registers

We had a generic field name used across two registers, but it is
clearer to make it obvious which register each field belongs to.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326090831.22686-7-lionel.g.landwerlin@intel.com
---
 drivers/gpu/drm/i915/i915_perf.c | 7 ++++---
 drivers/gpu/drm/i915/i915_reg.h  | 6 +++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index b5c65ab2615e..d41a2529bb76 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1043,7 +1043,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 
 		I915_WRITE(GEN7_OASTATUS2,
 			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
-			    OA_MEM_SELECT_GGTT));
+			    GEN7_OASTATUS2_MEM_SELECT_GGTT));
 		dev_priv->perf.oa.oa_buffer.head = head;
 
 		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
@@ -1333,7 +1333,8 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
 	/* Pre-DevBDW: OABUFFER must be set with counters off,
 	 * before OASTATUS1, but after OASTATUS2
 	 */
-	I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
+	I915_WRITE(GEN7_OASTATUS2,
+		   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
 	dev_priv->perf.oa.oa_buffer.head = gtt_offset;
 
 	I915_WRITE(GEN7_OABUFFER, gtt_offset);
@@ -1393,7 +1394,7 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
 	 *  bit."
 	 */
 	I915_WRITE(GEN8_OABUFFER, gtt_offset |
-		   OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
+		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
 	I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
 
 	/* Mark that we need updated tail pointers to read from... */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a53d0e1583c..b926520803b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -536,6 +536,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define GEN8_OABUFFER_UDW _MMIO(0x23b4)
 #define GEN8_OABUFFER _MMIO(0x2b14)
+#define  GEN8_OABUFFER_MEM_SELECT_GGTT      (1 << 0)  /* 0: PPGTT, 1: GGTT */
 
 #define GEN7_OASTATUS1 _MMIO(0x2364)
 #define  GEN7_OASTATUS1_TAIL_MASK	    0xffffffc0
@@ -544,7 +545,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN7_OASTATUS1_REPORT_LOST	    (1<<0)
 
 #define GEN7_OASTATUS2 _MMIO(0x2368)
-#define GEN7_OASTATUS2_HEAD_MASK    0xffffffc0
+#define  GEN7_OASTATUS2_HEAD_MASK           0xffffffc0
+#define  GEN7_OASTATUS2_MEM_SELECT_GGTT     (1 << 0) /* 0: PPGTT, 1: GGTT */
 
 #define GEN8_OASTATUS _MMIO(0x2b08)
 #define  GEN8_OASTATUS_OVERRUN_STATUS	    (1<<3)
@@ -566,8 +568,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OABUFFER_SIZE_8M    (6<<3)
 #define OABUFFER_SIZE_16M   (7<<3)
 
-#define OA_MEM_SELECT_GGTT  (1<<0)
-
 /*
  * Flexible, Aggregate EU Counter Registers.
  * Note: these aren't contiguous

From 9bd9be666008499bb9071e9e5472ded24e522f0b Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Date: Mon, 26 Mar 2018 10:08:28 +0100
Subject: [PATCH 0172/1286] drm/i915/perf: add more debug message on perf open
 & configs

This will make it easier to spot issues related to config
creation/usage.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326090831.22686-9-lionel.g.landwerlin@intel.com
---
 drivers/gpu/drm/i915/i915_perf.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index d41a2529bb76..bfc906cd4e5e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2096,13 +2096,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 
 	if (stream->ctx) {
 		ret = oa_get_render_ctx_id(stream);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG("Invalid context id to filter with\n");
 			return ret;
+		}
 	}
 
 	ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
 		goto err_config;
+	}
 
 	/* PRM - observability performance counters:
 	 *
@@ -2129,8 +2133,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 
 	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
 						      stream->oa_config);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG("Unable to enable metric set\n");
 		goto err_enable;
+	}
 
 	stream->ops = &i915_oa_stream_ops;
 
@@ -3292,6 +3298,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
 
 	mutex_unlock(&dev_priv->perf.metrics_lock);
 
+	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
+
 	return oa_config->id;
 
 sysfs_err:
@@ -3348,6 +3356,9 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
 			   &oa_config->sysfs_metric);
 
 	idr_remove(&dev_priv->perf.metrics_idr, *arg);
+
+	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
+
 	put_oa_config(dev_priv, oa_config);
 
 config_err:

From 5e9cfeba6abb7e1a3f240bd24eb29178f0b83716 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:22:51 +0200
Subject: [PATCH 0173/1286] drm/atomic-helper: Drop plane->fb references only
 for drm_atomic_helper_shutdown()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

drm_atomic_helper_shutdown() needs to release the reference held by
plane->fb. Since commit 49d70aeaeca8 ("drm/atomic-helper: Fix leak in
disable_all") we're doing that by calling drm_atomic_clean_old_fb() in
drm_atomic_helper_disable_all(). This also leaves plane->fb == NULL
afterwards. However, since drm_atomic_helper_disable_all() is also
used by the i915 gpu reset code,
drm_atomic_helper_commit_duplicated_state() then has to undo the
damage and put the correct plane->fb pointers back in (and also
adjust the ref counts to match).

That approach doesn't work so well for load detection as nothing
sets up the plane->old_fb pointers for us. This causes us to
leak an extra reference for each plane->fb when
drm_atomic_helper_commit_duplicated_state() calls
drm_atomic_clean_old_fb() after load detection.

To fix this let's call drm_atomic_clean_old_fb() only for
drm_atomic_helper_shutdown() as that's the only time we need to
actually drop the plane->fb references. In all the other cases
(load detection, gpu reset) we want to leave plane->fb alone.
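
For illustration only (not part of the patch), a rough sketch of the two
call paths and what each expects; the helper names are the usual atomic
helpers and the i915 reset sequence is simplified:

	/* driver unload: the plane->fb references really are going away */
	drm_atomic_helper_shutdown(dev);

	/*
	 * gpu reset / load detection: the saved state is committed back
	 * afterwards, so plane->fb (and its reference) must survive
	 */
	drm_atomic_helper_disable_all(dev, &ctx);
	/* ... reset / probing ... */
	drm_atomic_helper_commit_duplicated_state(state, &ctx);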

v2: Don't inflict the clean_old_fbs bool on drivers (Daniel)
v3: Squash in the revert and rewrite the commit msg (Daniel)

Cc: martin.peres@free.fr
Cc: chris@chris-wilson.co.uk
Cc: Dave Airlie <airlied@gmail.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-3-ville.syrjala@linux.intel.com
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> #pre-squash
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/drm_atomic_helper.c | 78 ++++++++++++++---------------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d63c806e7d38..ef4ddfecc4a4 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2892,31 +2892,9 @@ commit:
 	return 0;
 }
 
-/**
- * drm_atomic_helper_disable_all - disable all currently active outputs
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Loops through all connectors, finding those that aren't turned off and then
- * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
- * that they are connected to.
- *
- * This is used for example in suspend/resume to disable all currently active
- * functions when suspending. If you just want to shut down everything at e.g.
- * driver unload, look at drm_atomic_helper_shutdown().
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
- * drm_atomic_helper_shutdown().
- */
-int drm_atomic_helper_disable_all(struct drm_device *dev,
-				  struct drm_modeset_acquire_ctx *ctx)
+static int __drm_atomic_helper_disable_all(struct drm_device *dev,
+					   struct drm_modeset_acquire_ctx *ctx,
+					   bool clean_old_fbs)
 {
 	struct drm_atomic_state *state;
 	struct drm_connector_state *conn_state;
@@ -2968,8 +2946,11 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
 			goto free;
 
 		drm_atomic_set_fb_for_plane(plane_state, NULL);
-		plane_mask |= BIT(drm_plane_index(plane));
-		plane->old_fb = plane->fb;
+
+		if (clean_old_fbs) {
+			plane->old_fb = plane->fb;
+			plane_mask |= BIT(drm_plane_index(plane));
+		}
 	}
 
 	ret = drm_atomic_commit(state);
@@ -2980,6 +2961,34 @@ free:
 	return ret;
 }
 
+/**
+ * drm_atomic_helper_disable_all - disable all currently active outputs
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Loops through all connectors, finding those that aren't turned off and then
+ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
+ * that they are connected to.
+ *
+ * This is used for example in suspend/resume to disable all currently active
+ * functions when suspending. If you just want to shut down everything at e.g.
+ * driver unload, look at drm_atomic_helper_shutdown().
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
+ * drm_atomic_helper_shutdown().
+ */
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx)
+{
+	return __drm_atomic_helper_disable_all(dev, ctx, false);
+}
 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
 
 /**
@@ -3002,7 +3011,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
 	while (1) {
 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
 		if (!ret)
-			ret = drm_atomic_helper_disable_all(dev, &ctx);
+			ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
 
 		if (ret != -EDEADLK)
 			break;
@@ -3106,16 +3115,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 	struct drm_connector_state *new_conn_state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *new_crtc_state;
-	unsigned plane_mask = 0;
-	struct drm_device *dev = state->dev;
-	int ret;
 
 	state->acquire_ctx = ctx;
 
-	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-		plane_mask |= BIT(drm_plane_index(plane));
+	for_each_new_plane_in_state(state, plane, new_plane_state, i)
 		state->planes[i].old_state = plane->state;
-	}
 
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
 		state->crtcs[i].old_state = crtc->state;
@@ -3123,11 +3127,7 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 	for_each_new_connector_in_state(state, connector, new_conn_state, i)
 		state->connectors[i].old_state = connector->state;
 
-	ret = drm_atomic_commit(state);
-	if (plane_mask)
-		drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
-	return ret;
+	return drm_atomic_commit(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
 

From 7e7de761af2e4760cc5ad2968c37c2814317ede6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:22:53 +0200
Subject: [PATCH 0174/1286] drm: Clear crtc->primary->crtc when disabling the
 crtc via setcrtc()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Keep the primary->crtc in sync with the state->crtc (also with
primary->fb and state->fb) when disabling the crtc (and thus also
the primary) via setcrtc().
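
For illustration only (not part of the patch), the invariant this keeps
for the legacy plane pointers, expressed as a hypothetical check:

	/* after a successful set_config(), both legacy pointers follow fb */
	WARN_ON(!crtc->primary->fb != !crtc->primary->crtc);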

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-4-ville.syrjala@linux.intel.com
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/drm_crtc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 03583887cfec..7a973ada7195 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -471,7 +471,7 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
 
 	ret = crtc->funcs->set_config(set, ctx);
 	if (ret == 0) {
-		crtc->primary->crtc = crtc;
+		crtc->primary->crtc = fb ? crtc : NULL;
 		crtc->primary->fb = fb;
 	}
 

From 5e78d01fa1a7f1794dfc44a6eb77bfec7a8d590d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:22:54 +0200
Subject: [PATCH 0175/1286] drm/atomic-helper: WARN if legacy plane fb pointers
 are bogus when committing duplicated state
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

drm_atomic_helper_commit_duplicated_state() should only be called from
resume/reset/load_detect paths, where plane->old_fb should always be
NULL and plane->fb should be equal to new_plane_state->fb.
Assert that this is indeed the case.

Cc: martin.peres@free.fr
Cc: chris@chris-wilson.co.uk
Cc: Dave Airlie <airlied@gmail.com> (v1)
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-5-ville.syrjala@linux.intel.com
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/drm_atomic_helper.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ef4ddfecc4a4..fe09d6254c19 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3118,8 +3118,13 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 
 	state->acquire_ctx = ctx;
 
-	for_each_new_plane_in_state(state, plane, new_plane_state, i)
+	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+		WARN_ON(plane->crtc != new_plane_state->crtc);
+		WARN_ON(plane->fb != new_plane_state->fb);
+		WARN_ON(plane->old_fb);
+
 		state->planes[i].old_state = plane->state;
+	}
 
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
 		state->crtcs[i].old_state = crtc->state;

From 64c32b490333c9ccb05b172997c4f2f940c5d4d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:22:55 +0200
Subject: [PATCH 0176/1286] drm: Add local 'plane' variable for primary/cursor
 planes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make the code a bit more readable by storing the plane pointer in a
local variable rather than having to do crtc->{primary,cursor} all the
time.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-6-ville.syrjala@linux.intel.com
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/drm_crtc.c  | 32 +++++++++++++++++++-------------
 drivers/gpu/drm/drm_plane.c | 32 ++++++++++++++++++--------------
 2 files changed, 37 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 7a973ada7195..8552ed419056 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -402,6 +402,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
 {
 	struct drm_mode_crtc *crtc_resp = data;
 	struct drm_crtc *crtc;
+	struct drm_plane *plane;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -410,21 +411,23 @@ int drm_mode_getcrtc(struct drm_device *dev,
 	if (!crtc)
 		return -ENOENT;
 
+	plane = crtc->primary;
+
 	crtc_resp->gamma_size = crtc->gamma_size;
 
-	drm_modeset_lock(&crtc->primary->mutex, NULL);
-	if (crtc->primary->state && crtc->primary->state->fb)
-		crtc_resp->fb_id = crtc->primary->state->fb->base.id;
-	else if (!crtc->primary->state && crtc->primary->fb)
-		crtc_resp->fb_id = crtc->primary->fb->base.id;
+	drm_modeset_lock(&plane->mutex, NULL);
+	if (plane->state && plane->state->fb)
+		crtc_resp->fb_id = plane->state->fb->base.id;
+	else if (!plane->state && plane->fb)
+		crtc_resp->fb_id = plane->fb->base.id;
 	else
 		crtc_resp->fb_id = 0;
 
-	if (crtc->primary->state) {
-		crtc_resp->x = crtc->primary->state->src_x >> 16;
-		crtc_resp->y = crtc->primary->state->src_y >> 16;
+	if (plane->state) {
+		crtc_resp->x = plane->state->src_x >> 16;
+		crtc_resp->y = plane->state->src_y >> 16;
 	}
-	drm_modeset_unlock(&crtc->primary->mutex);
+	drm_modeset_unlock(&plane->mutex);
 
 	drm_modeset_lock(&crtc->mutex, NULL);
 	if (crtc->state) {
@@ -554,6 +557,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_mode_crtc *crtc_req = data;
 	struct drm_crtc *crtc;
+	struct drm_plane *plane;
 	struct drm_connector **connector_set = NULL, *connector;
 	struct drm_framebuffer *fb = NULL;
 	struct drm_display_mode *mode = NULL;
@@ -580,6 +584,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	}
 	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
+	plane = crtc->primary;
+
 	mutex_lock(&crtc->dev->mode_config.mutex);
 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
 retry:
@@ -590,12 +596,12 @@ retry:
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
-			if (!crtc->primary->fb) {
+			if (!plane->fb) {
 				DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
 				ret = -EINVAL;
 				goto out;
 			}
-			fb = crtc->primary->fb;
+			fb = plane->fb;
 			/* Make refcounting symmetric with the lookup path. */
 			drm_framebuffer_get(fb);
 		} else {
@@ -627,8 +633,8 @@ retry:
 		 * match real hardware capabilities. Skip the check in that
 		 * case.
 		 */
-		if (!crtc->primary->format_default) {
-			ret = drm_plane_check_pixel_format(crtc->primary,
+		if (!plane->format_default) {
+			ret = drm_plane_check_pixel_format(plane,
 							   fb->format->format,
 							   fb->modifier);
 			if (ret) {
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 6d2a6e428a3e..38e2a628bfa2 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -756,6 +756,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 				     struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_plane *plane = crtc->cursor;
 	struct drm_framebuffer *fb = NULL;
 	struct drm_mode_fb_cmd2 fbreq = {
 		.width = req->width,
@@ -769,8 +770,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 	uint32_t src_w = 0, src_h = 0;
 	int ret = 0;
 
-	BUG_ON(!crtc->cursor);
-	WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
+	BUG_ON(!plane);
+	WARN_ON(plane->crtc != crtc && plane->crtc != NULL);
 
 	/*
 	 * Obtain fb we'll be using (either new or existing) and take an extra
@@ -790,7 +791,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 			fb = NULL;
 		}
 	} else {
-		fb = crtc->cursor->fb;
+		fb = plane->fb;
 		if (fb)
 			drm_framebuffer_get(fb);
 	}
@@ -810,7 +811,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 		src_h = fb->height << 16;
 	}
 
-	ret = __setplane_internal(crtc->cursor, crtc, fb,
+	ret = __setplane_internal(plane, crtc, fb,
 				  crtc_x, crtc_y, crtc_w, crtc_h,
 				  0, 0, src_w, src_h, ctx);
 
@@ -931,6 +932,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 {
 	struct drm_mode_crtc_page_flip_target *page_flip = data;
 	struct drm_crtc *crtc;
+	struct drm_plane *plane;
 	struct drm_framebuffer *fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
 	u32 target_vblank = page_flip->sequence;
@@ -959,6 +961,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	if (!crtc)
 		return -ENOENT;
 
+	plane = crtc->primary;
+
 	if (crtc->funcs->page_flip_target) {
 		u32 current_vblank;
 		int r;
@@ -1003,11 +1007,11 @@ retry:
 	ret = drm_modeset_lock(&crtc->mutex, &ctx);
 	if (ret)
 		goto out;
-	ret = drm_modeset_lock(&crtc->primary->mutex, &ctx);
+	ret = drm_modeset_lock(&plane->mutex, &ctx);
 	if (ret)
 		goto out;
 
-	if (crtc->primary->fb == NULL) {
+	if (plane->fb == NULL) {
 		/* The framebuffer is currently unbound, presumably
 		 * due to a hotplug event, that userspace has not
 		 * yet discovered.
@@ -1023,7 +1027,7 @@ retry:
 	}
 
 	if (crtc->state) {
-		const struct drm_plane_state *state = crtc->primary->state;
+		const struct drm_plane_state *state = plane->state;
 
 		ret = drm_framebuffer_check_src_coords(state->src_x,
 						       state->src_y,
@@ -1036,7 +1040,7 @@ retry:
 	if (ret)
 		goto out;
 
-	if (crtc->primary->fb->format != fb->format) {
+	if (plane->fb->format != fb->format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
 		ret = -EINVAL;
 		goto out;
@@ -1060,7 +1064,7 @@ retry:
 		}
 	}
 
-	crtc->primary->old_fb = crtc->primary->fb;
+	plane->old_fb = plane->fb;
 	if (crtc->funcs->page_flip_target)
 		ret = crtc->funcs->page_flip_target(crtc, fb, e,
 						    page_flip->flags,
@@ -1073,9 +1077,9 @@ retry:
 		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
 			drm_event_cancel_free(dev, &e->base);
 		/* Keep the old fb, don't unref it. */
-		crtc->primary->old_fb = NULL;
+		plane->old_fb = NULL;
 	} else {
-		crtc->primary->fb = fb;
+		plane->fb = fb;
 		/* Unref only the old framebuffer. */
 		fb = NULL;
 	}
@@ -1083,9 +1087,9 @@ retry:
 out:
 	if (fb)
 		drm_framebuffer_put(fb);
-	if (crtc->primary->old_fb)
-		drm_framebuffer_put(crtc->primary->old_fb);
-	crtc->primary->old_fb = NULL;
+	if (plane->old_fb)
+		drm_framebuffer_put(plane->old_fb);
+	plane->old_fb = NULL;
 
 	if (ret == -EDEADLK) {
 		ret = drm_modeset_backoff(&ctx);

From bf2d5eb902e29d6e7c25540b85242744591b703e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:22:56 +0200
Subject: [PATCH 0177/1286] drm: Adjust whitespace for legibility
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a bit of whitespace here and there to make the code look a bit
more structured.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-7-ville.syrjala@linux.intel.com
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/drm_crtc.c  | 4 +++-
 drivers/gpu/drm/drm_plane.c | 6 +++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8552ed419056..537ffaab855c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -434,13 +434,13 @@ int drm_mode_getcrtc(struct drm_device *dev,
 		if (crtc->state->enable) {
 			drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
 			crtc_resp->mode_valid = 1;
-
 		} else {
 			crtc_resp->mode_valid = 0;
 		}
 	} else {
 		crtc_resp->x = crtc->x;
 		crtc_resp->y = crtc->y;
+
 		if (crtc->enabled) {
 			drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode);
 			crtc_resp->mode_valid = 1;
@@ -592,6 +592,7 @@ retry:
 	ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
 	if (ret)
 		goto out;
+
 	if (crtc_req->mode_valid) {
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
@@ -601,6 +602,7 @@ retry:
 				ret = -EINVAL;
 				goto out;
 			}
+
 			fb = plane->fb;
 			/* Make refcounting symmetric with the lookup path. */
 			drm_framebuffer_get(fb);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 38e2a628bfa2..bedceca7dd06 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -785,6 +785,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 				DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
 				return PTR_ERR(fb);
 			}
+
 			fb->hot_x = req->hot_x;
 			fb->hot_y = req->hot_y;
 		} else {
@@ -1035,7 +1036,8 @@ retry:
 						       state->src_h,
 						       fb);
 	} else {
-		ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+		ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y,
+					      &crtc->mode, fb);
 	}
 	if (ret)
 		goto out;
@@ -1052,10 +1054,12 @@ retry:
 			ret = -ENOMEM;
 			goto out;
 		}
+
 		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
 		e->event.base.length = sizeof(e->event);
 		e->event.vbl.user_data = page_flip->user_data;
 		e->event.vbl.crtc_id = crtc->base.id;
+
 		ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
 		if (ret) {
 			kfree(e);

From 23a5e1fb349d80d8c0eb97644dea16d2d7cfac26 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:22:57 +0200
Subject: [PATCH 0178/1286] drm: Make the fb refcount handover less magic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Instead of assigning the plane->fb pointer and clearing the fb pointer
to hand over the reference, let's just do it by grabbing another
reference for plane->fb and letting fb keep its original one.
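
For illustration only (not part of the patch), a sketch of how the new
reference for plane->fb interacts with the "out:" cleanup that always
puts the lookup reference:

	} else {
		plane->fb = fb;
		drm_framebuffer_get(fb);	/* plane->fb takes its own ref */
	}
out:
	if (fb)
		drm_framebuffer_put(fb);	/* drop the lookup ref */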

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-8-ville.syrjala@linux.intel.com
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/drm_plane.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index bedceca7dd06..008f9456a5e8 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -1084,8 +1084,7 @@ retry:
 		plane->old_fb = NULL;
 	} else {
 		plane->fb = fb;
-		/* Unref only the old framebuffer. */
-		fb = NULL;
+		drm_framebuffer_get(fb);
 	}
 
 out:

From a36c027db57b6a33970c5c830a0d143f0e98c248 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:22:58 +0200
Subject: [PATCH 0179/1286] drm: Use plane->state->fb over plane->fb
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Stop looking at plane->fb on atomic drivers. Use plane->state->fb
instead.
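
For illustration only (not part of the patch), the lookup order used in
the hunks below, written as a hypothetical helper:

	/* atomic drivers track the fb in plane->state; legacy ones in plane->fb */
	static struct drm_framebuffer *current_fb(struct drm_plane *plane)
	{
		return plane->state ? plane->state->fb : plane->fb;
	}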

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-9-ville.syrjala@linux.intel.com
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/drm_atomic_helper.c |  2 +-
 drivers/gpu/drm/drm_crtc.c          | 11 +++++++++--
 drivers/gpu/drm/drm_plane.c         | 19 ++++++++++++++-----
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index fe09d6254c19..ee03c1ed2521 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2670,7 +2670,7 @@ int drm_atomic_helper_disable_plane(struct drm_plane *plane,
 		goto fail;
 	}
 
-	if (plane_state->crtc && (plane == plane->crtc->cursor))
+	if (plane_state->crtc && plane_state->crtc->cursor == plane)
 		plane_state->state->legacy_cursor_update = true;
 
 	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 537ffaab855c..a231dd5dce16 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -597,13 +597,20 @@ retry:
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
-			if (!plane->fb) {
+			struct drm_framebuffer *old_fb;
+
+			if (plane->state)
+				old_fb = plane->state->fb;
+			else
+				old_fb = plane->fb;
+
+			if (!old_fb) {
 				DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
 				ret = -EINVAL;
 				goto out;
 			}
 
-			fb = plane->fb;
+			fb = old_fb;
 			/* Make refcounting symmetric with the lookup path. */
 			drm_framebuffer_get(fb);
 		} else {
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 008f9456a5e8..035054455301 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -792,7 +792,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 			fb = NULL;
 		}
 	} else {
-		fb = plane->fb;
+		if (plane->state)
+			fb = plane->state->fb;
+		else
+			fb = plane->fb;
+
 		if (fb)
 			drm_framebuffer_get(fb);
 	}
@@ -934,7 +938,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_mode_crtc_page_flip_target *page_flip = data;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
-	struct drm_framebuffer *fb = NULL;
+	struct drm_framebuffer *fb = NULL, *old_fb;
 	struct drm_pending_vblank_event *e = NULL;
 	u32 target_vblank = page_flip->sequence;
 	struct drm_modeset_acquire_ctx ctx;
@@ -1012,7 +1016,12 @@ retry:
 	if (ret)
 		goto out;
 
-	if (plane->fb == NULL) {
+	if (plane->state)
+		old_fb = plane->state->fb;
+	else
+		old_fb = plane->fb;
+
+	if (old_fb == NULL) {
 		/* The framebuffer is currently unbound, presumably
 		 * due to a hotplug event, that userspace has not
 		 * yet discovered.
@@ -1027,7 +1036,7 @@ retry:
 		goto out;
 	}
 
-	if (crtc->state) {
+	if (plane->state) {
 		const struct drm_plane_state *state = plane->state;
 
 		ret = drm_framebuffer_check_src_coords(state->src_x,
@@ -1042,7 +1051,7 @@ retry:
 	if (ret)
 		goto out;
 
-	if (plane->fb->format != fb->format) {
+	if (old_fb->format != fb->format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
 		ret = -EINVAL;
 		goto out;

From 8bc20f6594a10d3e69b37fc223e43da3804673ef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:22:59 +0200
Subject: [PATCH 0180/1286] drm/i915: Stop consulting plane->fb
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want to get rid of plane->fb on atomic drivers. Stop looking at it.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-10-ville.syrjala@linux.intel.com
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_display.c | 2 +-
 drivers/gpu/drm/i915/intel_fbdev.c   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b48fd2561fe..54d4c369cae2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2824,7 +2824,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 			continue;
 
 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
-			fb = c->primary->fb;
+			fb = state->base.fb;
 			drm_framebuffer_get(fb);
 			goto valid_fb;
 		}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6f12adc06365..89592ecc44ca 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -640,7 +640,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 		if (!crtc->state->active)
 			continue;
 
-		WARN(!crtc->primary->fb,
+		WARN(!crtc->primary->state->fb,
 		     "re-used BIOS config but lost an fb on crtc %d\n",
 		     crtc->base.id);
 	}

From bf4a7a227511410cd6d6ca623e9ed17e5fc5f30b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:23:01 +0200
Subject: [PATCH 0181/1286] drm/sti: Stop consulting plane->fb
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want to get rid of plane->fb on atomic drivers. Stop looking at it.

Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Cc: Vincent Abriou <vincent.abriou@st.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-12-ville.syrjala@linux.intel.com
Acked-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/sti/sti_plane.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index b074609c960a..b48cd86e0250 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -40,6 +40,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
 			  bool new_frame,
 			  bool new_field)
 {
+	struct drm_plane_state *state = plane->drm_plane.state;
 	ktime_t now;
 	struct sti_fps_info *fps;
 	int fpks, fipks, ms_since_last, num_frames, num_fields;
@@ -66,14 +67,14 @@ void sti_plane_update_fps(struct sti_plane *plane,
 	fps->last_timestamp = now;
 	fps->last_frame_counter = fps->curr_frame_counter;
 
-	if (plane->drm_plane.fb) {
+	if (state->fb) {
 		fpks = (num_frames * 1000000) / ms_since_last;
 		snprintf(plane->fps_info.fps_str, FPS_LENGTH,
 			 "%-8s %4dx%-4d %.4s @ %3d.%-3.3d fps (%s)",
 			 plane->drm_plane.name,
-			 plane->drm_plane.fb->width,
-			 plane->drm_plane.fb->height,
-			 (char *)&plane->drm_plane.fb->format->format,
+			 state->fb->width,
+			 state->fb->height,
+			 (char *)&state->fb->format->format,
 			 fpks / 1000, fpks % 1000,
 			 sti_plane_to_str(plane));
 	}

From 2bb01c4cc0592ff69323077c093a67f45155d72f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:23:02 +0200
Subject: [PATCH 0182/1286] drm/vmwgfx: Stop consulting plane->fb
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want to get rid of plane->fb on atomic drivers. Stop looking at it.

Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
Cc: Sinclair Yeh <syeh@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-13-ville.syrjala@linux.intel.com
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 34ecc27fc30a..9fdb3ec9b4c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -385,9 +385,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 	hotspot_x = du->hotspot_x;
 	hotspot_y = du->hotspot_y;
 
-	if (plane->fb) {
-		hotspot_x += plane->fb->hot_x;
-		hotspot_y += plane->fb->hot_y;
+	if (plane->state->fb) {
+		hotspot_x += plane->state->fb->hot_x;
+		hotspot_y += plane->state->fb->hot_y;
 	}
 
 	du->cursor_surface = vps->surf;

From 6f6887dad5405bc87fc26eddca1fad64d04360fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Mon, 26 Mar 2018 15:14:42 +0300
Subject: [PATCH 0183/1286] drm/zte: Stop consulting plane->crtc
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want to get rid of plane->crtc on atomic drivers. Stop looking at it.

v2: Use old_state->crtc (Maarten)
v3: s/fb/crtc/ in commit message to actually match the patch (Shawn)

Cc: Shawn Guo <shawnguo@kernel.org>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Acked-by: Shawn Guo <shawnguo@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180326121442.32009-1-ville.syrjala@linux.intel.com
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/zte/zx_plane.c | 2 +-
 drivers/gpu/drm/zte/zx_vou.c   | 5 +++--
 drivers/gpu/drm/zte/zx_vou.h   | 3 ++-
 3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 94545adac50d..d1931f5ea0b2 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -268,7 +268,7 @@ static void zx_plane_atomic_disable(struct drm_plane *plane,
 	struct zx_plane *zplane = to_zx_plane(plane);
 	void __iomem *hbsc = zplane->hbsc;
 
-	zx_vou_layer_disable(plane);
+	zx_vou_layer_disable(plane, old_state);
 
 	/* Disable HBSC block */
 	zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
index 7491813131f3..442311d31110 100644
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -627,9 +627,10 @@ void zx_vou_layer_enable(struct drm_plane *plane)
 	zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable);
 }
 
-void zx_vou_layer_disable(struct drm_plane *plane)
+void zx_vou_layer_disable(struct drm_plane *plane,
+			  struct drm_plane_state *old_state)
 {
-	struct zx_crtc *zcrtc = to_zx_crtc(plane->crtc);
+	struct zx_crtc *zcrtc = to_zx_crtc(old_state->crtc);
 	struct zx_vou_hw *vou = zcrtc->vou;
 	struct zx_plane *zplane = to_zx_plane(plane);
 	const struct vou_layer_bits *bits = zplane->bits;
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
index 97d72bfce982..5b7f84fbb112 100644
--- a/drivers/gpu/drm/zte/zx_vou.h
+++ b/drivers/gpu/drm/zte/zx_vou.h
@@ -62,6 +62,7 @@ void zx_vou_config_dividers(struct drm_crtc *crtc,
 			    struct vou_div_config *configs, int num);
 
 void zx_vou_layer_enable(struct drm_plane *plane);
-void zx_vou_layer_disable(struct drm_plane *plane);
+void zx_vou_layer_disable(struct drm_plane *plane,
+			  struct drm_plane_state *old_state);
 
 #endif /* __ZX_VOU_H__ */

From be90cc318b4c8d5c3999eb25783d8055e6a67855 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 17:23:12 +0200
Subject: [PATCH 0184/1286] drm/i915: Restore planes after load detection
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Actually turn the planes back on after we're done with
the load detection.

Fixes: 20bdc112bbe4 ("drm/i915: Disable all planes for load detection, v2.")
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322152313.6561-23-ville.syrjala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 54d4c369cae2..182f9bf98484 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9974,6 +9974,8 @@ found:
 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
 	if (!ret)
 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+	if (!ret)
+		ret = drm_atomic_add_affected_planes(restore_state, crtc);
 	if (ret) {
 		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
 		goto fail;

From 4165791d29f64e01860a064f3c649447dbac41c3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 22 Mar 2018 19:41:35 +0200
Subject: [PATCH 0185/1286] drm/i915: Make force_load_detect effective even w/
 DMI quirks/hotplug
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When doing forced load detection testing we should totally ignore any
hotplug status for the connector. This is mostly relevant for machines
where we already ignore the hotplug status based on the DMI quirks. On
other machines we would currently skip the force load detection tests
on account of the connector already being connected.

v2: Drop the other force_load_detect check since it's useless now (Maarten)

Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180322174135.5982-1-ville.syrjala@linux.intel.com
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_crt.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c0a8805b277f..de0e22322c76 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -748,6 +748,11 @@ intel_crt_detect(struct drm_connector *connector,
 		      connector->base.id, connector->name,
 		      force);
 
+	if (i915_modparams.load_detect_test) {
+		intel_display_power_get(dev_priv, intel_encoder->power_domain);
+		goto load_detect;
+	}
+
 	/* Skip machines without VGA that falsely report hotplug events */
 	if (dmi_check_system(intel_spurious_crt_detect))
 		return connector_status_disconnected;
@@ -776,11 +781,12 @@ intel_crt_detect(struct drm_connector *connector,
 	 * broken monitor (without edid) to work behind a broken kvm (that fails
 	 * to have the right resistors for HP detection) needs to fix this up.
 	 * For now just bail out. */
-	if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) {
+	if (I915_HAS_HOTPLUG(dev_priv)) {
 		status = connector_status_disconnected;
 		goto out;
 	}
 
+load_detect:
 	if (!force) {
 		status = connector->status;
 		goto out;

From 5807e1c21dbd56c87f0e86ae6fe49ec745660c0d Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 29 Mar 2018 23:45:18 +0100
Subject: [PATCH 0186/1286] drm/i915: Avoid sleeping inside per-engine reset
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Only sleep and repeat when asked for a full device reset (ALL_ENGINES)
and avoid using sleeping waits when asked for a per-engine reset. The
goal is to be able to use a per-engine reset from hardirq/softirq/timer
context. A consequence is that our individual wait timeouts are a
thousand times shorter, on the order of a hundred microseconds rather
than hundreds of milliseconds. This may make hitting the timeouts more
common, but hopefully the fallback to the full-device reset will be
sufficient to pick up the pieces.

Note that the sleeps inside the older gen (pre-gen8) paths have been
left as-is, as they are only used in full-device reset mode.
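
For illustration only (not part of the patch), the shape of an
atomic-safe wait with the extended helper; the trailing arguments
(fast timeout in us, slow timeout in ms, optional value out pointer)
follow the calls in the diff below:

	/* busy-wait only: no slow (sleeping) timeout, safe in atomic context */
	err = __intel_wait_for_register_fw(dev_priv,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0, NULL);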

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
CC: Michel Thierry <michel.thierry@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180329224519.13598-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_uncore.c | 51 +++++++++++++++++++----------
 1 file changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f37ecfc69e49..a0d7e0cfbd32 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1702,11 +1702,10 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
 	const i915_reg_t mode = RING_MI_MODE(base);
 
 	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
-	if (intel_wait_for_register_fw(dev_priv,
-				       mode,
-				       MODE_IDLE,
-				       MODE_IDLE,
-				       500))
+	if (__intel_wait_for_register_fw(dev_priv,
+					 mode, MODE_IDLE, MODE_IDLE,
+					 500, 0,
+					 NULL))
 		DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
 				 engine->name);
 
@@ -1860,9 +1859,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
 
 	/* Wait for the device to ack the reset requests */
-	err = intel_wait_for_register_fw(dev_priv,
-					  GEN6_GDRST, hw_domain_mask, 0,
-					  500);
+	err = __intel_wait_for_register_fw(dev_priv,
+					   GEN6_GDRST, hw_domain_mask, 0,
+					   500, 0,
+					   NULL);
 	if (err)
 		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
 				 hw_domain_mask);
@@ -2027,11 +2027,12 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine)
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
 
-	ret = intel_wait_for_register_fw(dev_priv,
-					 RING_RESET_CTL(engine->mmio_base),
-					 RESET_CTL_READY_TO_RESET,
-					 RESET_CTL_READY_TO_RESET,
-					 700);
+	ret = __intel_wait_for_register_fw(dev_priv,
+					   RING_RESET_CTL(engine->mmio_base),
+					   RESET_CTL_READY_TO_RESET,
+					   RESET_CTL_READY_TO_RESET,
+					   700, 0,
+					   NULL);
 	if (ret)
 		DRM_ERROR("%s: reset request timeout\n", engine->name);
 
@@ -2094,15 +2095,31 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 	int retry;
 	int ret;
 
-	might_sleep();
+	/*
+	 * We want to perform per-engine reset from atomic context (e.g.
+	 * softirq), which imposes the constraint that we cannot sleep.
+	 * However, experience suggests that spending a bit of time waiting
+	 * for a reset helps in various cases, so for a full-device reset
+	 * we apply the opposite rule and wait if we want to. As we should
+	 * always follow up a failed per-engine reset with a full device reset,
+	 * being a little faster, stricter and more error prone for the
+	 * atomic case seems an acceptable compromise.
+	 *
+	 * Unfortunately this leads to a bimodal routine, when the goal was
+	 * to have a single reset function that worked for resetting any
+	 * number of engines simultaneously.
+	 */
+	might_sleep_if(engine_mask == ALL_ENGINES);
 
-	/* If the power well sleeps during the reset, the reset
+	/*
+	 * If the power well sleeps during the reset, the reset
 	 * request may be dropped and never completes (causing -EIO).
 	 */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	for (retry = 0; retry < 3; retry++) {
 
-		/* We stop engines, otherwise we might get failed reset and a
+		/*
+		 * We stop engines, otherwise we might get failed reset and a
 		 * dead gpu (on elk). Also as modern gpu as kbl can suffer
 		 * from system hang if batchbuffer is progressing when
 		 * the reset is issued, regardless of READY_TO_RESET ack.
@@ -2120,7 +2137,7 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 			GEM_TRACE("engine_mask=%x\n", engine_mask);
 			ret = reset(dev_priv, engine_mask);
 		}
-		if (ret != -ETIMEDOUT)
+		if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
 			break;
 
 		cond_resched();

From 3df82dd43be4b6efde20f819d5829c8ed5e95476 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 29 Mar 2018 23:45:19 +0100
Subject: [PATCH 0187/1286] drm/i915: Only warn for might_sleep() before a slow
 wait_for_register

As intel_wait_for_register_fw() may use, and if successful only use, a
busy-wait loop, the might_sleep() warning is a little over-zealous.
Restrict it to a might_sleep_if() when a slow timeout is specified (and
so the caller authorises the use of a usleep).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180329224519.13598-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_uncore.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a0d7e0cfbd32..e7540bb9786c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1996,7 +1996,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 	u32 reg_value;
 	int ret;
 
-	might_sleep();
+	might_sleep_if(slow_timeout_ms);
 
 	spin_lock_irq(&dev_priv->uncore.lock);
 	intel_uncore_forcewake_get__locked(dev_priv, fw);
@@ -2008,7 +2008,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
 	intel_uncore_forcewake_put__locked(dev_priv, fw);
 	spin_unlock_irq(&dev_priv->uncore.lock);
 
-	if (ret)
+	if (ret && slow_timeout_ms)
 		ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
 				 (reg_value & mask) == value,
 				 slow_timeout_ms * 1000, 10, 1000);

From 487da6172f5678496699bec685797dc816f6a131 Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 23 Mar 2018 13:45:51 +0000
Subject: [PATCH 0188/1286] drm: Reshuffle getfb error returns
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make it a little more clear what's going on inside of getfb, and also
make it easier to add alternate paths to get a handle in future.

Signed-off-by: Daniel Stone <daniels@collabora.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180323134553.15993-3-daniels@collabora.com
---
 drivers/gpu/drm/drm_framebuffer.c | 34 ++++++++++++++++---------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index ad67203de715..8c4d32adcc17 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -468,29 +468,31 @@ int drm_mode_getfb(struct drm_device *dev,
 		goto out;
 	}
 
+	if (!fb->funcs->create_handle) {
+		ret = -ENODEV;
+		goto out;
+	}
+
 	r->height = fb->height;
 	r->width = fb->width;
 	r->depth = fb->format->depth;
 	r->bpp = fb->format->cpp[0] * 8;
 	r->pitch = fb->pitches[0];
-	if (fb->funcs->create_handle) {
-		if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
-		    drm_is_control_client(file_priv)) {
-			ret = fb->funcs->create_handle(fb, file_priv,
-						       &r->handle);
-		} else {
-			/* GET_FB() is an unprivileged ioctl so we must not
-			 * return a buffer-handle to non-master processes! For
-			 * backwards-compatibility reasons, we cannot make
-			 * GET_FB() privileged, so just return an invalid handle
-			 * for non-masters. */
-			r->handle = 0;
-			ret = 0;
-		}
-	} else {
-		ret = -ENODEV;
+
+	/* GET_FB() is an unprivileged ioctl so we must not return a
+	 * buffer-handle to non-master processes! For
+	 * backwards-compatibility reasons, we cannot make GET_FB() privileged,
+	 * so just return an invalid handle for non-masters.
+	 */
+	if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN) &&
+	    !drm_is_control_client(file_priv)) {
+		r->handle = 0;
+		ret = 0;
+		goto out;
 	}
 
+	ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
+
 out:
 	drm_framebuffer_put(fb);
 

From 4f212e40468650e220c1770876c7f25b8e0c1ff5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:37 -0700
Subject: [PATCH 0189/1286] drm: Add DP PSR2 sink enable bit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

To comply with eDP 1.4a, this bit should be set when enabling PSR2.
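
For illustration only (not part of this patch), a hedged sketch of how a
driver might set it, assuming the existing DP_PSR_EN_CFG register and
drm_dp_dpcd_writeb() helper:

	/* sketch: enable PSR and request PSR2 operation at the sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_ENABLE_PSR2);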

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-1-jose.souza@intel.com
---
 include/drm/drm_dp_helper.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 4de97e94ef9d..a62714578b93 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -477,6 +477,7 @@
 # define DP_PSR_FRAME_CAPTURE		    (1 << 3)
 # define DP_PSR_SELECTIVE_UPDATE	    (1 << 4)
 # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS     (1 << 5)
+# define DP_PSR_ENABLE_PSR2		    (1 << 6) /* eDP 1.4a */
 
 #define DP_ADAPTER_CTRL			    0x1a0
 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE   (1 << 0)

From fe36948afb08e361d12b2878348778aeaba74134 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:38 -0700
Subject: [PATCH 0190/1286] drm: Add DP last received PSR SDP VSC register and
 bits
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is a register to help debug what was in the last PSR SDP VSC
packet received by the sink.
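
For illustration only (not part of this patch), a hedged sketch of how
the register could be read for debugging, assuming the existing
drm_dp_dpcd_readb() helper:

	u8 sdp;

	/* sketch: dump the PSR state reported in the last received SDP VSC */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_LAST_RECEIVED_PSR_SDP, &sdp) == 1)
		DRM_DEBUG_KMS("last PSR SDP: state=%d SU valid=%d\n",
			      !!(sdp & DP_PSR_STATE_BIT), !!(sdp & DP_SU_VALID));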

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-2-jose.souza@intel.com
---
 include/drm/drm_dp_helper.h | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index a62714578b93..c6853f0fef2a 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -794,6 +794,15 @@
 # define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK	(0xf << 4)
 # define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT	4
 
+#define DP_LAST_RECEIVED_PSR_SDP	    0x200a /* eDP 1.2 */
+# define DP_PSR_STATE_BIT		    (1 << 0) /* eDP 1.2 */
+# define DP_UPDATE_RFB_BIT		    (1 << 1) /* eDP 1.2 */
+# define DP_CRC_VALID_BIT		    (1 << 2) /* eDP 1.2 */
+# define DP_SU_VALID			    (1 << 3) /* eDP 1.4 */
+# define DP_FIRST_SCAN_LINE_SU_REGION	    (1 << 4) /* eDP 1.4 */
+# define DP_LAST_SCAN_LINE_SU_REGION	    (1 << 5) /* eDP 1.4 */
+# define DP_Y_COORDINATE_VALID		    (1 << 6) /* eDP 1.4a */
+
 #define DP_RECEIVER_ALPM_STATUS		    0x200b  /* eDP 1.4 */
 # define DP_ALPM_LOCK_TIMEOUT_ERROR	    (1 << 0)
 

From 6ce9b78a7388c37b33293666aa6fc61c177046e6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:39 -0700
Subject: [PATCH 0191/1286] drm/i915/psr: Nuke aux frame sync
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The eDP spec states that aux frame sync is required to do PSR2 selective
updates, but i915 doesn't fully implement it. It sends the aux frame
sync messages, but the value is always zero as GTC is not enabled
in the driver.

Testing showed that panels can do selective updates when the
y-coordinate is also included in the SDP, which is why that is required
to run PSR2 in i915.

A dummy value is of no use to the sink, so remove everything related
to aux frame sync; if GTC is ever enabled we can bring this back.

Cc: Vathsala Nagaraju <vathsala.nagaraju@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-3-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h  |  1 -
 drivers/gpu/drm/i915/intel_psr.c | 24 +-----------------------
 2 files changed, 1 insertion(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 800230ba1c3b..fade9029b6f5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -603,7 +603,6 @@ struct i915_psr {
 	struct delayed_work work;
 	unsigned busy_frontbuffer_bits;
 	bool psr2_support;
-	bool aux_frame_sync;
 	bool link_standby;
 	bool y_cord_support;
 	bool colorimetry_support;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b8e083e10029..c0a6f63b586f 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -137,16 +137,9 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 
 	if (INTEL_GEN(dev_priv) >= 9 &&
 	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
-		uint8_t frame_sync_cap;
 
 		dev_priv->psr.sink_support = true;
-		if (drm_dp_dpcd_readb(&intel_dp->aux,
-				      DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
-				      &frame_sync_cap) != 1)
-			frame_sync_cap = 0;
-		dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
-		/* PSR2 needs frame sync as well */
-		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+		dev_priv->psr.psr2_support = true;
 		DRM_DEBUG_KMS("PSR2 %s on sink",
 			      dev_priv->psr.psr2_support ? "supported" : "not supported");
 
@@ -268,12 +261,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-
-	/* Enable AUX frame sync at sink */
-	if (dev_priv->psr.aux_frame_sync)
-		drm_dp_dpcd_writeb(&intel_dp->aux,
-				DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
-				DP_AUX_FRAME_SYNC_ENABLE);
 	/* Enable ALPM at sink for psr2 */
 	if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
 		drm_dp_dpcd_writeb(&intel_dp->aux,
@@ -712,11 +699,6 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 		i915_reg_t psr_status;
 		u32 psr_status_mask;
 
-		if (dev_priv->psr.aux_frame_sync)
-			drm_dp_dpcd_writeb(&intel_dp->aux,
-					DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
-					0);
-
 		if (dev_priv->psr.psr2_support) {
 			psr_status = EDP_PSR2_STATUS;
 			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
@@ -860,10 +842,6 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
 		return;
 
 	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.aux_frame_sync)
-			drm_dp_dpcd_writeb(&intel_dp->aux,
-					DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
-					0);
 		if (dev_priv->psr.psr2_support) {
 			val = I915_READ(EDP_PSR2_CTL);
 			WARN_ON(!(val & EDP_PSR2_ENABLE));

From aee3bac0a3a89ea12644533142ba69eebb602e4c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:40 -0700
Subject: [PATCH 0192/1286] drm/i915/psr: Tie PSR2 support to Y coordinate
 requirement
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Although i915 does not implement AUX frame sync, testing showed that
panels can do selective update when the Y-coordinate is also included
in the SDP, which is why it is required to run PSR2 in i915.

So move to a single place the sink requirements that the driver
actually needs to enable PSR2.

Also, intel_psr2_config_valid() is called every time the CRTC config
is computed, so it was wasting time re-checking the Y-coordinate
requirement on every call.

This allows us to nuke y_cord_support and some of the VSC setup code
that was handling a scenario that would never happen (PSR2 without
Y-coordinate).

Also rename intel_dp_get_y_cord_status() to
intel_dp_get_y_coord_required(), as that more accurately reflects the
name and function of the bit according to the eDP spec.
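
As a rough illustration of the capability check this patch
consolidates (a hedged sketch in plain C, not the driver code; the
enum names are made up for clarity), only a sink reporting PSR
version 03h is treated as usable for PSR2 without GTC:

  #include <stdbool.h>
  #include <stdint.h>

  /* Values mirror the eDP DPCD PSR capability byte used in the diff
   * below: 01h = PSR1, 02h = PSR2, 03h = PSR2 with Y-coordinate. */
  enum {
      PSR_VERSION_1        = 0x01,
      PSR_VERSION_2        = 0x02,
      PSR_VERSION_2_WITH_Y = 0x03,
  };

  static bool sink_can_do_psr2_without_gtc(uint8_t psr_version)
  {
      /* Without GTC we can only rely on panels that require the
       * Y-coordinate in the VSC SDP, i.e. version 03h. */
      return psr_version == PSR_VERSION_2_WITH_Y;
  }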

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-4-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h  |  1 -
 drivers/gpu/drm/i915/intel_psr.c | 46 +++++++++++++-------------------
 2 files changed, 19 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fade9029b6f5..92cf6f4e9e00 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -604,7 +604,6 @@ struct i915_psr {
 	unsigned busy_frontbuffer_bits;
 	bool psr2_support;
 	bool link_standby;
-	bool y_cord_support;
 	bool colorimetry_support;
 	bool alpm;
 	bool has_hw_tracking;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c0a6f63b586f..fb2d0fe7106b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -93,7 +93,7 @@ static void psr_aux_io_power_put(struct intel_dp *intel_dp)
 	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
 }
 
-static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
 {
 	uint8_t psr_caps = 0;
 
@@ -130,22 +130,29 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
 			 sizeof(intel_dp->psr_dpcd));
 
-	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+	if (intel_dp->psr_dpcd[0]) {
 		dev_priv->psr.sink_support = true;
 		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
 	}
 
 	if (INTEL_GEN(dev_priv) >= 9 &&
-	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
-
-		dev_priv->psr.sink_support = true;
-		dev_priv->psr.psr2_support = true;
+	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+		/*
+		 * All panels that supports PSR version 03h (PSR2 +
+		 * Y-coordinate) can handle Y-coordinates in VSC but we are
+		 * only sure that it is going to be used when required by the
+		 * panel. This way panel is capable to do selective update
+		 * without a aux frame sync.
+		 *
+		 * To support PSR version 02h and PSR version 03h without
+		 * Y-coordinate requirement panels we would need to enable
+		 * GTC first.
+		 */
+		dev_priv->psr.psr2_support = intel_dp_get_y_coord_required(intel_dp);
 		DRM_DEBUG_KMS("PSR2 %s on sink",
 			      dev_priv->psr.psr2_support ? "supported" : "not supported");
 
 		if (dev_priv->psr.psr2_support) {
-			dev_priv->psr.y_cord_support =
-				intel_dp_get_y_cord_status(intel_dp);
 			dev_priv->psr.colorimetry_support =
 				intel_dp_get_colorimetry_status(intel_dp);
 			dev_priv->psr.alpm =
@@ -191,16 +198,12 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
 		memset(&psr_vsc, 0, sizeof(psr_vsc));
 		psr_vsc.sdp_header.HB0 = 0;
 		psr_vsc.sdp_header.HB1 = 0x7;
-		if (dev_priv->psr.colorimetry_support &&
-		    dev_priv->psr.y_cord_support) {
+		if (dev_priv->psr.colorimetry_support) {
 			psr_vsc.sdp_header.HB2 = 0x5;
 			psr_vsc.sdp_header.HB3 = 0x13;
-		} else if (dev_priv->psr.y_cord_support) {
+		} else {
 			psr_vsc.sdp_header.HB2 = 0x4;
 			psr_vsc.sdp_header.HB3 = 0xe;
-		} else {
-			psr_vsc.sdp_header.HB2 = 0x3;
-			psr_vsc.sdp_header.HB3 = 0xc;
 		}
 	} else {
 		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
@@ -457,15 +460,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}
 
-	/*
-	 * FIXME:enable psr2 only for y-cordinate psr2 panels
-	 * After gtc implementation , remove this restriction.
-	 */
-	if (!dev_priv->psr.y_cord_support) {
-		DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
-		return false;
-	}
-
 	return true;
 }
 
@@ -565,7 +559,6 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-	u32 chicken;
 
 	psr_aux_io_power_get(intel_dp);
 
@@ -576,9 +569,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 		hsw_psr_setup_aux(intel_dp);
 
 	if (dev_priv->psr.psr2_support) {
-		chicken = PSR2_VSC_ENABLE_PROG_HEADER;
-		if (dev_priv->psr.y_cord_support)
-			chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
+		u32 chicken = PSR2_VSC_ENABLE_PROG_HEADER
+			      | PSR2_ADD_VERTICAL_LINE_COUNT;
 		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
 
 		I915_WRITE(EDP_PSR_DEBUG,

From 5e87325f5c57ba59cc6908bf38efd40146d7ad9c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:41 -0700
Subject: [PATCH 0193/1286] drm/i915/psr/cnl: Enable Y-coordinate support in
 source
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

For Geminilake and Cannonlake+ the Y-coordinate support must be
enabled in PSR2_CTL too.

Spec: 7713 and 7720
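
As a hedged sketch of what this amounts to (plain C, not the actual
register programming; has_y_coordinate_bits() stands in for the
INTEL_GEN() >= 10 || IS_GEMINILAKE() check), the two extra bits are
simply OR-ed into the PSR2 control value on the platforms that have
them:

  #include <stdbool.h>
  #include <stdint.h>

  #define EDP_PSR2_ENABLE          (1u << 31)
  #define EDP_SU_TRACK_ENABLE      (1u << 30)
  #define EDP_Y_COORDINATE_VALID   (1u << 26)  /* GLK and CNL+ */
  #define EDP_Y_COORDINATE_ENABLE  (1u << 25)  /* GLK and CNL+ */

  static uint32_t psr2_ctl_value(bool has_y_coordinate_bits)
  {
      uint32_t val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;

      if (has_y_coordinate_bits)
          val |= EDP_Y_COORDINATE_VALID | EDP_Y_COORDINATE_ENABLE;
      return val;
  }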

Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-5-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h  |  3 +++
 drivers/gpu/drm/i915/intel_psr.c | 16 ++++++++++++----
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b926520803b6..6566f6bc5417 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4058,6 +4058,8 @@ enum {
 #define EDP_PSR2_CTL			_MMIO(0x6f900)
 #define   EDP_PSR2_ENABLE		(1<<31)
 #define   EDP_SU_TRACK_ENABLE		(1<<30)
+#define   EDP_Y_COORDINATE_VALID	(1<<26) /* GLK and CNL+ */
+#define   EDP_Y_COORDINATE_ENABLE	(1<<25) /* GLK and CNL+ */
 #define   EDP_MAX_SU_DISABLE_TIME(t)	((t)<<20)
 #define   EDP_MAX_SU_DISABLE_TIME_MASK	(0x1f<<20)
 #define   EDP_PSR2_TP2_TIME_500		(0<<8)
@@ -7042,6 +7044,7 @@ enum {
 #define CHICKEN_TRANS_A         0x420c0
 #define CHICKEN_TRANS_B         0x420c4
 #define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define  VSC_DATA_SEL_SOFTWARE_CONTROL	(1<<25) /* GLK and CNL+ */
 #define  DDI_TRAINING_OVERRIDE_ENABLE	(1<<19)
 #define  DDI_TRAINING_OVERRIDE_VALUE	(1<<18)
 #define  DDIE_TRAINING_OVERRIDE_ENABLE	(1<<17) /* CHICKEN_TRANS_A only */
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index fb2d0fe7106b..84e1f8be5c48 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -386,8 +386,10 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	/* FIXME: selective update is probably totally broken because it doesn't
 	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
 	 * good enough. */
-	val |= EDP_PSR2_ENABLE |
-		EDP_SU_TRACK_ENABLE;
+	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+		val |= EDP_Y_COORDINATE_VALID | EDP_Y_COORDINATE_ENABLE;
+	}
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux,
 				DP_SYNCHRONIZATION_LATENCY_IN_SINK,
@@ -569,8 +571,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 		hsw_psr_setup_aux(intel_dp);
 
 	if (dev_priv->psr.psr2_support) {
-		u32 chicken = PSR2_VSC_ENABLE_PROG_HEADER
-			      | PSR2_ADD_VERTICAL_LINE_COUNT;
+		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+
+		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
+				   | PSR2_ADD_VERTICAL_LINE_COUNT);
+
+		else
+			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
 		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
 
 		I915_WRITE(EDP_PSR_DEBUG,

From 95f28d2ec75ac388a8cc988e2e5496ce4adef4e4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:42 -0700
Subject: [PATCH 0194/1286] drm/i915/psr: Do not override PSR2 sink support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The sink can support our PSR2 requirements, but userspace can request
a resolution that the PSR2 hardware does not support; in this case the
PSR2 sink support flag was being overwritten.
Add another flag, so that if the requested resolution later changes to
a value that the PSR2 hardware can handle, PSR2 can be enabled again.
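
A minimal sketch of the resulting split (illustrative names, not the
driver structures): the sink capability is probed once and never
overwritten, while the runtime flag simply follows whatever the
current CRTC configuration allows.

  #include <stdbool.h>

  struct psr_state {
      bool sink_psr2_support; /* what the panel can do, probed once */
      bool psr2_enabled;      /* what we enabled for the current mode */
  };

  static void psr_update_for_mode(struct psr_state *psr,
                                  bool mode_fits_psr2)
  {
      /* Capability stays intact; only the runtime flag changes. */
      psr->psr2_enabled = psr->sink_psr2_support && mode_fits_psr2;
  }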

Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-6-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c |  4 ++--
 drivers/gpu/drm/i915/i915_drv.h     |  3 ++-
 drivers/gpu/drm/i915/intel_psr.c    | 33 +++++++++++++++--------------
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ff90577da450..1dba2c451255 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2630,7 +2630,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		   yesno(work_busy(&dev_priv->psr.work.work)));
 
 	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.psr2_support)
+		if (dev_priv->psr.psr2_enabled)
 			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
 		else
 			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
@@ -2678,7 +2678,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
 	}
-	if (dev_priv->psr.psr2_support) {
+	if (dev_priv->psr.psr2_enabled) {
 		u32 psr2 = I915_READ(EDP_PSR2_STATUS);
 
 		seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 92cf6f4e9e00..46cae097201c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -602,11 +602,12 @@ struct i915_psr {
 	bool active;
 	struct delayed_work work;
 	unsigned busy_frontbuffer_bits;
-	bool psr2_support;
+	bool sink_psr2_support;
 	bool link_standby;
 	bool colorimetry_support;
 	bool alpm;
 	bool has_hw_tracking;
+	bool psr2_enabled;
 
 	void (*enable_source)(struct intel_dp *,
 			      const struct intel_crtc_state *);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 84e1f8be5c48..5efddd920681 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -148,11 +148,12 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 		 * Y-coordinate requirement panels we would need to enable
 		 * GTC first.
 		 */
-		dev_priv->psr.psr2_support = intel_dp_get_y_coord_required(intel_dp);
-		DRM_DEBUG_KMS("PSR2 %s on sink",
-			      dev_priv->psr.psr2_support ? "supported" : "not supported");
+		dev_priv->psr.sink_psr2_support =
+				intel_dp_get_y_coord_required(intel_dp);
+		DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
+			      ? "supported" : "not supported");
 
-		if (dev_priv->psr.psr2_support) {
+		if (dev_priv->psr.sink_psr2_support) {
 			dev_priv->psr.colorimetry_support =
 				intel_dp_get_colorimetry_status(intel_dp);
 			dev_priv->psr.alpm =
@@ -193,7 +194,7 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 	struct edp_vsc_psr psr_vsc;
 
-	if (dev_priv->psr.psr2_support) {
+	if (dev_priv->psr.psr2_enabled) {
 		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
 		memset(&psr_vsc, 0, sizeof(psr_vsc));
 		psr_vsc.sdp_header.HB0 = 0;
@@ -265,7 +266,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Enable ALPM at sink for psr2 */
-	if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
+	if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
 		drm_dp_dpcd_writeb(&intel_dp->aux,
 				DP_RECEIVER_ALPM_CONFIG,
 				DP_ALPM_ENABLE);
@@ -424,7 +425,7 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
 	 */
 
 	/* psr1 and psr2 are mutually exclusive.*/
-	if (dev_priv->psr.psr2_support)
+	if (dev_priv->psr.psr2_enabled)
 		hsw_activate_psr2(intel_dp);
 	else
 		hsw_activate_psr1(intel_dp);
@@ -444,7 +445,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	 * dynamically during PSR enable, and extracted from sink
 	 * caps during eDP detection.
 	 */
-	if (!dev_priv->psr.psr2_support)
+	if (!dev_priv->psr.sink_psr2_support)
 		return false;
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
@@ -543,7 +544,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (dev_priv->psr.psr2_support)
+	if (dev_priv->psr.psr2_enabled)
 		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
 	else
 		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
@@ -570,7 +571,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hsw_psr_setup_aux(intel_dp);
 
-	if (dev_priv->psr.psr2_support) {
+	if (dev_priv->psr.psr2_enabled) {
 		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
 
 		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
@@ -629,7 +630,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 		goto unlock;
 	}
 
-	dev_priv->psr.psr2_support = crtc_state->has_psr2;
+	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 
 	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
@@ -699,7 +700,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 		i915_reg_t psr_status;
 		u32 psr_status_mask;
 
-		if (dev_priv->psr.psr2_support) {
+		if (dev_priv->psr.psr2_enabled) {
 			psr_status = EDP_PSR2_STATUS;
 			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
 
@@ -723,7 +724,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 
 		dev_priv->psr.active = false;
 	} else {
-		if (dev_priv->psr.psr2_support)
+		if (dev_priv->psr.psr2_enabled)
 			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
 		else
 			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
@@ -783,7 +784,7 @@ static void intel_psr_work(struct work_struct *work)
 	 * and be ready for re-enable.
 	 */
 	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.psr2_support) {
+		if (dev_priv->psr.psr2_enabled) {
 			if (intel_wait_for_register(dev_priv,
 						    EDP_PSR2_STATUS,
 						    EDP_PSR2_STATUS_STATE_MASK,
@@ -842,7 +843,7 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
 		return;
 
 	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.psr2_support) {
+		if (dev_priv->psr.psr2_enabled) {
 			val = I915_READ(EDP_PSR2_CTL);
 			WARN_ON(!(val & EDP_PSR2_ENABLE));
 			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
@@ -1011,7 +1012,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 
 	/* By definition flush = invalidate + flush */
 	if (frontbuffer_bits) {
-		if (dev_priv->psr.psr2_support ||
+		if (dev_priv->psr.psr2_enabled ||
 		    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 			intel_psr_exit(dev_priv);
 		} else {

From fe36181be371f3d98441cc23ccbfa89783fa55b1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:43 -0700
Subject: [PATCH 0195/1286] drm/i915/psr: Use PSR2 macro for PSR2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Cosmetic change.

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-7-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h  | 3 ++-
 drivers/gpu/drm/i915/intel_psr.c | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6566f6bc5417..17b86919cddf 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4069,8 +4069,9 @@ enum {
 #define   EDP_PSR2_TP2_TIME_MASK	(3<<8)
 #define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
 #define   EDP_PSR2_FRAME_BEFORE_SU_MASK	(0xf<<4)
-#define   EDP_PSR2_IDLE_MASK		0xf
 #define   EDP_PSR2_FRAME_BEFORE_SU(a)	((a)<<4)
+#define   EDP_PSR2_IDLE_FRAME_MASK	0xf
+#define   EDP_PSR2_IDLE_FRAME_SHIFT	0
 
 #define EDP_PSR2_STATUS			_MMIO(0x6f940)
 #define EDP_PSR2_STATUS_STATE_MASK     (0xf<<28)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 5efddd920681..bec455e28943 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -382,7 +382,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	uint32_t val;
 	uint8_t sink_latency;
 
-	val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
 
 	/* FIXME: selective update is probably totally broken because it doesn't
 	 * mesh at all with our frontbuffer tracking. And the hw alone isn't

From 26e5378d115501a7cab25fdfc6ab10ccb5e4106f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:44 -0700
Subject: [PATCH 0196/1286] drm/i915/psr: Cache sink synchronization latency
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This value does not change over time, so it is better to cache it
than to fetch it on every PSR enable.
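
A hedged sketch of the idea in plain C (dpcd_read_sync_latency() is a
stand-in stub for the drm_dp_dpcd_readb() call on
DP_SYNCHRONIZATION_LATENCY_IN_SINK): read once at detection time,
then reuse the cached value on every PSR enable.

  #include <stdint.h>

  #define MAX_RESYNC_FRAME_COUNT_MASK 0x0f

  struct psr_caps {
      uint8_t sink_sync_latency; /* cached at detection time */
  };

  /* Stub; returns 1 on success like drm_dp_dpcd_readb(). */
  static int dpcd_read_sync_latency(uint8_t *val)
  {
      *val = 0;
      return 1;
  }

  static void psr_cache_sink_caps(struct psr_caps *caps)
  {
      uint8_t val = 0;

      if (dpcd_read_sync_latency(&val) == 1)
          caps->sink_sync_latency = val & MAX_RESYNC_FRAME_COUNT_MASK;
      else
          caps->sink_sync_latency = 0; /* conservative fallback */
  }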

Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-8-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h  |  1 +
 drivers/gpu/drm/i915/intel_psr.c | 28 ++++++++++++++++------------
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 46cae097201c..5373b171bb96 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -608,6 +608,7 @@ struct i915_psr {
 	bool alpm;
 	bool has_hw_tracking;
 	bool psr2_enabled;
+	u8 sink_sync_latency;
 
 	void (*enable_source)(struct intel_dp *,
 			      const struct intel_crtc_state *);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index bec455e28943..d079cf0b034c 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -122,6 +122,18 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 	return alpm_caps & DP_ALPM_CAP;
 }
 
+static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
+{
+	u8 val = 0;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux,
+			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
+		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
+	else
+		DRM_ERROR("Unable to get sink synchronization latency\n");
+	return val;
+}
+
 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv =
@@ -158,6 +170,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 				intel_dp_get_colorimetry_status(intel_dp);
 			dev_priv->psr.alpm =
 				intel_dp_get_alpm_status(intel_dp);
+			dev_priv->psr.sink_sync_latency =
+				intel_dp_get_sink_sync_latency(intel_dp);
 		}
 	}
 }
@@ -379,10 +393,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	 * with the 5 or 6 idle patterns.
 	 */
 	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-	uint32_t val;
-	uint8_t sink_latency;
-
-	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+	u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
 
 	/* FIXME: selective update is probably totally broken because it doesn't
 	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
@@ -392,14 +403,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 		val |= EDP_Y_COORDINATE_VALID | EDP_Y_COORDINATE_ENABLE;
 	}
 
-	if (drm_dp_dpcd_readb(&intel_dp->aux,
-				DP_SYNCHRONIZATION_LATENCY_IN_SINK,
-				&sink_latency) == 1) {
-		sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
-	} else {
-		sink_latency = 0;
-	}
-	val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);
+	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
 
 	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
 		val |= EDP_PSR2_TP2_TIME_2500;

From 4df4925b1b26f285aa76f89d95db3388a2d55281 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 28 Mar 2018 15:30:45 -0700
Subject: [PATCH 0197/1286] drm/i915/psr: Set DPCD PSR2 enable bit when needed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In the 2 eDP 1.4a panels tested, setting or not setting the bit had
no effect, but it is better to set it and comply with the
specification.
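
The change boils down to composing the DPCD value once and issuing a
single write; a sketch under the assumption of the DP_PSR_* bit
positions used by drm_dp_helper.h at the time (shown here only for
illustration):

  #include <stdbool.h>
  #include <stdint.h>

  #define DP_PSR_ENABLE            (1 << 0)
  #define DP_PSR_MAIN_LINK_ACTIVE  (1 << 1)
  #define DP_PSR_ENABLE_PSR2       (1 << 6) /* assumed bit position */

  static uint8_t psr_en_cfg_value(bool psr2_enabled, bool link_standby)
  {
      uint8_t val = DP_PSR_ENABLE;

      if (psr2_enabled)
          val |= DP_PSR_ENABLE_PSR2;
      if (link_standby)
          val |= DP_PSR_MAIN_LINK_ACTIVE;
      return val; /* written once to DP_PSR_EN_CFG */
  }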

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328223046.16125-9-jose.souza@intel.com
---
 drivers/gpu/drm/i915/intel_psr.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index d079cf0b034c..2d53f7398a6d 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -278,18 +278,19 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	u8 dpcd_val = DP_PSR_ENABLE;
 
 	/* Enable ALPM at sink for psr2 */
 	if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
 		drm_dp_dpcd_writeb(&intel_dp->aux,
 				DP_RECEIVER_ALPM_CONFIG,
 				DP_ALPM_ENABLE);
+
+	if (dev_priv->psr.psr2_enabled)
+		dpcd_val |= DP_PSR_ENABLE_PSR2;
 	if (dev_priv->psr.link_standby)
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-	else
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-				   DP_PSR_ENABLE);
+		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
 
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 }

From d93ae190e2c95276caceb3642e6d541d93bba705 Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@armlinux.org.uk>
Date: Thu, 17 Nov 2016 23:38:29 +0000
Subject: [PATCH 0198/1286] drm/i2c: tda998x: move mutex/waitqueue/timer/work
 init early

Move the mutex, waitqueue, timer and detect work initialisation early
in the driver's initialisation, rather than being after we've registered
the CEC device.

Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 drivers/gpu/drm/i2c/tda998x_drv.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index cd3f0873bbdd..83407159e957 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1475,7 +1475,11 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	u32 video;
 	int rev_lo, rev_hi, ret;
 
-	mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
+	mutex_init(&priv->mutex);	/* protect the page access */
+	mutex_init(&priv->audio_mutex); /* protect access from audio thread */
+	init_waitqueue_head(&priv->edid_delay_waitq);
+	timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
+	INIT_WORK(&priv->detect_work, tda998x_detect_work);
 
 	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
 	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1489,11 +1493,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	if (!priv->cec)
 		return -ENODEV;
 
-	mutex_init(&priv->mutex);	/* protect the page access */
-	init_waitqueue_head(&priv->edid_delay_waitq);
-	timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
-	INIT_WORK(&priv->detect_work, tda998x_detect_work);
-
 	/* wake up the device: */
 	cec_write(priv, REG_CEC_ENAMODS,
 			CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);

From 6a765c3fe5497359c11536dfbdcf7526ccb2a33f Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@armlinux.org.uk>
Date: Thu, 17 Nov 2016 23:49:43 +0000
Subject: [PATCH 0199/1286] drm/i2c: tda998x: fix error cleanup paths

If tda998x_get_audio_ports() fails, and we requested the interrupt, we
fail to free the interrupt before returning failure.  Rework the failure
cleanup code and exit paths so that we always clean up properly after an
error, and always propagate the error code.
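
The rework follows the usual kernel error-unwind idiom, sketched
generically below (the step names are illustrative stubs, not the
tda998x functions): each label releases only what was successfully
set up before the failure, in reverse order, and the real error code
is propagated.

  /* Trivial stubs so the sketch compiles; in the driver these map to
   * request_irq(), tda998x_get_audio_ports(), free_irq() and so on. */
  static int acquire_irq(void)         { return 0; }
  static void release_irq(void)        { }
  static int acquire_audio_ports(void) { return -1; }

  static int setup_example(void)
  {
      int ret;

      ret = acquire_irq();
      if (ret)
          return ret;            /* nothing to undo yet */

      ret = acquire_audio_ports();
      if (ret)
          goto err_audio;        /* undo in reverse order */

      return 0;

  err_audio:
      release_irq();
      return ret;                /* propagate the real error code */
  }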

Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 drivers/gpu/drm/i2c/tda998x_drv.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 83407159e957..2a99930f1bda 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1501,10 +1501,15 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
 	/* read version: */
 	rev_lo = reg_read(priv, REG_VERSION_LSB);
+	if (rev_lo < 0) {
+		dev_err(&client->dev, "failed to read version: %d\n", rev_lo);
+		return rev_lo;
+	}
+
 	rev_hi = reg_read(priv, REG_VERSION_MSB);
-	if (rev_lo < 0 || rev_hi < 0) {
-		ret = rev_lo < 0 ? rev_lo : rev_hi;
-		goto fail;
+	if (rev_hi < 0) {
+		dev_err(&client->dev, "failed to read version: %d\n", rev_hi);
+		return rev_hi;
 	}
 
 	priv->rev = rev_lo | rev_hi << 8;
@@ -1528,7 +1533,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	default:
 		dev_err(&client->dev, "found unsupported device: %04x\n",
 			priv->rev);
-		goto fail;
+		return -ENXIO;
 	}
 
 	/* after reset, enable DDC: */
@@ -1566,7 +1571,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 			dev_err(&client->dev,
 				"failed to request IRQ#%u: %d\n",
 				client->irq, ret);
-			goto fail;
+			goto err_irq;
 		}
 
 		/* enable HPD irq */
@@ -1589,19 +1594,19 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
 	ret = tda998x_get_audio_ports(priv, np);
 	if (ret)
-		goto fail;
+		goto err_audio;
 
 	if (priv->audio_port[0].format != AFMT_UNUSED)
 		tda998x_audio_codec_init(priv, &client->dev);
 
 	return 0;
-fail:
-	/* if encoder_init fails, the encoder slave is never registered,
-	 * so cleanup here:
-	 */
-	if (priv->cec)
-		i2c_unregister_device(priv->cec);
-	return -ENXIO;
+
+err_audio:
+	if (client->irq)
+		free_irq(client->irq, priv);
+err_irq:
+	i2c_unregister_device(priv->cec);
+	return ret;
 }
 
 static void tda998x_encoder_prepare(struct drm_encoder *encoder)

From 101e996b8d321514c45136bef4dd0b1d3c577036 Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@armlinux.org.uk>
Date: Thu, 17 Nov 2016 23:40:26 +0000
Subject: [PATCH 0200/1286] drm/i2c: tda998x: move CEC device initialisation
 later

We no longer use the CEC client to access the CEC part itself, so we can
move this later in the initialisation sequence.

Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 drivers/gpu/drm/i2c/tda998x_drv.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 2a99930f1bda..7f2762fab5c9 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1489,9 +1489,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	priv->cec_addr = 0x34 + (client->addr & 0x03);
 	priv->current_page = 0xff;
 	priv->hdmi = client;
-	priv->cec = i2c_new_dummy(client->adapter, priv->cec_addr);
-	if (!priv->cec)
-		return -ENODEV;
 
 	/* wake up the device: */
 	cec_write(priv, REG_CEC_ENAMODS,
@@ -1578,6 +1575,12 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 		cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
 	}
 
+	priv->cec = i2c_new_dummy(client->adapter, priv->cec_addr);
+	if (!priv->cec) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
 	/* enable EDID read irq: */
 	reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
 
@@ -1594,14 +1597,14 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
 	ret = tda998x_get_audio_ports(priv, np);
 	if (ret)
-		goto err_audio;
+		goto fail;
 
 	if (priv->audio_port[0].format != AFMT_UNUSED)
 		tda998x_audio_codec_init(priv, &client->dev);
 
 	return 0;
 
-err_audio:
+fail:
 	if (client->irq)
 		free_irq(client->irq, priv);
 err_irq:

From ba8975f15bb93d7f3ebd995a41c2e4b6945bad78 Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@armlinux.org.uk>
Date: Sat, 11 Mar 2017 11:12:22 +0000
Subject: [PATCH 0201/1286] drm/i2c: tda998x: always disable and clear
 interrupts at probe

Always disable and clear interrupts at probe time to ensure that the
TDA998x is in a sane state.  This ensures that the interrupt line,
which is also the CEC clock calibration signal, is always deasserted.

Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 drivers/gpu/drm/i2c/tda998x_drv.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 7f2762fab5c9..16e0439cad44 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1546,6 +1546,15 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	cec_write(priv, REG_CEC_FRO_IM_CLK_CTRL,
 			CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
 
+	/* ensure interrupts are disabled */
+	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+
+	/* clear pending interrupts */
+	cec_read(priv, REG_CEC_RXSHPDINT);
+	reg_read(priv, REG_INT_FLAGS_0);
+	reg_read(priv, REG_INT_FLAGS_1);
+	reg_read(priv, REG_INT_FLAGS_2);
+
 	/* initialize the optional IRQ */
 	if (client->irq) {
 		unsigned long irq_flags;
@@ -1553,11 +1562,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 		/* init read EDID waitqueue and HDP work */
 		init_waitqueue_head(&priv->wq_edid);
 
-		/* clear pending interrupts */
-		reg_read(priv, REG_INT_FLAGS_0);
-		reg_read(priv, REG_INT_FLAGS_1);
-		reg_read(priv, REG_INT_FLAGS_2);
-
 		irq_flags =
 			irqd_get_trigger_type(irq_get_irq_data(client->irq));
 		irq_flags |= IRQF_SHARED | IRQF_ONESHOT;

From f26052079070cd0e8940717a7a7cb7cdd512ac05 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 31 Mar 2018 14:06:26 +0100
Subject: [PATCH 0202/1286] drm/i915/execlists: Track begin/end of execlists
 submission sequences

We would like to start doing some bookkeeping at the beginning, between
contexts and at the end of execlists submission. We already mark the
beginning and end using EXECLISTS_ACTIVE_USER, to provide an indication
when the HW is idle. This gives us a pair of sequence points we can then
expand on for further bookkeeping.
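
A non-atomic, userspace-flavoured sketch of the begin/end pairing
(the real code uses __test_and_set_bit() on execlists->active; the
names here are illustrative): begin sets the USER bit and reports
whether this was the idle-to-busy transition, end clears it.

  #include <stdbool.h>
  #include <stdint.h>

  #define ACTIVE_USER (1u << 0)

  struct execlists_sketch {
      uint32_t active;
  };

  /* Mirrors execlists_set_active_once(): true only on the first
   * transition from idle to busy. */
  static bool user_begin(struct execlists_sketch *el)
  {
      bool was_idle = !(el->active & ACTIVE_USER);

      el->active |= ACTIVE_USER;
      return was_idle;
  }

  static void user_end(struct execlists_sketch *el)
  {
      el->active &= ~ACTIVE_USER;
  }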

v2: Refactor guc submission to share the same begin/end.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Francisco Jerez <currojerez@riseup.net>
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180331130626.10712-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_guc_submission.c | 17 ++++---
 drivers/gpu/drm/i915/intel_lrc.c            | 50 ++++++++++++++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.h     | 15 ++++++-
 3 files changed, 63 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 207cda062626..749f27916a02 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -728,7 +728,7 @@ done:
 	execlists->first = rb;
 	if (submit) {
 		port_assign(port, last);
-		execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
+		execlists_user_begin(execlists, execlists->port);
 		guc_submit(engine);
 	}
 
@@ -748,17 +748,20 @@ static void guc_submission_tasklet(unsigned long data)
 	struct execlist_port *port = execlists->port;
 	struct i915_request *rq;
 
-	rq = port_request(&port[0]);
+	rq = port_request(port);
 	while (rq && i915_request_completed(rq)) {
 		trace_i915_request_out(rq);
 		i915_request_put(rq);
 
-		execlists_port_complete(execlists, port);
-
-		rq = port_request(&port[0]);
+		port = execlists_port_complete(execlists, port);
+		if (port_isset(port)) {
+			execlists_user_begin(execlists, port);
+			rq = port_request(port);
+		} else {
+			execlists_user_end(execlists);
+			rq = NULL;
+		}
 	}
-	if (!rq)
-		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
 
 	if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
 	    intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f60b61bf8b3b..4d08875422b6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -374,6 +374,19 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
 				   status, rq);
 }
 
+inline void
+execlists_user_begin(struct intel_engine_execlists *execlists,
+		     const struct execlist_port *port)
+{
+	execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
+}
+
+inline void
+execlists_user_end(struct intel_engine_execlists *execlists)
+{
+	execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+}
+
 static inline void
 execlists_context_schedule_in(struct i915_request *rq)
 {
@@ -711,7 +724,7 @@ unlock:
 	spin_unlock_irq(&engine->timeline->lock);
 
 	if (submit) {
-		execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
+		execlists_user_begin(execlists, execlists->port);
 		execlists_submit_ports(engine);
 	}
 
@@ -742,7 +755,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 		port++;
 	}
 
-	execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+	execlists_user_end(execlists);
 }
 
 static void clear_gtiir(struct intel_engine_cs *engine)
@@ -873,7 +886,7 @@ static void execlists_submission_tasklet(unsigned long data)
 {
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port * const port = execlists->port;
+	struct execlist_port *port = execlists->port;
 	struct drm_i915_private *dev_priv = engine->i915;
 	bool fw = false;
 
@@ -1012,10 +1025,28 @@ static void execlists_submission_tasklet(unsigned long data)
 
 			GEM_BUG_ON(count == 0);
 			if (--count == 0) {
+				/*
+				 * On the final event corresponding to the
+				 * submission of this context, we expect either
+				 * an element-switch event or a completion
+				 * event (and on completion, the active-idle
+				 * marker). No more preemptions, lite-restore
+				 * or otherwise.
+				 */
 				GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
 				GEM_BUG_ON(port_isset(&port[1]) &&
 					   !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+				GEM_BUG_ON(!port_isset(&port[1]) &&
+					   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+
+				/*
+				 * We rely on the hardware being strongly
+				 * ordered, that the breadcrumb write is
+				 * coherent (visible from the CPU) before the
+				 * user interrupt and CSB is processed.
+				 */
 				GEM_BUG_ON(!i915_request_completed(rq));
+
 				execlists_context_schedule_out(rq);
 				trace_i915_request_out(rq);
 				i915_request_put(rq);
@@ -1023,17 +1054,14 @@ static void execlists_submission_tasklet(unsigned long data)
 				GEM_TRACE("%s completed ctx=%d\n",
 					  engine->name, port->context_id);
 
-				execlists_port_complete(execlists, port);
+				port = execlists_port_complete(execlists, port);
+				if (port_isset(port))
+					execlists_user_begin(execlists, port);
+				else
+					execlists_user_end(execlists);
 			} else {
 				port_set(port, port_pack(rq, count));
 			}
-
-			/* After the final element, the hw should be idle */
-			GEM_BUG_ON(port_count(port) == 0 &&
-				   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
-			if (port_count(port) == 0)
-				execlists_clear_active(execlists,
-						       EXECLISTS_ACTIVE_USER);
 		}
 
 		if (head != execlists->csb_head) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a02c7b3b9d55..40461e29cdab 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -638,6 +638,13 @@ execlists_set_active(struct intel_engine_execlists *execlists,
 	__set_bit(bit, (unsigned long *)&execlists->active);
 }
 
+static inline bool
+execlists_set_active_once(struct intel_engine_execlists *execlists,
+			  unsigned int bit)
+{
+	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
+}
+
 static inline void
 execlists_clear_active(struct intel_engine_execlists *execlists,
 		       unsigned int bit)
@@ -652,6 +659,10 @@ execlists_is_active(const struct intel_engine_execlists *execlists,
 	return test_bit(bit, (unsigned long *)&execlists->active);
 }
 
+void execlists_user_begin(struct intel_engine_execlists *execlists,
+			  const struct execlist_port *port);
+void execlists_user_end(struct intel_engine_execlists *execlists);
+
 void
 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
 
@@ -664,7 +675,7 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
 	return execlists->port_mask + 1;
 }
 
-static inline void
+static inline struct execlist_port *
 execlists_port_complete(struct intel_engine_execlists * const execlists,
 			struct execlist_port * const port)
 {
@@ -675,6 +686,8 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
 
 	memmove(port, port + 1, m * sizeof(struct execlist_port));
 	memset(port + m, 0, sizeof(struct execlist_port));
+
+	return port;
 }
 
 static inline unsigned int

From c575b7eeb89f94356997abd62d6d5a0590e259b7 Mon Sep 17 00:00:00 2001
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Date: Tue, 3 Apr 2018 14:23:17 +0300
Subject: [PATCH 0203/1286] drm/xen-front: Add support for Xen PV display
 frontend

Add support for the Xen para-virtualized frontend display driver.
The accompanying backend [1] is implemented as a user-space application
and its helper library [2], capable of running as a Weston client
or DRM master.
Configuration of both backend and frontend is done via
Xen guest domain configuration options [3].

Driver limitations:
 1. Only primary plane without additional properties is supported.
 2. Only one video mode is supported, and its resolution is configured
    via XenStore.
 3. All CRTCs operate at a fixed frequency of 60Hz.

1. Implement Xen bus state machine for the frontend driver according to
the state diagram and recovery flow from display para-virtualized
protocol: xen/interface/io/displif.h.

2. Read configuration values from Xen store according
to xen/interface/io/displif.h protocol:
  - read connector(s) configuration
  - read buffer allocation mode (backend/frontend)

3. Handle Xen event channels:
  - create for all configured connectors and publish
    corresponding ring references and event channels in Xen store,
    so backend can connect
  - implement event channels interrupt handlers
  - create and destroy event channels with respect to Xen bus state

4. Implement shared buffer handling according to the
para-virtualized display device protocol at xen/interface/io/displif.h:
  - handle page directories according to displif protocol:
    - allocate and share page directories
    - grant references to the required set of pages for the
      page directory
  - allocate Xen ballooned pages via the Xen balloon driver
    with alloc_xenballooned_pages/free_xenballooned_pages
  - grant references to the required set of pages for the
    shared buffer itself
  - implement pages map/unmap for the buffers allocated by the
    backend (gnttab_map_refs/gnttab_unmap_refs)

5. Implement kernel modesetting/connector handling using the
DRM simple KMS helper pipeline:

- implement KMS part of the driver with the help of DRM
  simple pipeline helper, which is possible due to the fact
  that the para-virtualized driver only supports a single
  (primary) plane:
  - initialize connectors according to XenStore configuration
  - handle frame done events from the backend
  - create and destroy frame buffers and propagate those
    to the backend
  - propagate set/reset mode configuration to the backend on display
    enable/disable callbacks
  - send page flip request to the backend and implement logic for
    reporting backend IO errors on prepare fb callback

- implement virtual connector handling:
  - support only pixel formats suitable for single plane modes
  - make sure the connector is always connected
  - support a single video mode as per para-virtualized driver
    configuration

6. Implement GEM handling depending on driver mode of operation:
depending on the requirements for the para-virtualized environment,
namely requirements dictated by the accompanying DRM/(v)GPU drivers
running in both host and guest environments, a number of operating
modes of the para-virtualized display driver are supported:
 - display buffers can be allocated by either
   frontend driver or backend
 - display buffers can be allocated to be contiguous
   in memory or not

Note! Frontend driver itself has no dependency on contiguous memory for
its operation.

6.1. Buffers allocated by the frontend driver.

The below modes of operation are configured at compile-time via
frontend driver's kernel configuration.

6.1.1. Front driver configured to use GEM CMA helpers
     This use-case is useful when used with an accompanying DRM/vGPU
     driver in the guest domain which was designed to only work with
     contiguous buffers, e.g. a DRM driver based on GEM CMA helpers: such
     drivers can only import contiguous PRIME buffers, thus requiring the
     frontend driver to provide such. In order to implement this mode of
     operation the para-virtualized frontend driver can be configured to
     use GEM CMA helpers.

6.1.2. Front driver doesn't use GEM CMA
     If accompanying drivers can cope with non-contiguous memory then, to
     lower pressure on the CMA subsystem of the kernel, the driver can
     allocate buffers from system memory.

Note! If used with accompanying DRM/(v)GPU drivers this mode of operation
may require IOMMU support on the platform, so accompanying DRM/vGPU
hardware can still reach display buffer memory while importing PRIME
buffers from the frontend driver.

6.2. Buffers allocated by the backend

This mode of operation is run-time configured via guest domain
configuration through XenStore entries.

For systems which do not provide IOMMU support but have specific
requirements for display buffers, it is possible to allocate such
buffers on the backend side and share them with the frontend.
For example, if host domain is 1:1 mapped and has DRM/GPU hardware
expecting physically contiguous memory, this allows implementing
zero-copying use-cases.

Note, while using this scenario the following should be considered:
  a) If guest domain dies then pages/grants received from the backend
     cannot be claimed back
  b) Misbehaving guest may send too many requests to the
     backend exhausting its grant references and memory
     (consider this from security POV).

Note! Configuration options 1.1 (contiguous display buffers) and 2
(backend allocated buffers) are not supported at the same time.

7. Handle communication with the backend:
 - send requests and wait for the responses according
   to the displif protocol
 - serialize access to the communication channel
 - time-out used for backend communication is set to 3000 ms
 - manage display buffers shared with the backend
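
A condensed sketch of that request/response pattern (plain C with
stand-in stubs, not the driver code): the request is placed on the
shared ring under the locks, the backend is notified via the event
channel, and the caller then waits for the response with the 3000 ms
timeout.

  /* Stubs so the sketch compiles; in the driver these map to
   * mutex_lock()/spin_lock_irqsave(), RING_GET_REQUEST(), the event
   * channel notification and wait_for_completion_timeout(). */
  static void lock_request_channel(void)   { }
  static void unlock_request_channel(void) { }
  static void lock_ring(void)              { }
  static void unlock_ring(void)            { }
  static void put_request_on_ring(void)    { }
  static int  notify_backend(void)         { return 0; }
  static int  wait_for_response(void)      { return 0; }

  static int do_request(void)
  {
      int ret;

      lock_request_channel();        /* serialize callers */
      lock_ring();                   /* protect the shared ring */
      put_request_on_ring();
      ret = notify_backend();
      unlock_ring();

      if (ret == 0)
          ret = wait_for_response(); /* -ETIMEDOUT after 3000 ms */

      unlock_request_channel();
      return ret;
  }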

[1] https://github.com/xen-troops/displ_be
[2] https://github.com/xen-troops/libxenbe
[3] https://xenbits.xen.org/gitweb/?p=xen.git;a=blob;f=docs/man/xl.cfg.pod.5.in;h=a699367779e2ae1212ff8f638eff0206ec1a1cc9;hb=refs/heads/master#l1257

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180403112317.28751-2-andr2000@gmail.com
---
 Documentation/gpu/drivers.rst               |   1 +
 Documentation/gpu/xen-front.rst             |  43 +
 drivers/gpu/drm/Kconfig                     |   2 +
 drivers/gpu/drm/Makefile                    |   1 +
 drivers/gpu/drm/xen/Kconfig                 |  30 +
 drivers/gpu/drm/xen/Makefile                |  16 +
 drivers/gpu/drm/xen/xen_drm_front.c         | 882 ++++++++++++++++++++
 drivers/gpu/drm/xen/xen_drm_front.h         | 188 +++++
 drivers/gpu/drm/xen/xen_drm_front_cfg.c     |  77 ++
 drivers/gpu/drm/xen/xen_drm_front_cfg.h     |  37 +
 drivers/gpu/drm/xen/xen_drm_front_conn.c    | 115 +++
 drivers/gpu/drm/xen/xen_drm_front_conn.h    |  27 +
 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c | 387 +++++++++
 drivers/gpu/drm/xen/xen_drm_front_evtchnl.h |  81 ++
 drivers/gpu/drm/xen/xen_drm_front_gem.c     | 314 +++++++
 drivers/gpu/drm/xen/xen_drm_front_gem.h     |  43 +
 drivers/gpu/drm/xen/xen_drm_front_gem_cma.c |  79 ++
 drivers/gpu/drm/xen/xen_drm_front_kms.c     | 372 +++++++++
 drivers/gpu/drm/xen/xen_drm_front_kms.h     |  26 +
 drivers/gpu/drm/xen/xen_drm_front_shbuf.c   | 436 ++++++++++
 drivers/gpu/drm/xen/xen_drm_front_shbuf.h   |  72 ++
 21 files changed, 3229 insertions(+)
 create mode 100644 Documentation/gpu/xen-front.rst
 create mode 100644 drivers/gpu/drm/xen/Kconfig
 create mode 100644 drivers/gpu/drm/xen/Makefile
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front.c
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front.h
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_cfg.c
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_cfg.h
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_conn.c
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_conn.h
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_evtchnl.h
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_gem.c
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_gem.h
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_gem_cma.c
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_kms.c
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_kms.h
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_shbuf.c
 create mode 100644 drivers/gpu/drm/xen/xen_drm_front_shbuf.h

diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index e8c84419a2a1..d3ab6abae838 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -12,6 +12,7 @@ GPU Driver Documentation
    tve200
    vc4
    bridge/dw-hdmi
+   xen-front
 
 .. only::  subproject and html
 
diff --git a/Documentation/gpu/xen-front.rst b/Documentation/gpu/xen-front.rst
new file mode 100644
index 000000000000..009d942386c5
--- /dev/null
+++ b/Documentation/gpu/xen-front.rst
@@ -0,0 +1,43 @@
+====================================================
+ drm/xen-front Xen para-virtualized frontend driver
+====================================================
+
+This frontend driver implements Xen para-virtualized display
+according to the display protocol described at
+include/xen/interface/io/displif.h
+
+Driver modes of operation in terms of display buffers used
+==========================================================
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Driver modes of operation in terms of display buffers used
+
+Buffers allocated by the frontend driver
+----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Buffers allocated by the frontend driver
+
+With GEM CMA helpers
+~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: With GEM CMA helpers
+
+Without GEM CMA helpers
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Without GEM CMA helpers
+
+Buffers allocated by the backend
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Buffers allocated by the backend
+
+Driver limitations
+==================
+
+.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
+   :doc: Driver limitations
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index deeefa7a1773..757825ac60df 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -289,6 +289,8 @@ source "drivers/gpu/drm/pl111/Kconfig"
 
 source "drivers/gpu/drm/tve200/Kconfig"
 
+source "drivers/gpu/drm/xen/Kconfig"
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 50093ff4479b..9d66657ea117 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -103,3 +103,4 @@ obj-$(CONFIG_DRM_MXSFB)	+= mxsfb/
 obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
 obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_XEN) += xen/
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
new file mode 100644
index 000000000000..4f4abc91f3b6
--- /dev/null
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -0,0 +1,30 @@
+config DRM_XEN
+	bool "DRM Support for Xen guest OS"
+	depends on XEN
+	help
+	  Choose this option if you want to enable DRM support
+	  for Xen.
+
+config DRM_XEN_FRONTEND
+	tristate "Para-virtualized frontend driver for Xen guest OS"
+	depends on DRM_XEN
+	depends on DRM
+	select DRM_KMS_HELPER
+	select VIDEOMODE_HELPERS
+	select XEN_XENBUS_FRONTEND
+	help
+	  Choose this option if you want to enable a para-virtualized
+	  frontend DRM/KMS driver for Xen guest OSes.
+
+config DRM_XEN_FRONTEND_CMA
+	bool "Use DRM CMA to allocate dumb buffers"
+	depends on DRM_XEN_FRONTEND
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	help
+	  Use DRM CMA helpers to allocate display buffers.
+	  This is useful for the use-cases when guest driver needs to
+	  share or export buffers to other drivers which only expect
+	  contiguous buffers.
+	  Note: in this mode driver cannot use buffers allocated
+	  by the backend.
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
new file mode 100644
index 000000000000..352730dc6c13
--- /dev/null
+++ b/drivers/gpu/drm/xen/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+
+drm_xen_front-objs := xen_drm_front.o \
+		      xen_drm_front_kms.o \
+		      xen_drm_front_conn.o \
+		      xen_drm_front_evtchnl.o \
+		      xen_drm_front_shbuf.o \
+		      xen_drm_front_cfg.o
+
+ifeq ($(CONFIG_DRM_XEN_FRONTEND_CMA),y)
+	drm_xen_front-objs += xen_drm_front_gem_cma.o
+else
+	drm_xen_front-objs += xen_drm_front_gem.o
+endif
+
+obj-$(CONFIG_DRM_XEN_FRONTEND) += drm_xen_front.o
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
new file mode 100644
index 000000000000..4a08b77f1c9e
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/of_device.h>
+
+#include <xen/platform_pci.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+
+#include <xen/interface/io/displif.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_cfg.h"
+#include "xen_drm_front_evtchnl.h"
+#include "xen_drm_front_gem.h"
+#include "xen_drm_front_kms.h"
+#include "xen_drm_front_shbuf.h"
+
+struct xen_drm_front_dbuf {
+	struct list_head list;
+	u64 dbuf_cookie;
+	u64 fb_cookie;
+	struct xen_drm_front_shbuf *shbuf;
+};
+
+static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
+			    struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
+{
+	struct xen_drm_front_dbuf *dbuf;
+
+	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
+	if (!dbuf)
+		return -ENOMEM;
+
+	dbuf->dbuf_cookie = dbuf_cookie;
+	dbuf->shbuf = shbuf;
+	list_add(&dbuf->list, &front_info->dbuf_list);
+	return 0;
+}
+
+static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
+					   u64 dbuf_cookie)
+{
+	struct xen_drm_front_dbuf *buf, *q;
+
+	list_for_each_entry_safe(buf, q, dbuf_list, list)
+		if (buf->dbuf_cookie == dbuf_cookie)
+			return buf;
+
+	return NULL;
+}
+
+static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
+{
+	struct xen_drm_front_dbuf *buf, *q;
+
+	list_for_each_entry_safe(buf, q, dbuf_list, list)
+		if (buf->fb_cookie == fb_cookie)
+			xen_drm_front_shbuf_flush(buf->shbuf);
+}
+
+static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
+{
+	struct xen_drm_front_dbuf *buf, *q;
+
+	list_for_each_entry_safe(buf, q, dbuf_list, list)
+		if (buf->dbuf_cookie == dbuf_cookie) {
+			list_del(&buf->list);
+			xen_drm_front_shbuf_unmap(buf->shbuf);
+			xen_drm_front_shbuf_free(buf->shbuf);
+			kfree(buf);
+			break;
+		}
+}
+
+static void dbuf_free_all(struct list_head *dbuf_list)
+{
+	struct xen_drm_front_dbuf *buf, *q;
+
+	list_for_each_entry_safe(buf, q, dbuf_list, list) {
+		list_del(&buf->list);
+		xen_drm_front_shbuf_unmap(buf->shbuf);
+		xen_drm_front_shbuf_free(buf->shbuf);
+		kfree(buf);
+	}
+}
+
+static struct xendispl_req *
+be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
+{
+	struct xendispl_req *req;
+
+	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
+			       evtchnl->u.req.ring.req_prod_pvt);
+	req->operation = operation;
+	req->id = evtchnl->evt_next_id++;
+	evtchnl->evt_id = req->id;
+	return req;
+}
+
+static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
+			   struct xendispl_req *req)
+{
+	reinit_completion(&evtchnl->u.req.completion);
+	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+		return -EIO;
+
+	xen_drm_front_evtchnl_flush(evtchnl);
+	return 0;
+}
+
+static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
+{
+	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
+			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
+		return -ETIMEDOUT;
+
+	return evtchnl->u.req.resp_status;
+}
+
+int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
+			   u32 x, u32 y, u32 width, u32 height,
+			   u32 bpp, u64 fb_cookie)
+{
+	struct xen_drm_front_evtchnl *evtchnl;
+	struct xen_drm_front_info *front_info;
+	struct xendispl_req *req;
+	unsigned long flags;
+	int ret;
+
+	front_info = pipeline->drm_info->front_info;
+	evtchnl = &front_info->evt_pairs[pipeline->index].req;
+	if (unlikely(!evtchnl))
+		return -EIO;
+
+	mutex_lock(&evtchnl->u.req.req_io_lock);
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
+	req->op.set_config.x = x;
+	req->op.set_config.y = y;
+	req->op.set_config.width = width;
+	req->op.set_config.height = height;
+	req->op.set_config.bpp = bpp;
+	req->op.set_config.fb_cookie = fb_cookie;
+
+	ret = be_stream_do_io(evtchnl, req);
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+	if (ret == 0)
+		ret = be_stream_wait_io(evtchnl);
+
+	mutex_unlock(&evtchnl->u.req.req_io_lock);
+	return ret;
+}
+
+static int be_dbuf_create_int(struct xen_drm_front_info *front_info,
+			      u64 dbuf_cookie, u32 width, u32 height,
+			      u32 bpp, u64 size, struct page **pages,
+			      struct sg_table *sgt)
+{
+	struct xen_drm_front_evtchnl *evtchnl;
+	struct xen_drm_front_shbuf *shbuf;
+	struct xendispl_req *req;
+	struct xen_drm_front_shbuf_cfg buf_cfg;
+	unsigned long flags;
+	int ret;
+
+	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+	if (unlikely(!evtchnl))
+		return -EIO;
+
+	memset(&buf_cfg, 0, sizeof(buf_cfg));
+	buf_cfg.xb_dev = front_info->xb_dev;
+	buf_cfg.pages = pages;
+	buf_cfg.size = size;
+	buf_cfg.sgt = sgt;
+	buf_cfg.be_alloc = front_info->cfg.be_alloc;
+
+	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
+	if (!shbuf)
+		return -ENOMEM;
+
+	ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
+	if (ret < 0) {
+		xen_drm_front_shbuf_free(shbuf);
+		return ret;
+	}
+
+	mutex_lock(&evtchnl->u.req.req_io_lock);
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
+	req->op.dbuf_create.gref_directory =
+			xen_drm_front_shbuf_get_dir_start(shbuf);
+	req->op.dbuf_create.buffer_sz = size;
+	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
+	req->op.dbuf_create.width = width;
+	req->op.dbuf_create.height = height;
+	req->op.dbuf_create.bpp = bpp;
+	if (buf_cfg.be_alloc)
+		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;
+
+	ret = be_stream_do_io(evtchnl, req);
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+	if (ret < 0)
+		goto fail;
+
+	ret = be_stream_wait_io(evtchnl);
+	if (ret < 0)
+		goto fail;
+
+	ret = xen_drm_front_shbuf_map(shbuf);
+	if (ret < 0)
+		goto fail;
+
+	mutex_unlock(&evtchnl->u.req.req_io_lock);
+	return 0;
+
+fail:
+	mutex_unlock(&evtchnl->u.req.req_io_lock);
+	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+	return ret;
+}
+
+int xen_drm_front_dbuf_create_from_sgt(struct xen_drm_front_info *front_info,
+				       u64 dbuf_cookie, u32 width, u32 height,
+				       u32 bpp, u64 size, struct sg_table *sgt)
+{
+	return be_dbuf_create_int(front_info, dbuf_cookie, width, height,
+				  bpp, size, NULL, sgt);
+}
+
+int xen_drm_front_dbuf_create_from_pages(struct xen_drm_front_info *front_info,
+					 u64 dbuf_cookie, u32 width, u32 height,
+					 u32 bpp, u64 size, struct page **pages)
+{
+	return be_dbuf_create_int(front_info, dbuf_cookie, width, height,
+				  bpp, size, pages, NULL);
+}
+
+static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
+				      u64 dbuf_cookie)
+{
+	struct xen_drm_front_evtchnl *evtchnl;
+	struct xendispl_req *req;
+	unsigned long flags;
+	bool be_alloc;
+	int ret;
+
+	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+	if (unlikely(!evtchnl))
+		return -EIO;
+
+	be_alloc = front_info->cfg.be_alloc;
+
+	/*
+	 * For a backend allocated buffer, release the references now, so the
+	 * backend can free the buffer.
+	 */
+	if (be_alloc)
+		dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+
+	mutex_lock(&evtchnl->u.req.req_io_lock);
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
+	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;
+
+	ret = be_stream_do_io(evtchnl, req);
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+	if (ret == 0)
+		ret = be_stream_wait_io(evtchnl);
+
+	/*
+	 * Do this regardless of communication status with the backend:
+	 * if we cannot remove remote resources, remove what we can locally.
+	 */
+	if (!be_alloc)
+		dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+
+	mutex_unlock(&evtchnl->u.req.req_io_lock);
+	return ret;
+}
+
+int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
+			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
+			    u32 height, u32 pixel_format)
+{
+	struct xen_drm_front_evtchnl *evtchnl;
+	struct xen_drm_front_dbuf *buf;
+	struct xendispl_req *req;
+	unsigned long flags;
+	int ret;
+
+	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+	if (unlikely(!evtchnl))
+		return -EIO;
+
+	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
+	if (!buf)
+		return -EINVAL;
+
+	buf->fb_cookie = fb_cookie;
+
+	mutex_lock(&evtchnl->u.req.req_io_lock);
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
+	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
+	req->op.fb_attach.fb_cookie = fb_cookie;
+	req->op.fb_attach.width = width;
+	req->op.fb_attach.height = height;
+	req->op.fb_attach.pixel_format = pixel_format;
+
+	ret = be_stream_do_io(evtchnl, req);
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+	if (ret == 0)
+		ret = be_stream_wait_io(evtchnl);
+
+	mutex_unlock(&evtchnl->u.req.req_io_lock);
+	return ret;
+}
+
+int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
+			    u64 fb_cookie)
+{
+	struct xen_drm_front_evtchnl *evtchnl;
+	struct xendispl_req *req;
+	unsigned long flags;
+	int ret;
+
+	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+	if (unlikely(!evtchnl))
+		return -EIO;
+
+	mutex_lock(&evtchnl->u.req.req_io_lock);
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
+	req->op.fb_detach.fb_cookie = fb_cookie;
+
+	ret = be_stream_do_io(evtchnl, req);
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+	if (ret == 0)
+		ret = be_stream_wait_io(evtchnl);
+
+	mutex_unlock(&evtchnl->u.req.req_io_lock);
+	return ret;
+}
+
+int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
+			    int conn_idx, u64 fb_cookie)
+{
+	struct xen_drm_front_evtchnl *evtchnl;
+	struct xendispl_req *req;
+	unsigned long flags;
+	int ret;
+
+	if (unlikely(conn_idx >= front_info->num_evt_pairs))
+		return -EINVAL;
+
+	dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
+	evtchnl = &front_info->evt_pairs[conn_idx].req;
+
+	mutex_lock(&evtchnl->u.req.req_io_lock);
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
+	req->op.pg_flip.fb_cookie = fb_cookie;
+
+	ret = be_stream_do_io(evtchnl, req);
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+	if (ret == 0)
+		ret = be_stream_wait_io(evtchnl);
+
+	mutex_unlock(&evtchnl->u.req.req_io_lock);
+	return ret;
+}
+
+void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
+				 int conn_idx, u64 fb_cookie)
+{
+	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
+
+	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
+		return;
+
+	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
+					fb_cookie);
+}
+
+static int xen_drm_drv_dumb_create(struct drm_file *filp,
+				   struct drm_device *dev,
+				   struct drm_mode_create_dumb *args)
+{
+	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+	struct drm_gem_object *obj;
+	int ret;
+
+	/*
+	 * Dumb creation is a two-stage process: first we create a fully
+	 * constructed GEM object which is communicated to the backend, and
+	 * only after that we can create GEM's handle. This is done because
+	 * of possible races: once a handle is created it becomes immediately
+	 * visible to user-space, which could then try to access an object
+	 * that has no backing pages yet.
+	 * For details also see drm_gem_handle_create().
+	 */
+	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	args->size = args->pitch * args->height;
+
+	obj = xen_drm_front_gem_create(dev, args->size);
+	if (IS_ERR_OR_NULL(obj)) {
+		ret = PTR_ERR(obj);
+		goto fail;
+	}
+
+	/*
+	 * In case of CONFIG_DRM_XEN_FRONTEND_CMA gem_obj is constructed
+	 * via DRM CMA helpers and doesn't have ->pages allocated
+	 * (xen_drm_front_gem_get_pages() will return NULL), but it can
+	 * provide an sg table instead.
+	 */
+	if (xen_drm_front_gem_get_pages(obj))
+		ret = xen_drm_front_dbuf_create_from_pages(drm_info->front_info,
+				xen_drm_front_dbuf_to_cookie(obj),
+				args->width, args->height, args->bpp,
+				args->size,
+				xen_drm_front_gem_get_pages(obj));
+	else
+		ret = xen_drm_front_dbuf_create_from_sgt(drm_info->front_info,
+				xen_drm_front_dbuf_to_cookie(obj),
+				args->width, args->height, args->bpp,
+				args->size,
+				xen_drm_front_gem_get_sg_table(obj));
+	if (ret)
+		goto fail_backend;
+
+	/* This is the tail of GEM object creation */
+	ret = drm_gem_handle_create(filp, obj, &args->handle);
+	if (ret)
+		goto fail_handle;
+
+	/* Drop reference from allocate - handle holds it now */
+	drm_gem_object_put_unlocked(obj);
+	return 0;
+
+fail_handle:
+	xen_drm_front_dbuf_destroy(drm_info->front_info,
+				   xen_drm_front_dbuf_to_cookie(obj));
+fail_backend:
+	/* drop reference from allocate */
+	drm_gem_object_put_unlocked(obj);
+fail:
+	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
+	return ret;
+}
+
+static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
+{
+	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
+	int idx;
+
+	if (drm_dev_enter(obj->dev, &idx)) {
+		xen_drm_front_dbuf_destroy(drm_info->front_info,
+					   xen_drm_front_dbuf_to_cookie(obj));
+		drm_dev_exit(idx);
+	} else {
+		dbuf_free(&drm_info->front_info->dbuf_list,
+			  xen_drm_front_dbuf_to_cookie(obj));
+	}
+
+	xen_drm_front_gem_free_object_unlocked(obj);
+}
+
+static void xen_drm_drv_release(struct drm_device *dev)
+{
+	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+	struct xen_drm_front_info *front_info = drm_info->front_info;
+
+	xen_drm_front_kms_fini(drm_info);
+
+	drm_atomic_helper_shutdown(dev);
+	drm_mode_config_cleanup(dev);
+
+	drm_dev_fini(dev);
+	kfree(dev);
+
+	if (front_info->cfg.be_alloc)
+		xenbus_switch_state(front_info->xb_dev,
+				    XenbusStateInitialising);
+
+	kfree(drm_info);
+}
+
+static const struct file_operations xen_drm_dev_fops = {
+	.owner          = THIS_MODULE,
+	.open           = drm_open,
+	.release        = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = drm_compat_ioctl,
+#endif
+	.poll           = drm_poll,
+	.read           = drm_read,
+	.llseek         = no_llseek,
+#ifdef CONFIG_DRM_XEN_FRONTEND_CMA
+	.mmap           = drm_gem_cma_mmap,
+#else
+	.mmap           = xen_drm_front_gem_mmap,
+#endif
+};
+
+static const struct vm_operations_struct xen_drm_drv_vm_ops = {
+	.open           = drm_gem_vm_open,
+	.close          = drm_gem_vm_close,
+};
+
+static struct drm_driver xen_drm_driver = {
+	.driver_features           = DRIVER_GEM | DRIVER_MODESET |
+				     DRIVER_PRIME | DRIVER_ATOMIC,
+	.release                   = xen_drm_drv_release,
+	.gem_vm_ops                = &xen_drm_drv_vm_ops,
+	.gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
+	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
+	.gem_prime_import          = drm_gem_prime_import,
+	.gem_prime_export          = drm_gem_prime_export,
+	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
+	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
+	.dumb_create               = xen_drm_drv_dumb_create,
+	.fops                      = &xen_drm_dev_fops,
+	.name                      = "xendrm-du",
+	.desc                      = "Xen PV DRM Display Unit",
+	.date                      = "20180221",
+	.major                     = 1,
+	.minor                     = 0,
+
+#ifdef CONFIG_DRM_XEN_FRONTEND_CMA
+	.gem_prime_vmap            = drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap          = drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap            = drm_gem_cma_prime_mmap,
+#else
+	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
+	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
+	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
+#endif
+};
+
+static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
+{
+	struct device *dev = &front_info->xb_dev->dev;
+	struct xen_drm_front_drm_info *drm_info;
+	struct drm_device *drm_dev;
+	int ret;
+
+	DRM_INFO("Creating %s\n", xen_drm_driver.desc);
+
+	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
+	if (!drm_info) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	drm_info->front_info = front_info;
+	front_info->drm_info = drm_info;
+
+	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
+	if (IS_ERR(drm_dev)) {
+		ret = PTR_ERR(drm_dev);
+		goto fail;
+	}
+
+	drm_info->drm_dev = drm_dev;
+
+	drm_dev->dev_private = drm_info;
+
+	ret = xen_drm_front_kms_init(drm_info);
+	if (ret) {
+		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
+		goto fail_modeset;
+	}
+
+	ret = drm_dev_register(drm_dev, 0);
+	if (ret)
+		goto fail_register;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+		 xen_drm_driver.name, xen_drm_driver.major,
+		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
+		 xen_drm_driver.date, drm_dev->primary->index);
+
+	return 0;
+
+fail_register:
+	drm_dev_unregister(drm_dev);
+fail_modeset:
+	drm_kms_helper_poll_fini(drm_dev);
+	drm_mode_config_cleanup(drm_dev);
+fail:
+	kfree(drm_info);
+	return ret;
+}
+
+static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
+{
+	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
+	struct drm_device *dev;
+
+	if (!drm_info)
+		return;
+
+	dev = drm_info->drm_dev;
+	if (!dev)
+		return;
+
+	/* Nothing to do if device is already unplugged */
+	if (drm_dev_is_unplugged(dev))
+		return;
+
+	drm_kms_helper_poll_fini(dev);
+	drm_dev_unplug(dev);
+
+	front_info->drm_info = NULL;
+
+	xen_drm_front_evtchnl_free_all(front_info);
+	dbuf_free_all(&front_info->dbuf_list);
+
+	/*
+	 * If we are not using backend allocated buffers, then tell the
+	 * backend we are ready to (re)initialize. Otherwise, wait for
+	 * drm_driver.release.
+	 */
+	if (!front_info->cfg.be_alloc)
+		xenbus_switch_state(front_info->xb_dev,
+				    XenbusStateInitialising);
+}
+
+static int displback_initwait(struct xen_drm_front_info *front_info)
+{
+	struct xen_drm_front_cfg *cfg = &front_info->cfg;
+	int ret;
+
+	cfg->front_info = front_info;
+	ret = xen_drm_front_cfg_card(front_info, cfg);
+	if (ret < 0)
+		return ret;
+
+	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
+	/* Create event channels for all connectors and publish */
+	ret = xen_drm_front_evtchnl_create_all(front_info);
+	if (ret < 0)
+		return ret;
+
+	return xen_drm_front_evtchnl_publish_all(front_info);
+}
+
+static int displback_connect(struct xen_drm_front_info *front_info)
+{
+	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
+	return xen_drm_drv_init(front_info);
+}
+
+static void displback_disconnect(struct xen_drm_front_info *front_info)
+{
+	if (!front_info->drm_info)
+		return;
+
+	/* Tell the backend to wait until we release the DRM driver. */
+	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);
+
+	xen_drm_drv_fini(front_info);
+}
+
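+/*
+ * XenBus state handling, from the frontend's point of view: when the
+ * backend reaches InitWait the frontend reads the configuration, creates
+ * and publishes the event channels and switches to Initialised; when the
+ * backend connects, the DRM device is brought up and the frontend switches
+ * to Connected. Unexpected backend state changes tear the existing DRM
+ * device down via displback_disconnect().
+ */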
+static void displback_changed(struct xenbus_device *xb_dev,
+			      enum xenbus_state backend_state)
+{
+	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
+	int ret;
+
+	DRM_DEBUG("Backend state is %s, front is %s\n",
+		  xenbus_strstate(backend_state),
+		  xenbus_strstate(xb_dev->state));
+
+	switch (backend_state) {
+	case XenbusStateReconfiguring:
+		/* fall through */
+	case XenbusStateReconfigured:
+		/* fall through */
+	case XenbusStateInitialised:
+		break;
+
+	case XenbusStateInitialising:
+		if (xb_dev->state == XenbusStateReconfiguring)
+			break;
+
+		/* recovering after backend unexpected closure */
+		displback_disconnect(front_info);
+		break;
+
+	case XenbusStateInitWait:
+		if (xb_dev->state == XenbusStateReconfiguring)
+			break;
+
+		/* recovering after backend unexpected closure */
+		displback_disconnect(front_info);
+		if (xb_dev->state != XenbusStateInitialising)
+			break;
+
+		ret = displback_initwait(front_info);
+		if (ret < 0)
+			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
+		else
+			xenbus_switch_state(xb_dev, XenbusStateInitialised);
+		break;
+
+	case XenbusStateConnected:
+		if (xb_dev->state != XenbusStateInitialised)
+			break;
+
+		ret = displback_connect(front_info);
+		if (ret < 0) {
+			displback_disconnect(front_info);
+			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
+		} else {
+			xenbus_switch_state(xb_dev, XenbusStateConnected);
+		}
+		break;
+
+	case XenbusStateClosing:
+		/*
+		 * In this state the backend starts freeing resources, so
+		 * let it go into the closed state, so that we can also
+		 * remove ours.
+		 */
+		break;
+
+	case XenbusStateUnknown:
+		/* fall through */
+	case XenbusStateClosed:
+		if (xb_dev->state == XenbusStateClosed)
+			break;
+
+		displback_disconnect(front_info);
+		break;
+	}
+}
+
+static int xen_drv_probe(struct xenbus_device *xb_dev,
+			 const struct xenbus_device_id *id)
+{
+	struct xen_drm_front_info *front_info;
+	struct device *dev = &xb_dev->dev;
+	int ret;
+
+	/*
+	 * The device is not spawned from a device tree, so arch_setup_dma_ops
+	 * is not called, thus leaving the device with dummy DMA ops.
+	 * This makes the device return an error on PRIME buffer import, which
+	 * is not correct: to fix this, call of_dma_configure() with a NULL
+	 * node to set the default DMA ops.
+	 */
+	dev->bus->force_dma = true;
+	dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	ret = of_dma_configure(dev, NULL);
+	if (ret < 0) {
+		DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
+		return ret;
+	}
+
+	front_info = devm_kzalloc(&xb_dev->dev,
+				  sizeof(*front_info), GFP_KERNEL);
+	if (!front_info)
+		return -ENOMEM;
+
+	front_info->xb_dev = xb_dev;
+	spin_lock_init(&front_info->io_lock);
+	INIT_LIST_HEAD(&front_info->dbuf_list);
+	dev_set_drvdata(&xb_dev->dev, front_info);
+
+	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
+}
+
+static int xen_drv_remove(struct xenbus_device *dev)
+{
+	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
+	int to = 100;
+
+	xenbus_switch_state(dev, XenbusStateClosing);
+
+	/*
+	 * On driver removal it is disconnected from XenBus,
+	 * so no backend state change events come via the .otherend_changed
+	 * callback. This prevents us from exiting gracefully, e.g.
+	 * signaling the backend to free event channels, waiting for its
+	 * state to change to XenbusStateClosed and cleaning up at our end.
+	 * Normally, when the front driver is removed, the backend will
+	 * finally go into the XenbusStateInitWait state.
+	 *
+	 * Workaround: read the backend's state manually and wait with a
+	 * time-out.
+	 */
+	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
+				     XenbusStateUnknown) != XenbusStateInitWait) &&
+				     to--)
+		msleep(10);
+
+	if (to < 0) {
+		unsigned int state;
+
+		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
+					     "state", XenbusStateUnknown);
+		DRM_ERROR("Backend state is %s while removing driver\n",
+			  xenbus_strstate(state));
+	}
+
+	xen_drm_drv_fini(front_info);
+	xenbus_frontend_closed(dev);
+	return 0;
+}
+
+static const struct xenbus_device_id xen_driver_ids[] = {
+	{ XENDISPL_DRIVER_NAME },
+	{ "" }
+};
+
+static struct xenbus_driver xen_driver = {
+	.ids = xen_driver_ids,
+	.probe = xen_drv_probe,
+	.remove = xen_drv_remove,
+	.otherend_changed = displback_changed,
+};
+
+static int __init xen_drv_init(void)
+{
+	/* At the moment we only support the case where XEN_PAGE_SIZE == PAGE_SIZE */
+	if (XEN_PAGE_SIZE != PAGE_SIZE) {
+		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
+			  XEN_PAGE_SIZE, PAGE_SIZE);
+		return -ENODEV;
+	}
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	if (!xen_has_pv_devices())
+		return -ENODEV;
+
+	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
+	return xenbus_register_frontend(&xen_driver);
+}
+
+static void __exit xen_drv_fini(void)
+{
+	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
+	xenbus_unregister_driver(&xen_driver);
+}
+
+module_init(xen_drv_init);
+module_exit(xen_drv_fini);
+
+MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
new file mode 100644
index 000000000000..16554b2463d8
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_H_
+#define __XEN_DRM_FRONT_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <linux/scatterlist.h>
+
+#include "xen_drm_front_cfg.h"
+
+/**
+ * DOC: Driver modes of operation in terms of display buffers used
+ *
+ * Depending on the requirements for the para-virtualized environment, namely
+ * requirements dictated by the accompanying DRM/(v)GPU drivers running in both
+ * host and guest environments, a number of operating modes of the
+ * para-virtualized display driver are supported:
+ *
+ * - display buffers can be allocated by either the frontend driver or
+ *   the backend
+ * - display buffers can be allocated to be contiguous in memory or not
+ *
+ * Note! The frontend driver itself has no dependency on contiguous memory
+ * for its operation.
+ */
+
+/**
+ * DOC: Buffers allocated by the frontend driver
+ *
+ * The below modes of operation are configured at compile-time via
+ * frontend driver's kernel configuration:
+ */
+
+/**
+ * DOC: With GEM CMA helpers
+ *
+ * This use-case is useful when the accompanying DRM/vGPU driver in the
+ * guest domain was designed to only work with contiguous buffers, e.g. a
+ * DRM driver based on GEM CMA helpers: such drivers can only import
+ * contiguous PRIME buffers, thus requiring the frontend driver to provide
+ * such buffers. In order to implement this mode of operation the
+ * para-virtualized frontend driver can be configured to use GEM CMA
+ * helpers.
+ */
+
+/**
+ * DOC: Without GEM CMA helpers
+ *
+ * If the accompanying drivers can cope with non-contiguous memory then, to
+ * lower pressure on the kernel's CMA subsystem, the driver can allocate
+ * buffers from system memory.
+ *
+ * Note! If used with accompanying DRM/(v)GPU drivers, this mode of
+ * operation may require IOMMU support on the platform, so the accompanying
+ * DRM/vGPU hardware can still reach display buffer memory while importing
+ * PRIME buffers from the frontend driver.
+ */
+
+/**
+ * DOC: Buffers allocated by the backend
+ *
+ * This mode of operation is run-time configured via guest domain configuration
+ * through XenStore entries.
+ *
+ * For systems which do not provide IOMMU support but have specific
+ * requirements for display buffers, it is possible to allocate such
+ * buffers on the backend side and share them with the frontend.
+ * For example, if the host domain is 1:1 mapped and has DRM/GPU hardware
+ * expecting physically contiguous memory, this allows implementing
+ * zero-copying use-cases.
+ *
+ * Note, while using this scenario the following should be considered:
+ *
+ * #. If guest domain dies then pages/grants received from the backend
+ *    cannot be claimed back
+ *
+ * #. Misbehaving guest may send too many requests to the
+ *    backend exhausting its grant references and memory
+ *    (consider this from security POV)
+ */
+
+/**
+ * DOC: Driver limitations
+ *
+ * #. Only primary plane without additional properties is supported.
+ *
+ * #. Only one video mode per connector is supported, which is configured
+ *    via XenStore.
+ *
+ * #. All CRTCs operate at fixed frequency of 60Hz.
+ */
+
+/* timeout in ms to wait for backend to respond */
+#define XEN_DRM_FRONT_WAIT_BACK_MS	3000
+
+#ifndef GRANT_INVALID_REF
+/*
+ * Note on usage of grant reference 0 as invalid grant reference:
+ * grant reference 0 is valid, but never exposed to a PV driver,
+ * because of the fact it is already in use/reserved by the PV console.
+ */
+#define GRANT_INVALID_REF	0
+#endif
+
+struct xen_drm_front_info {
+	struct xenbus_device *xb_dev;
+	struct xen_drm_front_drm_info *drm_info;
+
+	/* to protect data between backend IO code and interrupt handler */
+	spinlock_t io_lock;
+
+	int num_evt_pairs;
+	struct xen_drm_front_evtchnl_pair *evt_pairs;
+	struct xen_drm_front_cfg cfg;
+
+	/* display buffers */
+	struct list_head dbuf_list;
+};
+
+struct xen_drm_front_drm_pipeline {
+	struct xen_drm_front_drm_info *drm_info;
+
+	int index;
+
+	struct drm_simple_display_pipe pipe;
+
+	struct drm_connector conn;
+	/* These are only for connector mode checking */
+	int width, height;
+
+	struct drm_pending_vblank_event *pending_event;
+
+	struct delayed_work pflip_to_worker;
+
+	bool conn_connected;
+};
+
+struct xen_drm_front_drm_info {
+	struct xen_drm_front_info *front_info;
+	struct drm_device *drm_dev;
+
+	struct xen_drm_front_drm_pipeline pipeline[XEN_DRM_FRONT_MAX_CRTCS];
+};
+
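+/*
+ * Buffers and framebuffers are identified in requests to the backend by
+ * opaque 64-bit cookies: the kernel pointer value of the corresponding
+ * object is simply reused as the cookie.
+ */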
+static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
+{
+	return (u64)fb;
+}
+
+static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
+{
+	return (u64)gem_obj;
+}
+
+int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
+			   u32 x, u32 y, u32 width, u32 height,
+			   u32 bpp, u64 fb_cookie);
+
+int xen_drm_front_dbuf_create_from_sgt(struct xen_drm_front_info *front_info,
+				       u64 dbuf_cookie, u32 width, u32 height,
+				       u32 bpp, u64 size, struct sg_table *sgt);
+
+int xen_drm_front_dbuf_create_from_pages(struct xen_drm_front_info *front_info,
+					 u64 dbuf_cookie, u32 width, u32 height,
+					 u32 bpp, u64 size, struct page **pages);
+
+int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
+			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
+			    u32 height, u32 pixel_format);
+
+int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
+			    u64 fb_cookie);
+
+int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
+			    int conn_idx, u64 fb_cookie);
+
+void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
+				 int conn_idx, u64 fb_cookie);
+
+#endif /* __XEN_DRM_FRONT_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.c b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
new file mode 100644
index 000000000000..5baf2b9de93c
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/device.h>
+
+#include <xen/interface/io/displif.h>
+#include <xen/xenbus.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_cfg.h"
+
+static int cfg_connector(struct xen_drm_front_info *front_info,
+			 struct xen_drm_front_cfg_connector *connector,
+			 const char *path, int index)
+{
+	char *connector_path;
+
+	connector_path = devm_kasprintf(&front_info->xb_dev->dev,
+					GFP_KERNEL, "%s/%d", path, index);
+	if (!connector_path)
+		return -ENOMEM;
+
+	if (xenbus_scanf(XBT_NIL, connector_path, XENDISPL_FIELD_RESOLUTION,
+			 "%d" XENDISPL_RESOLUTION_SEPARATOR "%d",
+			 &connector->width, &connector->height) < 0) {
+		/* either no entry configured or wrong resolution set */
+		connector->width = 0;
+		connector->height = 0;
+		return -EINVAL;
+	}
+
+	connector->xenstore_path = connector_path;
+
+	DRM_INFO("Connector %s: resolution %dx%d\n",
+		 connector_path, connector->width, connector->height);
+	return 0;
+}
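+
+/*
+ * Illustrative sketch of the XenStore entries this code reads (the field
+ * names are assumed to match the XENDISPL_FIELD_* definitions in
+ * displif.h, the values below are made up):
+ *
+ *   <frontend-node>/be-alloc     = "1"
+ *   <frontend-node>/0/resolution = "1920x1080"
+ */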
+
+int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info,
+			   struct xen_drm_front_cfg *cfg)
+{
+	struct xenbus_device *xb_dev = front_info->xb_dev;
+	int ret, i;
+
+	if (xenbus_read_unsigned(front_info->xb_dev->nodename,
+				 XENDISPL_FIELD_BE_ALLOC, 0)) {
+		DRM_INFO("Backend can provide display buffers\n");
+		cfg->be_alloc = true;
+	}
+
+	cfg->num_connectors = 0;
+	for (i = 0; i < ARRAY_SIZE(cfg->connectors); i++) {
+		ret = cfg_connector(front_info, &cfg->connectors[i],
+				    xb_dev->nodename, i);
+		if (ret < 0)
+			break;
+		cfg->num_connectors++;
+	}
+
+	if (!cfg->num_connectors) {
+		DRM_ERROR("No connector(s) configured at %s\n",
+			  xb_dev->nodename);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.h b/drivers/gpu/drm/xen/xen_drm_front_cfg.h
new file mode 100644
index 000000000000..aa8490ba9146
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_CFG_H_
+#define __XEN_DRM_FRONT_CFG_H_
+
+#include <linux/types.h>
+
+#define XEN_DRM_FRONT_MAX_CRTCS	4
+
+struct xen_drm_front_cfg_connector {
+	int width;
+	int height;
+	char *xenstore_path;
+};
+
+struct xen_drm_front_cfg {
+	struct xen_drm_front_info *front_info;
+	/* number of connectors in this configuration */
+	int num_connectors;
+	/* connector configurations */
+	struct xen_drm_front_cfg_connector connectors[XEN_DRM_FRONT_MAX_CRTCS];
+	/* set if dumb buffers are allocated externally on backend side */
+	bool be_alloc;
+};
+
+int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info,
+			   struct xen_drm_front_cfg *cfg);
+
+#endif /* __XEN_DRM_FRONT_CFG_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c
new file mode 100644
index 000000000000..c91ae532fa55
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <video/videomode.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_conn.h"
+#include "xen_drm_front_kms.h"
+
+static struct xen_drm_front_drm_pipeline *
+to_xen_drm_pipeline(struct drm_connector *connector)
+{
+	return container_of(connector, struct xen_drm_front_drm_pipeline, conn);
+}
+
+static const u32 plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
+};
+
+const u32 *xen_drm_front_conn_get_formats(int *format_count)
+{
+	*format_count = ARRAY_SIZE(plane_formats);
+	return plane_formats;
+}
+
+static int connector_detect(struct drm_connector *connector,
+			    struct drm_modeset_acquire_ctx *ctx,
+			    bool force)
+{
+	struct xen_drm_front_drm_pipeline *pipeline =
+			to_xen_drm_pipeline(connector);
+
+	if (drm_dev_is_unplugged(connector->dev))
+		pipeline->conn_connected = false;
+
+	return pipeline->conn_connected ? connector_status_connected :
+			connector_status_disconnected;
+}
+
+#define XEN_DRM_CRTC_VREFRESH_HZ	60
+
+static int connector_get_modes(struct drm_connector *connector)
+{
+	struct xen_drm_front_drm_pipeline *pipeline =
+			to_xen_drm_pipeline(connector);
+	struct drm_display_mode *mode;
+	struct videomode videomode;
+	int width, height;
+
+	mode = drm_mode_create(connector->dev);
+	if (!mode)
+		return 0;
+
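+	/*
+	 * Build the single supported mode from the XenStore configured
+	 * resolution: no porches or sync lengths are known, so the pixel
+	 * clock is derived from the active area and the fixed 60Hz refresh
+	 * rate only.
+	 */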
+	memset(&videomode, 0, sizeof(videomode));
+	videomode.hactive = pipeline->width;
+	videomode.vactive = pipeline->height;
+	width = videomode.hactive + videomode.hfront_porch +
+			videomode.hback_porch + videomode.hsync_len;
+	height = videomode.vactive + videomode.vfront_porch +
+			videomode.vback_porch + videomode.vsync_len;
+	videomode.pixelclock = width * height * XEN_DRM_CRTC_VREFRESH_HZ;
+	mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+
+	drm_display_mode_from_videomode(&videomode, mode);
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.get_modes = connector_get_modes,
+	.detect_ctx = connector_detect,
+};
+
+static const struct drm_connector_funcs connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
+			    struct drm_connector *connector)
+{
+	struct xen_drm_front_drm_pipeline *pipeline =
+			to_xen_drm_pipeline(connector);
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+
+	pipeline->conn_connected = true;
+
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+			DRM_CONNECTOR_POLL_DISCONNECT;
+
+	return drm_connector_init(drm_info->drm_dev, connector,
+				  &connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
new file mode 100644
index 000000000000..39de7cf5adbe
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_CONN_H_
+#define __XEN_DRM_FRONT_CONN_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+
+#include <linux/wait.h>
+
+struct xen_drm_front_drm_info;
+
+int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
+			    struct drm_connector *connector);
+
+const u32 *xen_drm_front_conn_get_formats(int *format_count);
+
+#endif /* __XEN_DRM_FRONT_CONN_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
new file mode 100644
index 000000000000..945226a95e9b
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/errno.h>
+#include <linux/irq.h>
+
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_evtchnl.h"
+
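+/*
+ * Interrupt handler for the request (control) channel: consume all
+ * pending responses from the shared ring and complete the waiter that
+ * issued the matching request.
+ */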
+static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
+{
+	struct xen_drm_front_evtchnl *evtchnl = dev_id;
+	struct xen_drm_front_info *front_info = evtchnl->front_info;
+	struct xendispl_resp *resp;
+	RING_IDX i, rp;
+	unsigned long flags;
+
+	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+
+again:
+	rp = evtchnl->u.req.ring.sring->rsp_prod;
+	/* ensure we see queued responses up to rp */
+	virt_rmb();
+
+	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
+		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
+		if (unlikely(resp->id != evtchnl->evt_id))
+			continue;
+
+		switch (resp->operation) {
+		case XENDISPL_OP_PG_FLIP:
+		case XENDISPL_OP_FB_ATTACH:
+		case XENDISPL_OP_FB_DETACH:
+		case XENDISPL_OP_DBUF_CREATE:
+		case XENDISPL_OP_DBUF_DESTROY:
+		case XENDISPL_OP_SET_CONFIG:
+			evtchnl->u.req.resp_status = resp->status;
+			complete(&evtchnl->u.req.completion);
+			break;
+
+		default:
+			DRM_ERROR("Operation %d is not supported\n",
+				  resp->operation);
+			break;
+		}
+	}
+
+	evtchnl->u.req.ring.rsp_cons = i;
+
+	if (i != evtchnl->u.req.ring.req_prod_pvt) {
+		int more_to_do;
+
+		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
+					       more_to_do);
+		if (more_to_do)
+			goto again;
+	} else {
+		evtchnl->u.req.ring.sring->rsp_event = i + 1;
+	}
+
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
+{
+	struct xen_drm_front_evtchnl *evtchnl = dev_id;
+	struct xen_drm_front_info *front_info = evtchnl->front_info;
+	struct xendispl_event_page *page = evtchnl->u.evt.page;
+	u32 cons, prod;
+	unsigned long flags;
+
+	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+
+	prod = page->in_prod;
+	/* ensure we see ring contents up to prod */
+	virt_rmb();
+	if (prod == page->in_cons)
+		goto out;
+
+	for (cons = page->in_cons; cons != prod; cons++) {
+		struct xendispl_evt *event;
+
+		event = &XENDISPL_IN_RING_REF(page, cons);
+		if (unlikely(event->id != evtchnl->evt_id++))
+			continue;
+
+		switch (event->type) {
+		case XENDISPL_EVT_PG_FLIP:
+			xen_drm_front_on_frame_done(front_info, evtchnl->index,
+						    event->op.pg_flip.fb_cookie);
+			break;
+		}
+	}
+	page->in_cons = cons;
+	/* ensure ring contents */
+	virt_wmb();
+
+out:
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+	return IRQ_HANDLED;
+}
+
+static void evtchnl_free(struct xen_drm_front_info *front_info,
+			 struct xen_drm_front_evtchnl *evtchnl)
+{
+	unsigned long page = 0;
+
+	if (evtchnl->type == EVTCHNL_TYPE_REQ)
+		page = (unsigned long)evtchnl->u.req.ring.sring;
+	else if (evtchnl->type == EVTCHNL_TYPE_EVT)
+		page = (unsigned long)evtchnl->u.evt.page;
+	if (!page)
+		return;
+
+	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
+
+	if (evtchnl->type == EVTCHNL_TYPE_REQ) {
+		/* release everyone still waiting for a response, if any */
+		evtchnl->u.req.resp_status = -EIO;
+		complete_all(&evtchnl->u.req.completion);
+	}
+
+	if (evtchnl->irq)
+		unbind_from_irqhandler(evtchnl->irq, evtchnl);
+
+	if (evtchnl->port)
+		xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);
+
+	/* end access and free the page */
+	if (evtchnl->gref != GRANT_INVALID_REF)
+		gnttab_end_foreign_access(evtchnl->gref, 0, page);
+
+	memset(evtchnl, 0, sizeof(*evtchnl));
+}
+
+static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
+			 struct xen_drm_front_evtchnl *evtchnl,
+			 enum xen_drm_front_evtchnl_type type)
+{
+	struct xenbus_device *xb_dev = front_info->xb_dev;
+	unsigned long page;
+	grant_ref_t gref;
+	irq_handler_t handler;
+	int ret;
+
+	memset(evtchnl, 0, sizeof(*evtchnl));
+	evtchnl->type = type;
+	evtchnl->index = index;
+	evtchnl->front_info = front_info;
+	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
+	evtchnl->gref = GRANT_INVALID_REF;
+
+	page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
+	if (!page) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (type == EVTCHNL_TYPE_REQ) {
+		struct xen_displif_sring *sring;
+
+		init_completion(&evtchnl->u.req.completion);
+		mutex_init(&evtchnl->u.req.req_io_lock);
+		sring = (struct xen_displif_sring *)page;
+		SHARED_RING_INIT(sring);
+		FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
+
+		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
+		if (ret < 0) {
+			evtchnl->u.req.ring.sring = NULL;
+			free_page(page);
+			goto fail;
+		}
+
+		handler = evtchnl_interrupt_ctrl;
+	} else {
+		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
+						  virt_to_gfn((void *)page), 0);
+		if (ret < 0) {
+			free_page(page);
+			goto fail;
+		}
+
+		evtchnl->u.evt.page = (struct xendispl_event_page *)page;
+		gref = ret;
+		handler = evtchnl_interrupt_evt;
+	}
+	evtchnl->gref = gref;
+
+	ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
+	if (ret < 0)
+		goto fail;
+
+	ret = bind_evtchn_to_irqhandler(evtchnl->port,
+					handler, 0, xb_dev->devicetype,
+					evtchnl);
+	if (ret < 0)
+		goto fail;
+
+	evtchnl->irq = ret;
+	return 0;
+
+fail:
+	DRM_ERROR("Failed to allocate ring: %d\n", ret);
+	return ret;
+}
+
+int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
+{
+	struct xen_drm_front_cfg *cfg;
+	int ret, conn;
+
+	cfg = &front_info->cfg;
+
+	front_info->evt_pairs =
+			kcalloc(cfg->num_connectors,
+				sizeof(struct xen_drm_front_evtchnl_pair),
+				GFP_KERNEL);
+	if (!front_info->evt_pairs) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	for (conn = 0; conn < cfg->num_connectors; conn++) {
+		ret = evtchnl_alloc(front_info, conn,
+				    &front_info->evt_pairs[conn].req,
+				    EVTCHNL_TYPE_REQ);
+		if (ret < 0) {
+			DRM_ERROR("Error allocating control channel\n");
+			goto fail;
+		}
+
+		ret = evtchnl_alloc(front_info, conn,
+				    &front_info->evt_pairs[conn].evt,
+				    EVTCHNL_TYPE_EVT);
+		if (ret < 0) {
+			DRM_ERROR("Error allocating in-event channel\n");
+			goto fail;
+		}
+	}
+	front_info->num_evt_pairs = cfg->num_connectors;
+	return 0;
+
+fail:
+	xen_drm_front_evtchnl_free_all(front_info);
+	return ret;
+}
+
+static int evtchnl_publish(struct xenbus_transaction xbt,
+			   struct xen_drm_front_evtchnl *evtchnl,
+			   const char *path, const char *node_ring,
+			   const char *node_chnl)
+{
+	struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
+	int ret;
+
+	/* write the ring reference */
+	ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
+	if (ret < 0) {
+		xenbus_dev_error(xb_dev, ret, "writing ring-ref");
+		return ret;
+	}
+
+	/* write the event channel port */
+	ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
+	if (ret < 0) {
+		xenbus_dev_error(xb_dev, ret, "writing event channel");
+		return ret;
+	}
+
+	return 0;
+}
+
+int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
+{
+	struct xenbus_transaction xbt;
+	struct xen_drm_front_cfg *plat_data;
+	int ret, conn;
+
+	plat_data = &front_info->cfg;
+
+again:
+	ret = xenbus_transaction_start(&xbt);
+	if (ret < 0) {
+		xenbus_dev_fatal(front_info->xb_dev, ret,
+				 "starting transaction");
+		return ret;
+	}
+
+	for (conn = 0; conn < plat_data->num_connectors; conn++) {
+		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
+				      plat_data->connectors[conn].xenstore_path,
+				      XENDISPL_FIELD_REQ_RING_REF,
+				      XENDISPL_FIELD_REQ_CHANNEL);
+		if (ret < 0)
+			goto fail;
+
+		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
+				      plat_data->connectors[conn].xenstore_path,
+				      XENDISPL_FIELD_EVT_RING_REF,
+				      XENDISPL_FIELD_EVT_CHANNEL);
+		if (ret < 0)
+			goto fail;
+	}
+
+	ret = xenbus_transaction_end(xbt, 0);
+	if (ret < 0) {
+		if (ret == -EAGAIN)
+			goto again;
+
+		xenbus_dev_fatal(front_info->xb_dev, ret,
+				 "completing transaction");
+		goto fail_to_end;
+	}
+
+	return 0;
+
+fail:
+	xenbus_transaction_end(xbt, 1);
+
+fail_to_end:
+	xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
+	return ret;
+}
+
+void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
+{
+	int notify;
+
+	evtchnl->u.req.ring.req_prod_pvt++;
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
+	if (notify)
+		notify_remote_via_irq(evtchnl->irq);
+}
+
+void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
+				     enum xen_drm_front_evtchnl_state state)
+{
+	unsigned long flags;
+	int i;
+
+	if (!front_info->evt_pairs)
+		return;
+
+	spin_lock_irqsave(&front_info->io_lock, flags);
+	for (i = 0; i < front_info->num_evt_pairs; i++) {
+		front_info->evt_pairs[i].req.state = state;
+		front_info->evt_pairs[i].evt.state = state;
+	}
+	spin_unlock_irqrestore(&front_info->io_lock, flags);
+}
+
+void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
+{
+	int i;
+
+	if (!front_info->evt_pairs)
+		return;
+
+	for (i = 0; i < front_info->num_evt_pairs; i++) {
+		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
+		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
+	}
+
+	kfree(front_info->evt_pairs);
+	front_info->evt_pairs = NULL;
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h
new file mode 100644
index 000000000000..b0af6994332b
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_EVTCHNL_H_
+#define __XEN_DRM_FRONT_EVTCHNL_H_
+
+#include <linux/completion.h>
+#include <linux/types.h>
+
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/displif.h>
+
+/*
+ * All operations which are not connector oriented use this ctrl event channel,
+ * e.g. fb_attach/destroy which belong to a DRM device, not to a CRTC.
+ */
+#define GENERIC_OP_EVT_CHNL	0
+
+enum xen_drm_front_evtchnl_state {
+	EVTCHNL_STATE_DISCONNECTED,
+	EVTCHNL_STATE_CONNECTED,
+};
+
+enum xen_drm_front_evtchnl_type {
+	EVTCHNL_TYPE_REQ,
+	EVTCHNL_TYPE_EVT,
+};
+
+struct xen_drm_front_drm_info;
+
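+/*
+ * Each connector uses a pair of event channels: a request channel backed
+ * by a shared ring of xendispl requests/responses and an event channel
+ * page which delivers out-of-band events (e.g. page flip done) from the
+ * backend.
+ */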
+struct xen_drm_front_evtchnl {
+	struct xen_drm_front_info *front_info;
+	int gref;
+	int port;
+	int irq;
+	int index;
+	enum xen_drm_front_evtchnl_state state;
+	enum xen_drm_front_evtchnl_type type;
+	/* either response id or incoming event id */
+	u16 evt_id;
+	/* next request id or next expected event id */
+	u16 evt_next_id;
+	union {
+		struct {
+			struct xen_displif_front_ring ring;
+			struct completion completion;
+			/* latest response status */
+			int resp_status;
+			/* serializer for backend IO: request/response */
+			struct mutex req_io_lock;
+		} req;
+		struct {
+			struct xendispl_event_page *page;
+		} evt;
+	} u;
+};
+
+struct xen_drm_front_evtchnl_pair {
+	struct xen_drm_front_evtchnl req;
+	struct xen_drm_front_evtchnl evt;
+};
+
+int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info);
+
+int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info);
+
+void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl);
+
+void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
+				     enum xen_drm_front_evtchnl_state state);
+
+void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info);
+
+#endif /* __XEN_DRM_FRONT_EVTCHNL_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
new file mode 100644
index 000000000000..3b04a2269d7a
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include "xen_drm_front_gem.h"
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/shmem_fs.h>
+
+#include <xen/balloon.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_shbuf.h"
+
+struct xen_gem_object {
+	struct drm_gem_object base;
+
+	size_t num_pages;
+	struct page **pages;
+
+	/* set for buffers allocated by the backend */
+	bool be_alloc;
+
+	/* this is for imported PRIME buffer */
+	struct sg_table *sgt_imported;
+};
+
+static inline struct xen_gem_object *
+to_xen_gem_obj(struct drm_gem_object *gem_obj)
+{
+	return container_of(gem_obj, struct xen_gem_object, base);
+}
+
+static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
+				 size_t buf_size)
+{
+	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
+	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
+					sizeof(struct page *), GFP_KERNEL);
+	return !xen_obj->pages ? -ENOMEM : 0;
+}
+
+static void gem_free_pages_array(struct xen_gem_object *xen_obj)
+{
+	kvfree(xen_obj->pages);
+	xen_obj->pages = NULL;
+}
+
+static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
+					     size_t size)
+{
+	struct xen_gem_object *xen_obj;
+	int ret;
+
+	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
+	if (!xen_obj)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_gem_object_init(dev, &xen_obj->base, size);
+	if (ret < 0) {
+		kfree(xen_obj);
+		return ERR_PTR(ret);
+	}
+
+	return xen_obj;
+}
+
+static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
+{
+	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+	struct xen_gem_object *xen_obj;
+	int ret;
+
+	size = round_up(size, PAGE_SIZE);
+	xen_obj = gem_create_obj(dev, size);
+	if (IS_ERR_OR_NULL(xen_obj))
+		return xen_obj;
+
+	if (drm_info->front_info->cfg.be_alloc) {
+		/*
+		 * backend will allocate space for this buffer, so
+		 * only allocate array of pointers to pages
+		 */
+		ret = gem_alloc_pages_array(xen_obj, size);
+		if (ret < 0)
+			goto fail;
+
+		/*
+		 * allocate ballooned pages which will be used to map
+		 * grant references provided by the backend
+		 */
+		ret = alloc_xenballooned_pages(xen_obj->num_pages,
+					       xen_obj->pages);
+		if (ret < 0) {
+			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
+				  xen_obj->num_pages, ret);
+			gem_free_pages_array(xen_obj);
+			goto fail;
+		}
+
+		xen_obj->be_alloc = true;
+		return xen_obj;
+	}
+	/*
+	 * need to allocate backing pages now, so we can share those
+	 * with the backend
+	 */
+	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
+	if (IS_ERR_OR_NULL(xen_obj->pages)) {
+		ret = PTR_ERR(xen_obj->pages);
+		xen_obj->pages = NULL;
+		goto fail;
+	}
+
+	return xen_obj;
+
+fail:
+	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
+	return ERR_PTR(ret);
+}
+
+struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+						size_t size)
+{
+	struct xen_gem_object *xen_obj;
+
+	xen_obj = gem_create(dev, size);
+	if (IS_ERR_OR_NULL(xen_obj))
+		return ERR_CAST(xen_obj);
+
+	return &xen_obj->base;
+}
+
+void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
+{
+	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+	if (xen_obj->base.import_attach) {
+		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
+		gem_free_pages_array(xen_obj);
+	} else {
+		if (xen_obj->pages) {
+			if (xen_obj->be_alloc) {
+				free_xenballooned_pages(xen_obj->num_pages,
+							xen_obj->pages);
+				gem_free_pages_array(xen_obj);
+			} else {
+				drm_gem_put_pages(&xen_obj->base,
+						  xen_obj->pages, true, false);
+			}
+		}
+	}
+	drm_gem_object_release(gem_obj);
+	kfree(xen_obj);
+}
+
+struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
+{
+	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+	return xen_obj->pages;
+}
+
+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
+{
+	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+	if (!xen_obj->pages)
+		return NULL;
+
+	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+}
+
+struct drm_gem_object *
+xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+				  struct dma_buf_attachment *attach,
+				  struct sg_table *sgt)
+{
+	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+	struct xen_gem_object *xen_obj;
+	size_t size;
+	int ret;
+
+	size = attach->dmabuf->size;
+	xen_obj = gem_create_obj(dev, size);
+	if (IS_ERR_OR_NULL(xen_obj))
+		return ERR_CAST(xen_obj);
+
+	ret = gem_alloc_pages_array(xen_obj, size);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	xen_obj->sgt_imported = sgt;
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
+					       NULL, xen_obj->num_pages);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	/*
+	 * N.B. Although we have an API to create a display buffer from an
+	 * sgt, we use the pages API, because we still need the pages for
+	 * GEM handling, e.g. for mapping etc.
+	 */
+	ret = xen_drm_front_dbuf_create_from_pages(drm_info->front_info,
+						   xen_drm_front_dbuf_to_cookie(&xen_obj->base),
+						   0, 0, 0, size,
+						   xen_obj->pages);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
+		  size, sgt->nents);
+
+	return &xen_obj->base;
+}
+
+static int gem_mmap_obj(struct xen_gem_object *xen_obj,
+			struct vm_area_struct *vma)
+{
+	unsigned long addr = vma->vm_start;
+	int i;
+
+	/*
+	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+	 * the whole buffer.
+	 */
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_pgoff = 0;
+	vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	/*
+	 * The vm_operations_struct.fault handler would normally be called
+	 * on CPU access to the VM area. For GPUs this isn't the case,
+	 * because the CPU doesn't touch the memory. Insert the pages now,
+	 * so both CPU and GPU are happy.
+	 * FIXME: as we insert all the pages now, no .fault handler should
+	 * ever be called, so we don't provide one.
+	 */
+	for (i = 0; i < xen_obj->num_pages; i++) {
+		int ret;
+
+		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
+		if (ret < 0) {
+			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
+			return ret;
+		}
+
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct xen_gem_object *xen_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret < 0)
+		return ret;
+
+	gem_obj = vma->vm_private_data;
+	xen_obj = to_xen_gem_obj(gem_obj);
+	return gem_mmap_obj(xen_obj, vma);
+}
+
+void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
+{
+	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+	if (!xen_obj->pages)
+		return NULL;
+
+	return vmap(xen_obj->pages, xen_obj->num_pages,
+		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+}
+
+void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
+				    void *vaddr)
+{
+	vunmap(vaddr);
+}
+
+int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
+				 struct vm_area_struct *vma)
+{
+	struct xen_gem_object *xen_obj;
+	int ret;
+
+	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
+	if (ret < 0)
+		return ret;
+
+	xen_obj = to_xen_gem_obj(gem_obj);
+	return gem_mmap_obj(xen_obj, vma);
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
new file mode 100644
index 000000000000..55e531f5a763
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_GEM_H
+#define __XEN_DRM_FRONT_GEM_H
+
+#include <drm/drmP.h>
+
+struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+						size_t size);
+
+struct drm_gem_object *
+xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+				  struct dma_buf_attachment *attach,
+				  struct sg_table *sgt);
+
+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj);
+
+struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);
+
+void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);
+
+#ifndef CONFIG_DRM_XEN_FRONTEND_CMA
+
+int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj);
+
+void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
+				    void *vaddr);
+
+int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
+				 struct vm_area_struct *vma);
+#endif
+
+#endif /* __XEN_DRM_FRONT_GEM_H */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem_cma.c b/drivers/gpu/drm/xen/xen_drm_front_gem_cma.c
new file mode 100644
index 000000000000..ba30a4bc2a39
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem_cma.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_gem.h"
+
+struct drm_gem_object *
+xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+				  struct dma_buf_attachment *attach,
+				  struct sg_table *sgt)
+{
+	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+	struct drm_gem_object *gem_obj;
+	struct drm_gem_cma_object *cma_obj;
+	int ret;
+
+	gem_obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+	if (IS_ERR_OR_NULL(gem_obj))
+		return gem_obj;
+
+	cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	ret = xen_drm_front_dbuf_create_from_sgt(drm_info->front_info,
+						 xen_drm_front_dbuf_to_cookie(gem_obj),
+						 0, 0, 0, gem_obj->size,
+						 drm_gem_cma_prime_get_sg_table(gem_obj));
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	DRM_DEBUG("Imported CMA buffer of size %zu\n", gem_obj->size);
+
+	return gem_obj;
+}
+
+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
+{
+	return drm_gem_cma_prime_get_sg_table(gem_obj);
+}
+
+struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+						size_t size)
+{
+	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+	struct drm_gem_cma_object *cma_obj;
+
+	if (drm_info->front_info->cfg.be_alloc) {
+		/* This use-case is not yet supported and probably won't be */
+		DRM_ERROR("Backend allocated buffers and CMA helpers are not supported at the same time\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	cma_obj = drm_gem_cma_create(dev, size);
+	if (IS_ERR_OR_NULL(cma_obj))
+		return ERR_CAST(cma_obj);
+
+	return &cma_obj->base;
+}
+
+void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
+{
+	drm_gem_cma_free_object(gem_obj);
+}
+
+struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
+{
+	return NULL;
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
new file mode 100644
index 000000000000..0bd6681fa4f3
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include "xen_drm_front_kms.h"
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_conn.h"
+
+/*
+ * Timeout in ms to wait for frame done event from the backend:
+ * must be a bit more than IO time-out
+ */
+#define FRAME_DONE_TO_MS	(XEN_DRM_FRONT_WAIT_BACK_MS + 100)
+
+static struct xen_drm_front_drm_pipeline *
+to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
+{
+	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
+}
+
+static void fb_destroy(struct drm_framebuffer *fb)
+{
+	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
+	int idx;
+
+	if (drm_dev_enter(fb->dev, &idx)) {
+		xen_drm_front_fb_detach(drm_info->front_info,
+					xen_drm_front_fb_to_cookie(fb));
+		drm_dev_exit(idx);
+	}
+	drm_gem_fb_destroy(fb);
+}
+
+static struct drm_framebuffer_funcs fb_funcs = {
+	.destroy = fb_destroy,
+};
+
+static struct drm_framebuffer *
+fb_create(struct drm_device *dev, struct drm_file *filp,
+	  const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+	struct drm_framebuffer *fb;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+	if (IS_ERR_OR_NULL(fb))
+		return fb;
+
+	gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+	if (!gem_obj) {
+		DRM_ERROR("Failed to lookup GEM object\n");
+		ret = -ENOENT;
+		goto fail;
+	}
+
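+	/*
+	 * The framebuffer created above holds its own reference to the GEM
+	 * object, so the lookup reference can be dropped right away; the
+	 * object pointer is only needed to derive the buffer cookie below.
+	 */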
+	drm_gem_object_put_unlocked(gem_obj);
+
+	ret = xen_drm_front_fb_attach(drm_info->front_info,
+				      xen_drm_front_dbuf_to_cookie(gem_obj),
+				      xen_drm_front_fb_to_cookie(fb),
+				      fb->width, fb->height,
+				      fb->format->format);
+	if (ret < 0) {
+		DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
+		goto fail;
+	}
+
+	return fb;
+
+fail:
+	drm_gem_fb_destroy(fb);
+	return ERR_PTR(ret);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+	.fb_create = fb_create,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
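+/* Deliver the cached page flip/vblank event to userspace, if any. */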
+static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
+{
+	struct drm_crtc *crtc = &pipeline->pipe.crtc;
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (pipeline->pending_event)
+		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
+	pipeline->pending_event = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void display_enable(struct drm_simple_display_pipe *pipe,
+			   struct drm_crtc_state *crtc_state,
+			   struct drm_plane_state *plane_state)
+{
+	struct xen_drm_front_drm_pipeline *pipeline =
+			to_xen_drm_pipeline(pipe);
+	struct drm_crtc *crtc = &pipe->crtc;
+	struct drm_framebuffer *fb = plane_state->fb;
+	int ret, idx;
+
+	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+		return;
+
+	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
+				     fb->width, fb->height,
+				     fb->format->cpp[0] * 8,
+				     xen_drm_front_fb_to_cookie(fb));
+
+	if (ret) {
+		DRM_ERROR("Failed to enable display: %d\n", ret);
+		pipeline->conn_connected = false;
+	}
+
+	drm_dev_exit(idx);
+}
+
+static void display_disable(struct drm_simple_display_pipe *pipe)
+{
+	struct xen_drm_front_drm_pipeline *pipeline =
+			to_xen_drm_pipeline(pipe);
+	int ret = 0, idx;
+
+	if (drm_dev_enter(pipe->crtc.dev, &idx)) {
+		ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
+					     xen_drm_front_fb_to_cookie(NULL));
+		drm_dev_exit(idx);
+	}
+	if (ret)
+		DRM_ERROR("Failed to disable display: %d\n", ret);
+
+	/* Make sure we can restart with enabled connector next time */
+	pipeline->conn_connected = true;
+
+	/* release stalled event if any */
+	send_pending_event(pipeline);
+}
+
+void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
+				     u64 fb_cookie)
+{
+	/*
+	 * This runs in interrupt context, i.e. under
+	 * drm_info->front_info->io_lock, so we cannot call the _sync version
+	 * to cancel the work.
+	 */
+	cancel_delayed_work(&pipeline->pflip_to_worker);
+
+	send_pending_event(pipeline);
+}
+
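+/*
+ * Frame done event never arrived from the backend within FRAME_DONE_TO_MS,
+ * so release the pending event to avoid blocking userspace forever.
+ */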
+static void pflip_to_worker(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct xen_drm_front_drm_pipeline *pipeline =
+			container_of(delayed_work,
+				     struct xen_drm_front_drm_pipeline,
+				     pflip_to_worker);
+
+	DRM_ERROR("Frame done timed-out, releasing");
+	send_pending_event(pipeline);
+}
+
+static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
+				   struct drm_plane_state *old_plane_state)
+{
+	struct drm_plane_state *plane_state =
+			drm_atomic_get_new_plane_state(old_plane_state->state,
+						       &pipe->plane);
+
+	/*
+	 * If old_plane_state->fb is NULL and plane_state->fb is not,
+	 * then this is an atomic commit which will enable display.
+	 * If old_plane_state->fb is not NULL and plane_state->fb is NULL,
+	 * then this is an atomic commit which will disable display.
+	 * Ignore both cases and do not send a page flip, as the framebuffer
+	 * will be sent to the backend as part of the display_set_config call.
+	 */
+	if (old_plane_state->fb && plane_state->fb) {
+		struct xen_drm_front_drm_pipeline *pipeline =
+				to_xen_drm_pipeline(pipe);
+		struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
+		int ret;
+
+		schedule_delayed_work(&pipeline->pflip_to_worker,
+				      msecs_to_jiffies(FRAME_DONE_TO_MS));
+
+		ret = xen_drm_front_page_flip(drm_info->front_info,
+					      pipeline->index,
+					      xen_drm_front_fb_to_cookie(plane_state->fb));
+		if (ret) {
+			DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);
+
+			pipeline->conn_connected = false;
+			/*
+			 * Report the flip not handled, so pending event is
+			 * sent, unblocking user-space.
+			 */
+			return false;
+		}
+		/*
+		 * Signal that page flip was handled, pending event will be sent
+		 * on frame done event from the backend.
+		 */
+		return true;
+	}
+
+	return false;
+}
+
+static int display_prepare_fb(struct drm_simple_display_pipe *pipe,
+			      struct drm_plane_state *plane_state)
+{
+	return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
+}
+
+static void display_update(struct drm_simple_display_pipe *pipe,
+			   struct drm_plane_state *old_plane_state)
+{
+	struct xen_drm_front_drm_pipeline *pipeline =
+			to_xen_drm_pipeline(pipe);
+	struct drm_crtc *crtc = &pipe->crtc;
+	struct drm_pending_vblank_event *event;
+	int idx;
+
+	event = crtc->state->event;
+	if (event) {
+		struct drm_device *dev = crtc->dev;
+		unsigned long flags;
+
+		WARN_ON(pipeline->pending_event);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		crtc->state->event = NULL;
+
+		pipeline->pending_event = event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
+		send_pending_event(pipeline);
+		return;
+	}
+
+	/*
+	 * Send page flip request to the backend *after* we have event cached
+	 * above, so on page flip done event from the backend we can
+	 * deliver it and there is no race condition between this code and
+	 * event from the backend.
+	 * If this is not a page flip, e.g. no flip done event from the backend
+	 * is expected, then send now.
+	 */
+	if (!display_send_page_flip(pipe, old_plane_state))
+		send_pending_event(pipeline);
+
+	drm_dev_exit(idx);
+}
+
+static enum drm_mode_status
+display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+	struct xen_drm_front_drm_pipeline *pipeline =
+			container_of(crtc, struct xen_drm_front_drm_pipeline,
+				     pipe.crtc);
+
+	if (mode->hdisplay != pipeline->width)
+		return MODE_ERROR;
+
+	if (mode->vdisplay != pipeline->height)
+		return MODE_ERROR;
+
+	return MODE_OK;
+}
+
+static const struct drm_simple_display_pipe_funcs display_funcs = {
+	.mode_valid = display_mode_valid,
+	.enable = display_enable,
+	.disable = display_disable,
+	.prepare_fb = display_prepare_fb,
+	.update = display_update,
+};
+
+static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
+			     int index, struct xen_drm_front_cfg_connector *cfg,
+			     struct xen_drm_front_drm_pipeline *pipeline)
+{
+	struct drm_device *dev = drm_info->drm_dev;
+	const u32 *formats;
+	int format_count;
+	int ret;
+
+	pipeline->drm_info = drm_info;
+	pipeline->index = index;
+	pipeline->height = cfg->height;
+	pipeline->width = cfg->width;
+
+	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);
+
+	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
+	if (ret)
+		return ret;
+
+	formats = xen_drm_front_conn_get_formats(&format_count);
+
+	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
+					    &display_funcs, formats,
+					    format_count, NULL,
+					    &pipeline->conn);
+}
+
+int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
+{
+	struct drm_device *dev = drm_info->drm_dev;
+	int i, ret;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 4095;
+	dev->mode_config.max_height = 2047;
+	dev->mode_config.funcs = &mode_config_funcs;
+
+	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
+		struct xen_drm_front_cfg_connector *cfg =
+				&drm_info->front_info->cfg.connectors[i];
+		struct xen_drm_front_drm_pipeline *pipeline =
+				&drm_info->pipeline[i];
+
+		ret = display_pipe_init(drm_info, i, cfg, pipeline);
+		if (ret) {
+			drm_mode_config_cleanup(dev);
+			return ret;
+		}
+	}
+
+	drm_mode_config_reset(dev);
+	drm_kms_helper_poll_init(dev);
+	return 0;
+}
+
+void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
+{
+	int i;
+
+	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
+		struct xen_drm_front_drm_pipeline *pipeline =
+				&drm_info->pipeline[i];
+
+		cancel_delayed_work_sync(&pipeline->pflip_to_worker);
+
+		send_pending_event(pipeline);
+	}
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.h b/drivers/gpu/drm/xen/xen_drm_front_kms.h
new file mode 100644
index 000000000000..ab2fbad4fbbf
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_KMS_H_
+#define __XEN_DRM_FRONT_KMS_H_
+
+#include <linux/types.h>
+
+struct xen_drm_front_drm_info;
+struct xen_drm_front_drm_pipeline;
+
+int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info);
+
+void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info);
+
+void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
+				     u64 fb_cookie);
+
+#endif /* __XEN_DRM_FRONT_KMS_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
new file mode 100644
index 000000000000..19914dde4b3d
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drmP.h>
+
+#if defined(CONFIG_X86)
+#include <drm/drm_cache.h>
+#endif
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/xen/hypervisor.h>
+#include <xen/balloon.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/displif.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_shbuf.h"
+
+struct xen_drm_front_shbuf_ops {
+	/*
+	 * Calculate number of grefs required to handle this buffer,
+	 * e.g. if grefs are required for page directory only or the buffer
+	 * pages as well.
+	 */
+	void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
+	/* Fill page directory according to para-virtual display protocol. */
+	void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
+	/* Claim grant references for the pages of the buffer. */
+	int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
+				     grant_ref_t *priv_gref_head, int gref_idx);
+	/* Map grant references of the buffer. */
+	int (*map)(struct xen_drm_front_shbuf *buf);
+	/* Unmap grant references of the buffer. */
+	int (*unmap)(struct xen_drm_front_shbuf *buf);
+};
+
+grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
+{
+	if (!buf->grefs)
+		return GRANT_INVALID_REF;
+
+	return buf->grefs[0];
+}
+
+int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
+{
+	if (buf->ops->map)
+		return buf->ops->map(buf);
+
+	/* no need to map own grant references */
+	return 0;
+}
+
+int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
+{
+	if (buf->ops->unmap)
+		return buf->ops->unmap(buf);
+
+	/* no need to unmap own grant references */
+	return 0;
+}
+
+void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
+{
+#if defined(CONFIG_X86)
+	drm_clflush_pages(buf->pages, buf->num_pages);
+#endif
+}
+
+void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
+{
+	if (buf->grefs) {
+		int i;
+
+		for (i = 0; i < buf->num_grefs; i++)
+			if (buf->grefs[i] != GRANT_INVALID_REF)
+				gnttab_end_foreign_access(buf->grefs[i],
+							  0, 0UL);
+	}
+	kfree(buf->grefs);
+	kfree(buf->directory);
+	if (buf->sgt) {
+		sg_free_table(buf->sgt);
+		kvfree(buf->pages);
+	}
+	kfree(buf);
+}
+
+/*
+ * number of grefs a page can hold with respect to the
+ * struct xendispl_page_directory header
+ */
+#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
+		offsetof(struct xendispl_page_directory, gref)) / \
+		sizeof(grant_ref_t))
+
+static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
+{
+	/* number of pages the page directory consumes itself */
+	return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
+}
+
+static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
+{
+	/* only for pages the page directory consumes itself */
+	buf->num_grefs = get_num_pages_dir(buf);
+}
+
+static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
+{
+	/*
+	 * number of pages the page directory consumes itself
+	 * plus grefs for the buffer pages
+	 */
+	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
+}
+
+#define xen_page_to_vaddr(page) \
+		((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+
+static int backend_unmap(struct xen_drm_front_shbuf *buf)
+{
+	struct gnttab_unmap_grant_ref *unmap_ops;
+	int i, ret;
+
+	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
+		return 0;
+
+	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
+			    GFP_KERNEL);
+	if (!unmap_ops) {
+		DRM_ERROR("Failed to get memory while unmapping\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < buf->num_pages; i++) {
+		phys_addr_t addr;
+
+		addr = xen_page_to_vaddr(buf->pages[i]);
+		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
+				    buf->backend_map_handles[i]);
+	}
+
+	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
+				buf->num_pages);
+
+	for (i = 0; i < buf->num_pages; i++) {
+		if (unlikely(unmap_ops[i].status != GNTST_okay))
+			DRM_ERROR("Failed to unmap page %d: %d\n",
+				  i, unmap_ops[i].status);
+	}
+
+	if (ret)
+		DRM_ERROR("Failed to unmap grant references, ret %d", ret);
+
+	kfree(unmap_ops);
+	kfree(buf->backend_map_handles);
+	buf->backend_map_handles = NULL;
+	return ret;
+}
+
+static int backend_map(struct xen_drm_front_shbuf *buf)
+{
+	struct gnttab_map_grant_ref *map_ops = NULL;
+	unsigned char *ptr;
+	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
+
+	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
+	if (!map_ops)
+		return -ENOMEM;
+
+	buf->backend_map_handles = kcalloc(buf->num_pages,
+					   sizeof(*buf->backend_map_handles),
+					   GFP_KERNEL);
+	if (!buf->backend_map_handles) {
+		kfree(map_ops);
+		return -ENOMEM;
+	}
+
+	/*
+	 * read page directory to get grefs from the backend: for external
+	 * buffer we only allocate buf->grefs for the page directory,
+	 * so buf->num_grefs holds the number of pages in the page directory itself
+	 */
+	ptr = buf->directory;
+	grefs_left = buf->num_pages;
+	cur_page = 0;
+	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
+		struct xendispl_page_directory *page_dir =
+				(struct xendispl_page_directory *)ptr;
+		int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
+
+		if (to_copy > grefs_left)
+			to_copy = grefs_left;
+
+		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
+			phys_addr_t addr;
+
+			addr = xen_page_to_vaddr(buf->pages[cur_page]);
+			gnttab_set_map_op(&map_ops[cur_page], addr,
+					  GNTMAP_host_map,
+					  page_dir->gref[cur_gref],
+					  buf->xb_dev->otherend_id);
+			cur_page++;
+		}
+
+		grefs_left -= to_copy;
+		ptr += PAGE_SIZE;
+	}
+	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
+
+	/* save handles even if error, so we can unmap */
+	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
+		buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
+		if (unlikely(map_ops[cur_page].status != GNTST_okay))
+			DRM_ERROR("Failed to map page %d: %d\n",
+				  cur_page, map_ops[cur_page].status);
+	}
+
+	if (ret) {
+		DRM_ERROR("Failed to map grant references, ret %d", ret);
+		backend_unmap(buf);
+	}
+
+	kfree(map_ops);
+	return ret;
+}
+
+static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
+{
+	struct xendispl_page_directory *page_dir;
+	unsigned char *ptr;
+	int i, num_pages_dir;
+
+	ptr = buf->directory;
+	num_pages_dir = get_num_pages_dir(buf);
+
+	/* fill only grefs for the page directory itself */
+	for (i = 0; i < num_pages_dir - 1; i++) {
+		page_dir = (struct xendispl_page_directory *)ptr;
+
+		page_dir->gref_dir_next_page = buf->grefs[i + 1];
+		ptr += PAGE_SIZE;
+	}
+	/* last page must say there are no more pages */
+	page_dir = (struct xendispl_page_directory *)ptr;
+	page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+}
+
+static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
+{
+	unsigned char *ptr;
+	int cur_gref, grefs_left, to_copy, i, num_pages_dir;
+
+	ptr = buf->directory;
+	num_pages_dir = get_num_pages_dir(buf);
+
+	/*
+	 * while copying, skip grefs at start, they are for pages
+	 * granted for the page directory itself
+	 */
+	cur_gref = num_pages_dir;
+	grefs_left = buf->num_pages;
+	for (i = 0; i < num_pages_dir; i++) {
+		struct xendispl_page_directory *page_dir =
+				(struct xendispl_page_directory *)ptr;
+
+		if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
+			to_copy = grefs_left;
+			page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+		} else {
+			to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
+			page_dir->gref_dir_next_page = buf->grefs[i + 1];
+		}
+		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
+		       to_copy * sizeof(grant_ref_t));
+		ptr += PAGE_SIZE;
+		grefs_left -= to_copy;
+		cur_gref += to_copy;
+	}
+}
+
+static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
+				       grant_ref_t *priv_gref_head,
+				       int gref_idx)
+{
+	int i, cur_ref, otherend_id;
+
+	otherend_id = buf->xb_dev->otherend_id;
+	for (i = 0; i < buf->num_pages; i++) {
+		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
+		if (cur_ref < 0)
+			return cur_ref;
+
+		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
+						xen_page_to_gfn(buf->pages[i]),
+						0);
+		buf->grefs[gref_idx++] = cur_ref;
+	}
+	return 0;
+}
+
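+/*
+ * Grant the backend access to the page directory pages first and then, for
+ * locally allocated buffers, to the buffer pages themselves via the
+ * per-mode grant_refs_for_buffer callback.
+ */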
+static int grant_references(struct xen_drm_front_shbuf *buf)
+{
+	grant_ref_t priv_gref_head;
+	int ret, i, j, cur_ref;
+	int otherend_id, num_pages_dir;
+
+	ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
+	if (ret < 0) {
+		DRM_ERROR("Cannot allocate grant references\n");
+		return ret;
+	}
+
+	otherend_id = buf->xb_dev->otherend_id;
+	j = 0;
+	num_pages_dir = get_num_pages_dir(buf);
+	for (i = 0; i < num_pages_dir; i++) {
+		unsigned long frame;
+
+		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
+		if (cur_ref < 0)
+			return cur_ref;
+
+		frame = xen_page_to_gfn(virt_to_page(buf->directory +
+					PAGE_SIZE * i));
+		gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
+		buf->grefs[j++] = cur_ref;
+	}
+
+	if (buf->ops->grant_refs_for_buffer) {
+		ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
+		if (ret)
+			return ret;
+	}
+
+	gnttab_free_grant_references(priv_gref_head);
+	return 0;
+}
+
+static int alloc_storage(struct xen_drm_front_shbuf *buf)
+{
+	if (buf->sgt) {
+		buf->pages = kvmalloc_array(buf->num_pages,
+					    sizeof(struct page *), GFP_KERNEL);
+		if (!buf->pages)
+			return -ENOMEM;
+
+		if (drm_prime_sg_to_page_addr_arrays(buf->sgt, buf->pages,
+						     NULL, buf->num_pages) < 0)
+			return -EINVAL;
+	}
+
+	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
+	if (!buf->grefs)
+		return -ENOMEM;
+
+	buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
+	if (!buf->directory)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * For backend allocated buffers we don't need grant_refs_for_buffer as those
+ * grant references are allocated at the backend side
+ */
+static const struct xen_drm_front_shbuf_ops backend_ops = {
+	.calc_num_grefs = backend_calc_num_grefs,
+	.fill_page_dir = backend_fill_page_dir,
+	.map = backend_map,
+	.unmap = backend_unmap
+};
+
+/* For locally granted references we do not need to map/unmap the references */
+static const struct xen_drm_front_shbuf_ops local_ops = {
+	.calc_num_grefs = guest_calc_num_grefs,
+	.fill_page_dir = guest_fill_page_dir,
+	.grant_refs_for_buffer = guest_grant_refs_for_buffer,
+};
+
+struct xen_drm_front_shbuf *
+xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
+{
+	struct xen_drm_front_shbuf *buf;
+	int ret;
+
+	/* either pages or sgt, not both */
+	if (unlikely(cfg->pages && cfg->sgt)) {
+		DRM_ERROR("Cannot handle buffer allocation with both pages and sg table provided\n");
+		return NULL;
+	}
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	if (cfg->be_alloc)
+		buf->ops = &backend_ops;
+	else
+		buf->ops = &local_ops;
+
+	buf->xb_dev = cfg->xb_dev;
+	buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
+	buf->sgt = cfg->sgt;
+	buf->pages = cfg->pages;
+
+	buf->ops->calc_num_grefs(buf);
+
+	ret = alloc_storage(buf);
+	if (ret)
+		goto fail;
+
+	ret = grant_references(buf);
+	if (ret)
+		goto fail;
+
+	buf->ops->fill_page_dir(buf);
+
+	return buf;
+
+fail:
+	xen_drm_front_shbuf_free(buf);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
new file mode 100644
index 000000000000..8c037fd7608b
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ *  Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_SHBUF_H_
+#define __XEN_DRM_FRONT_SHBUF_H_
+
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+
+#include <xen/grant_table.h>
+
+struct xen_drm_front_shbuf {
+	/*
+	 * number of references granted for the backend use:
+	 *  - for allocated/imported dma-buf's this holds number of grant
+	 *    references for the page directory and pages of the buffer
+	 *  - for the buffer provided by the backend this holds number of
+	 *    grant references for the page directory as grant references for
+	 *    the buffer will be provided by the backend
+	 */
+	int num_grefs;
+	grant_ref_t *grefs;
+	unsigned char *directory;
+
+	/*
+	 * there are 2 ways to provide backing storage for this shared buffer:
+	 * either pages or sgt. If the buffer is created from an sgt then we
+	 * own the pages and must free them ourselves on closure.
+	 */
+	int num_pages;
+	struct page **pages;
+
+	struct sg_table *sgt;
+
+	struct xenbus_device *xb_dev;
+
+	/* these are the ops used internally depending on be_alloc mode */
+	const struct xen_drm_front_shbuf_ops *ops;
+
+	/* Xen map handles for the buffer allocated by the backend */
+	grant_handle_t *backend_map_handles;
+};
+
+struct xen_drm_front_shbuf_cfg {
+	struct xenbus_device *xb_dev;
+	size_t size;
+	struct page **pages;
+	struct sg_table *sgt;
+	bool be_alloc;
+};
+
+struct xen_drm_front_shbuf *
+xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);
+
+grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);
+
+int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);
+
+int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);
+
+void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);
+
+void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
+
+#endif /* __XEN_DRM_FRONT_SHBUF_H_ */

From d31b5c91a27b768ee221fe677eb0b18b4cfb9df8 Mon Sep 17 00:00:00 2001
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Date: Tue, 3 Apr 2018 15:32:38 +0300
Subject: [PATCH 0204/1286] MAINTAINERS: Add drm/xen-front maintainer entry

Add myself as drivers/gpu/drm/xen maintainer.

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180403123238.19294-1-andr2000@gmail.com
---
 MAINTAINERS | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 004d2c14ee4b..4af7f6119530 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4841,6 +4841,15 @@ S:	Maintained
 F:	drivers/gpu/drm/tinydrm/
 F:	include/drm/tinydrm/
 
+DRM DRIVERS FOR XEN
+M:	Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+L:	dri-devel@lists.freedesktop.org
+L:	xen-devel@lists.xen.org
+S:	Supported
+F:	drivers/gpu/drm/xen/
+F:	Documentation/gpu/xen-front.rst
+
 DRM TTM SUBSYSTEM
 M:	Christian Koenig <christian.koenig@amd.com>
 M:	Roger He <Hongbo.He@amd.com>

From 2a694feb93556e979793081b5086f26fa8d65156 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 3 Apr 2018 19:35:37 +0100
Subject: [PATCH 0205/1286] drm/i915: Store preemption capability in
 engine->flags
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's avoid having to delve down the pointer chain to see if the i915
device has support for preemption and store that on the engine, which
made the decision in the first place!

v2: Refactor common preemption policy between execlists/guc.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tomasz Lis <tomasz.lis@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180403183537.5522-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_guc_submission.c | 16 +++++++++++++---
 drivers/gpu/drm/i915/intel_lrc.c            |  7 +++++--
 drivers/gpu/drm/i915/intel_ringbuffer.h     | 18 ++++++++++++++++--
 3 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 749f27916a02..97121230656c 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -657,6 +657,16 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 	port_set(port, i915_request_get(rq));
 }
 
+static inline int rq_prio(const struct i915_request *rq)
+{
+	return rq->priotree.priority;
+}
+
+static inline int port_prio(const struct execlist_port *port)
+{
+	return rq_prio(port_request(port));
+}
+
 static void guc_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -672,12 +682,12 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
 	if (port_isset(port)) {
-		if (engine->i915->preempt_context) {
+		if (intel_engine_has_preemption(engine)) {
 			struct guc_preempt_work *preempt_work =
 				&engine->i915->guc.preempt_work[engine->id];
+			int prio = execlists->queue_priority;
 
-			if (execlists->queue_priority >
-			    max(port_request(port)->priotree.priority, 0)) {
+			if (__execlists_need_preempt(prio, port_prio(port))) {
 				execlists_set_active(execlists,
 						     EXECLISTS_ACTIVE_PREEMPT);
 				queue_work(engine->i915->guc.preempt_wq,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4d08875422b6..88472845ce96 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -183,7 +183,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 				const struct i915_request *last,
 				int prio)
 {
-	return engine->i915->preempt_context && prio > max(rq_prio(last), 0);
+	return (intel_engine_has_preemption(engine) &&
+		__execlists_need_preempt(prio, rq_prio(last)));
 }
 
 /**
@@ -2117,11 +2118,13 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->unpark = NULL;
 
 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+	if (engine->i915->preempt_context)
+		engine->flags |= I915_ENGINE_HAS_PREEMPTION;
 
 	engine->i915->caps.scheduler =
 		I915_SCHEDULER_CAP_ENABLED |
 		I915_SCHEDULER_CAP_PRIORITY;
-	if (engine->i915->preempt_context)
+	if (intel_engine_has_preemption(engine))
 		engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 40461e29cdab..256d58487559 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -562,6 +562,7 @@ struct intel_engine_cs {
 
 #define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
 #define I915_ENGINE_SUPPORTS_STATS   BIT(1)
+#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 	unsigned int flags;
 
 	/*
@@ -621,16 +622,29 @@ struct intel_engine_cs {
 	} stats;
 };
 
-static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
+static inline bool
+intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
 {
 	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
 }
 
-static inline bool intel_engine_supports_stats(struct intel_engine_cs *engine)
+static inline bool
+intel_engine_supports_stats(const struct intel_engine_cs *engine)
 {
 	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
 }
 
+static inline bool
+intel_engine_has_preemption(const struct intel_engine_cs *engine)
+{
+	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
+}
+
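+/*
+ * Preempt only if the waiting priority is strictly greater than both the
+ * default priority (0) and the priority of the currently executing request.
+ */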
+static inline bool __execlists_need_preempt(int prio, int last)
+{
+	return prio > max(0, last);
+}
+
 static inline void
 execlists_set_active(struct intel_engine_execlists *execlists,
 		     unsigned int bit)

From 4bfbec68966082f046232a011d6f371e8554bb40 Mon Sep 17 00:00:00 2001
From: Ramalingam C <ramalingam.c@intel.com>
Date: Mon, 2 Apr 2018 17:20:22 +0530
Subject: [PATCH 0206/1286] drm/i915: Read HDCP R0 thrice in case of mismatch

As per the DP spec, when an R0 mismatch is detected the HDCP source is
supposed to re-read R0 at least twice.

And for both HDMI and DP the minimum wait required for R0 availability is
100ms. So this patch changes the wait time to 100ms and retries twice,
with a 100ms interval between attempts.

This patch is needed for DP HDCP1.4 CTS Test: 1A-06.

v2:
  No Change
v3:
  Comment on R0 retry is moved closer to the code[Seanpaul]
v4:
  Removing unwanted noise introduced in v3.

Signed-off-by: Ramalingam C <ramalingam.c@intel.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1522669822-2508-1-git-send-email-ramalingam.c@intel.com
---
 drivers/gpu/drm/i915/intel_hdcp.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 14ca5d3057a7..f2cf2e3acd3c 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -506,15 +506,26 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
 	 */
 	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
 
-	ri.reg = 0;
-	ret = shim->read_ri_prime(intel_dig_port, ri.shim);
-	if (ret)
-		return ret;
-	I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+	tries = 3;
 
-	/* Wait for Ri prime match */
-	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
-		     (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+	/*
+	 * The DP HDCP spec mandates two more attempts to read R0 in case
+	 * of an R0 mismatch.
+	 */
+	for (i = 0; i < tries; i++) {
+		ri.reg = 0;
+		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+		if (ret)
+			return ret;
+		I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+
+		/* Wait for Ri prime match */
+		if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+		    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+			break;
+	}
+
+	if (i == tries) {
 		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
 			  I915_READ(PORT_HDCP_STATUS(port)));
 		return -ETIMEDOUT;

From 41baafae293d617dc89ba320be662afffc5ada6d Mon Sep 17 00:00:00 2001
From: Ramalingam C <ramalingam.c@intel.com>
Date: Mon, 2 Apr 2018 15:40:32 +0530
Subject: [PATCH 0207/1286] drm/i915: Read Vprime thrice incase of mismatch

In case of a V prime mismatch, the DP HDCP spec mandates re-reading
V prime at least twice.

This patch is needed for DP HDCP1.4 CTS Test: 1B-05.

v2:
  Moved the V' validation into a function for retry. [Sean Paul]
v3:
  Removed Inline keyword and DRM_DEBUG_KMS are used [Sean Paul]

Signed-off-by: Ramalingam C <ramalingam.c@intel.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1522663834-24482-3-git-send-email-ramalingam.c@intel.com
---
 drivers/gpu/drm/i915/intel_hdcp.c | 112 +++++++++++++++++++-----------
 1 file changed, 70 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index f2cf2e3acd3c..d9dec32c368e 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -142,53 +142,17 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv)
 	return true;
 }
 
-/* Implements Part 2 of the HDCP authorization procedure */
 static
-int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
-			       const struct intel_hdcp_shim *shim)
+int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
+				const struct intel_hdcp_shim *shim,
+				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
 {
 	struct drm_i915_private *dev_priv;
 	u32 vprime, sha_text, sha_leftovers, rep_ctl;
-	u8 bstatus[2], num_downstream, *ksv_fifo;
 	int ret, i, j, sha_idx;
 
 	dev_priv = intel_dig_port->base.base.dev->dev_private;
 
-	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
-	if (ret) {
-		DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
-		return ret;
-	}
-
-	ret = shim->read_bstatus(intel_dig_port, bstatus);
-	if (ret)
-		return ret;
-
-	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
-	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
-		DRM_ERROR("Max Topology Limit Exceeded\n");
-		return -EPERM;
-	}
-
-	/*
-	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
-	 * the HDCP encryption. That implies that repeater can't have its own
-	 * display. As there is no consumption of encrypted content in the
-	 * repeater with 0 downstream devices, we are failing the
-	 * authentication.
-	 */
-	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
-	if (num_downstream == 0)
-		return -EINVAL;
-
-	ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
-	if (!ksv_fifo)
-		return -ENOMEM;
-
-	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
-	if (ret)
-		return ret;
-
 	/* Process V' values from the receiver */
 	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
 		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
@@ -353,7 +317,8 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 			return ret;
 		sha_idx += sizeof(sha_text);
 	} else {
-		DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
+		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
+			      sha_leftovers);
 		return -EINVAL;
 	}
 
@@ -381,14 +346,77 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
 				    HDCP_SHA1_COMPLETE,
 				    HDCP_SHA1_COMPLETE, 1)) {
-		DRM_ERROR("Timed out waiting for SHA1 complete\n");
+		DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
 		return -ETIMEDOUT;
 	}
 	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
-		DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
+		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
 		return -ENXIO;
 	}
 
+	return 0;
+}
+
+/* Implements Part 2 of the HDCP authorization procedure */
+static
+int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
+			       const struct intel_hdcp_shim *shim)
+{
+	u8 bstatus[2], num_downstream, *ksv_fifo;
+	int ret, i, tries = 3;
+
+	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+	if (ret) {
+		DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+		return ret;
+	}
+
+	ret = shim->read_bstatus(intel_dig_port, bstatus);
+	if (ret)
+		return ret;
+
+	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
+	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
+		DRM_ERROR("Max Topology Limit Exceeded\n");
+		return -EPERM;
+	}
+
+	/*
+	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
+	 * the HDCP encryption. That implies that repeater can't have its own
+	 * display. As there is no consumption of encrypted content in the
+	 * repeater with 0 downstream devices, we are failing the
+	 * authentication.
+	 */
+	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
+	if (num_downstream == 0)
+		return -EINVAL;
+
+	ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
+	if (!ksv_fifo)
+		return -ENOMEM;
+
+	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+	if (ret)
+		return ret;
+
+	/*
+	 * When V prime mismatches, DP Spec mandates re-read of
+	 * V prime at least twice.
+	 */
+	for (i = 0; i < tries; i++) {
+		ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
+						  ksv_fifo, num_downstream,
+						  bstatus);
+		if (!ret)
+			break;
+	}
+
+	if (i == tries) {
+		DRM_ERROR("V Prime validation failed.(%d)\n", ret);
+		return ret;
+	}
+
 	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
 		      num_downstream);
 	return 0;

From 6308a31544284c1f60ab9a3052ee05b7877421ca Mon Sep 17 00:00:00 2001
From: Ramalingam C <ramalingam.c@intel.com>
Date: Mon, 2 Apr 2018 15:40:33 +0530
Subject: [PATCH 0208/1286] drm/i915: Check hdcp key loadability

The HDCP1.4 key can be loaded only when power well #1 is enabled and cdclk
is enabled. The i915 power well infrastructure is used to verify this
requirement.

This patch enables HDCP initialization for HSW, BDW, and BXT.

v2:
  Choose the PW# based on the platform.
v3:
  No Changes.

Signed-off-by: Ramalingam C <ramalingam.c@intel.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1522663834-24482-4-git-send-email-ramalingam.c@intel.com
---
 drivers/gpu/drm/i915/intel_hdcp.c | 41 +++++++++++++++++++++++++++++--
 1 file changed, 39 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index d9dec32c368e..98a9c81e2dc1 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -37,6 +37,43 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
 	return 0;
 }
 
+static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
+	enum i915_power_well_id id;
+	bool enabled = false;
+
+	/*
+	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
+	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
+	 */
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		id = HSW_DISP_PW_GLOBAL;
+	else
+		id = SKL_DISP_PW_1;
+
+	mutex_lock(&power_domains->lock);
+
+	/* PG1 (power well #1) needs to be enabled */
+	for_each_power_well(dev_priv, power_well) {
+		if (power_well->id == id) {
+			enabled = power_well->ops->is_enabled(dev_priv,
+							      power_well);
+			break;
+		}
+	}
+	mutex_unlock(&power_domains->lock);
+
+	/*
+	 * Another requirement for HDCP key loadability is that the PLL for
+	 * cdclk is enabled. Without an active crtc we won't land here, so we
+	 * assume that cdclk is already on.
+	 */
+
+	return enabled;
+}
+
 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
@@ -619,8 +656,8 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
 	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
 		      connector->base.name, connector->base.base.id);
 
-	if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
-		DRM_ERROR("PG1 is disabled, cannot load keys\n");
+	if (!hdcp_key_loadable(dev_priv)) {
+		DRM_ERROR("HDCP key Load is not possible\n");
 		return -ENXIO;
 	}
 

From 6be1187dbffa0027ea379c53f7ca0c782515c610 Mon Sep 17 00:00:00 2001
From: Xidong Wang <wangxidong_97@163.com>
Date: Wed, 4 Apr 2018 10:38:24 +0100
Subject: [PATCH 0209/1286] drm/i915: Do not use kfree() to free a
 kmem_cache_alloc() return value

Along the eb_lookup_vmas() error path, the return value from
kmem_cache_alloc() was freed using kfree(). Fix it to use the proper
kmem_cache_free() instead.

Fixes: d1b48c1e7184 ("drm/i915: Replace execbuf vma ht with an idr")
Signed-off-by: Xidong Wang <wangxidong_97@163.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: <stable@vger.kernel.org> # v4.14+
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180404093824.9313-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8c170db8495d..0414228cd2b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 
 		err = radix_tree_insert(handles_vma, handle, vma);
 		if (unlikely(err)) {
-			kfree(lut);
+			kmem_cache_free(eb->i915->luts, lut);
 			goto err_obj;
 		}
 

From 2c66555ec19235efd689741c44bbeb893aa8e7de Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 4 Apr 2018 10:33:29 +0100
Subject: [PATCH 0210/1286] drm/i915/selftests: Add basic sanitychecks for
 execlists
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Before adding a new feature to execlists submission, we should endeavour
to cover the baseline behaviour with selftests. So start the ball
rolling.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
CC: Michel Thierry <michel.thierry@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180404093329.5383-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c              |   4 +
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 drivers/gpu/drm/i915/selftests/intel_lrc.c    | 507 ++++++++++++++++++
 3 files changed, 512 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_lrc.c

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 88472845ce96..3592288e4696 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2609,3 +2609,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 		}
 	}
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_lrc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 9c76f0305b6a..8bf6aa573226 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -20,4 +20,5 @@ selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(contexts, i915_gem_context_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
+selftest(execlists, intel_execlists_live_selftests)
 selftest(guc, intel_guc_live_selftest)
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
new file mode 100644
index 000000000000..0481e2e01146
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -0,0 +1,507 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_context.h"
+
+struct spinner {
+	struct drm_i915_private *i915;
+	struct drm_i915_gem_object *hws;
+	struct drm_i915_gem_object *obj;
+	u32 *batch;
+	void *seqno;
+};
+
+static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
+{
+	unsigned int mode;
+	void *vaddr;
+	int err;
+
+	GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+	memset(spin, 0, sizeof(*spin));
+	spin->i915 = i915;
+
+	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(spin->hws)) {
+		err = PTR_ERR(spin->hws);
+		goto err;
+	}
+
+	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(spin->obj)) {
+		err = PTR_ERR(spin->obj);
+		goto err_hws;
+	}
+
+	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_obj;
+	}
+	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+	mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+	vaddr = i915_gem_object_pin_map(spin->obj, mode);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_unpin_hws;
+	}
+	spin->batch = vaddr;
+
+	return 0;
+
+err_unpin_hws:
+	i915_gem_object_unpin_map(spin->hws);
+err_obj:
+	i915_gem_object_put(spin->obj);
+err_hws:
+	i915_gem_object_put(spin->hws);
+err:
+	return err;
+}
+
+static unsigned int seqno_offset(u64 fence)
+{
+	return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+		       const struct i915_request *rq)
+{
+	return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct spinner *spin,
+			      struct i915_request *rq,
+			      u32 arbitration_command)
+{
+	struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+	struct i915_vma *hws, *vma;
+	u32 *batch;
+	int err;
+
+	vma = i915_vma_instance(spin->obj, vm, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	hws = i915_vma_instance(spin->hws, vm, NULL);
+	if (IS_ERR(hws))
+		return PTR_ERR(hws);
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		return err;
+
+	err = i915_vma_pin(hws, 0, 0, PIN_USER);
+	if (err)
+		goto unpin_vma;
+
+	i915_vma_move_to_active(vma, rq, 0);
+	if (!i915_gem_object_has_active_reference(vma->obj)) {
+		i915_gem_object_get(vma->obj);
+		i915_gem_object_set_active_reference(vma->obj);
+	}
+
+	i915_vma_move_to_active(hws, rq, 0);
+	if (!i915_gem_object_has_active_reference(hws->obj)) {
+		i915_gem_object_get(hws->obj);
+		i915_gem_object_set_active_reference(hws->obj);
+	}
+
+	batch = spin->batch;
+
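+	/*
+	 * The batch writes its seqno to the per-context HWS slot and then
+	 * branches back to its own start, spinning on the GPU until
+	 * spinner_end() overwrites the first dword with MI_BATCH_BUFFER_END.
+	 */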
+	*batch++ = MI_STORE_DWORD_IMM_GEN4;
+	*batch++ = lower_32_bits(hws_address(hws, rq));
+	*batch++ = upper_32_bits(hws_address(hws, rq));
+	*batch++ = rq->fence.seqno;
+
+	*batch++ = arbitration_command;
+
+	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+	*batch++ = lower_32_bits(vma->node.start);
+	*batch++ = upper_32_bits(vma->node.start);
+	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+	i915_gem_chipset_flush(spin->i915);
+
+	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+	i915_vma_unpin(hws);
+unpin_vma:
+	i915_vma_unpin(vma);
+	return err;
+}
+
+static struct i915_request *
+spinner_create_request(struct spinner *spin,
+		       struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine,
+		       u32 arbitration_command)
+{
+	struct i915_request *rq;
+	int err;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq))
+		return rq;
+
+	err = emit_recurse_batch(spin, rq, arbitration_command);
+	if (err) {
+		__i915_request_add(rq, false);
+		return ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
+{
+	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+	return READ_ONCE(*seqno);
+}
+
+struct wedge_me {
+	struct delayed_work work;
+	struct drm_i915_private *i915;
+	const void *symbol;
+};
+
+static void wedge_me(struct work_struct *work)
+{
+	struct wedge_me *w = container_of(work, typeof(*w), work.work);
+
+	pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
+
+	GEM_TRACE("%pS timed out.\n", w->symbol);
+	GEM_TRACE_DUMP();
+
+	i915_gem_set_wedged(w->i915);
+}
+
+static void __init_wedge(struct wedge_me *w,
+			 struct drm_i915_private *i915,
+			 long timeout,
+			 const void *symbol)
+{
+	w->i915 = i915;
+	w->symbol = symbol;
+
+	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
+	schedule_delayed_work(&w->work, timeout);
+}
+
+static void __fini_wedge(struct wedge_me *w)
+{
+	cancel_delayed_work_sync(&w->work);
+	destroy_delayed_work_on_stack(&w->work);
+	w->i915 = NULL;
+}
+
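+/*
+ * Run the following statement once with a delayed worker armed in the
+ * background: if it takes longer than TIMEOUT, wedge_me() fires and wedges
+ * the GPU so that any blocking waits complete instead of hanging forever.
+ */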
+#define wedge_on_timeout(W, DEV, TIMEOUT)				\
+	for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
+	     (W)->i915;							\
+	     __fini_wedge((W)))
+
+static noinline int
+flush_test(struct drm_i915_private *i915, unsigned int flags)
+{
+	struct wedge_me w;
+
+	cond_resched();
+
+	wedge_on_timeout(&w, i915, HZ)
+		i915_gem_wait_for_idle(i915, flags);
+
+	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+}
+
+static void spinner_end(struct spinner *spin)
+{
+	*spin->batch = MI_BATCH_BUFFER_END;
+	i915_gem_chipset_flush(spin->i915);
+}
+
+static void spinner_fini(struct spinner *spin)
+{
+	spinner_end(spin);
+
+	i915_gem_object_unpin_map(spin->obj);
+	i915_gem_object_put(spin->obj);
+
+	i915_gem_object_unpin_map(spin->hws);
+	i915_gem_object_put(spin->hws);
+}
+
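+/*
+ * Wait for the request to be submitted to hardware and then for the spinner
+ * batch to report its seqno via the HWS, i.e. for it to actually start
+ * executing on the GPU.
+ */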
+static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
+{
+	if (!wait_event_timeout(rq->execute,
+				READ_ONCE(rq->global_seqno),
+				msecs_to_jiffies(10)))
+		return false;
+
+	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+					       rq->fence.seqno),
+			     10) &&
+		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+					    rq->fence.seqno),
+			  1000));
+}
+
+static int live_sanitycheck(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	enum intel_engine_id id;
+	struct spinner spin;
+	int err = -ENOMEM;
+
+	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+		return 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	if (spinner_init(&spin, i915))
+		goto err_unlock;
+
+	ctx = kernel_context(i915);
+	if (!ctx)
+		goto err_spin;
+
+	for_each_engine(engine, i915, id) {
+		struct i915_request *rq;
+
+		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ctx;
+		}
+
+		i915_request_add(rq);
+		if (!wait_for_spinner(&spin, rq)) {
+			GEM_TRACE("spinner failed to start\n");
+			GEM_TRACE_DUMP();
+			i915_gem_set_wedged(i915);
+			err = -EIO;
+			goto err_ctx;
+		}
+
+		spinner_end(&spin);
+		if (flush_test(i915, I915_WAIT_LOCKED)) {
+			err = -EIO;
+			goto err_ctx;
+		}
+	}
+
+	err = 0;
+err_ctx:
+	kernel_context_close(ctx);
+err_spin:
+	spinner_fini(&spin);
+err_unlock:
+	flush_test(i915, I915_WAIT_LOCKED);
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+}
+
+static int live_preempt(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx_hi, *ctx_lo;
+	struct spinner spin_hi, spin_lo;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+		return 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	if (spinner_init(&spin_hi, i915))
+		goto err_unlock;
+
+	if (spinner_init(&spin_lo, i915))
+		goto err_spin_hi;
+
+	ctx_hi = kernel_context(i915);
+	if (!ctx_hi)
+		goto err_spin_lo;
+	ctx_hi->priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+	ctx_lo = kernel_context(i915);
+	if (!ctx_lo)
+		goto err_ctx_hi;
+	ctx_lo->priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+	for_each_engine(engine, i915, id) {
+		struct i915_request *rq;
+
+		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!wait_for_spinner(&spin_lo, rq)) {
+			GEM_TRACE("lo spinner failed to start\n");
+			GEM_TRACE_DUMP();
+			i915_gem_set_wedged(i915);
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			spinner_end(&spin_lo);
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!wait_for_spinner(&spin_hi, rq)) {
+			GEM_TRACE("hi spinner failed to start\n");
+			GEM_TRACE_DUMP();
+			i915_gem_set_wedged(i915);
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		spinner_end(&spin_hi);
+		spinner_end(&spin_lo);
+		if (flush_test(i915, I915_WAIT_LOCKED)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+	}
+
+	err = 0;
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
+err_spin_lo:
+	spinner_fini(&spin_lo);
+err_spin_hi:
+	spinner_fini(&spin_hi);
+err_unlock:
+	flush_test(i915, I915_WAIT_LOCKED);
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+}
+
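+/*
+ * As live_preempt(), but both contexts are left at the default priority.
+ * The second request must not overtake the spinner on its own; it should
+ * only preempt once its priority is bumped to I915_PRIORITY_MAX via
+ * engine->schedule() after submission.
+ */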
+static int live_late_preempt(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx_hi, *ctx_lo;
+	struct spinner spin_hi, spin_lo;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+		return 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	if (spinner_init(&spin_hi, i915))
+		goto err_unlock;
+
+	if (spinner_init(&spin_lo, i915))
+		goto err_spin_hi;
+
+	ctx_hi = kernel_context(i915);
+	if (!ctx_hi)
+		goto err_spin_lo;
+
+	ctx_lo = kernel_context(i915);
+	if (!ctx_lo)
+		goto err_ctx_hi;
+
+	for_each_engine(engine, i915, id) {
+		struct i915_request *rq;
+
+		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!wait_for_spinner(&spin_lo, rq)) {
+			pr_err("First context failed to start\n");
+			goto err_wedged;
+		}
+
+		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+		if (IS_ERR(rq)) {
+			spinner_end(&spin_lo);
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (wait_for_spinner(&spin_hi, rq)) {
+			pr_err("Second context overtook first?\n");
+			goto err_wedged;
+		}
+
+		engine->schedule(rq, I915_PRIORITY_MAX);
+
+		if (!wait_for_spinner(&spin_hi, rq)) {
+			pr_err("High priority context failed to preempt the low priority context\n");
+			GEM_TRACE_DUMP();
+			goto err_wedged;
+		}
+
+		spinner_end(&spin_hi);
+		spinner_end(&spin_lo);
+		if (flush_test(i915, I915_WAIT_LOCKED)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+	}
+
+	err = 0;
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
+err_spin_lo:
+	spinner_fini(&spin_lo);
+err_spin_hi:
+	spinner_fini(&spin_hi);
+err_unlock:
+	flush_test(i915, I915_WAIT_LOCKED);
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+
+err_wedged:
+	spinner_end(&spin_hi);
+	spinner_end(&spin_lo);
+	i915_gem_set_wedged(i915);
+	err = -EIO;
+	goto err_ctx_lo;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_sanitycheck),
+		SUBTEST(live_preempt),
+		SUBTEST(live_late_preempt),
+	};
+	return i915_subtests(tests, i915);
+}

From c9fea6f4379c72b7c59e1efceab09a35bc7eac43 Mon Sep 17 00:00:00 2001
From: Oliver O'Halloran <oohall@gmail.com>
Date: Tue, 3 Apr 2018 15:34:01 +1000
Subject: [PATCH 0211/1286] drm/sti: Depend on OF rather than selecting it

Commit cc6b741c6f63 ("drm: sti: remove useless fields from vtg
structure") reworked some code inside of this driver and made it select
CONFIG_OF. This results in the entire OF layer being enabled when
building an allmodconfig on ia64. OF on ia64 is completely unsupported
so this isn't a great state of affairs.

The 0day robot noticed a link-time failure on ia64 caused by
using of_node_to_nid() in an otherwise unrelated driver. The
generic fallback for of_node_to_nid() only exists when:

	defined(CONFIG_OF) && defined(CONFIG_NUMA) == false

Since CONFIG_NUMA is usually selected for IA64 we get the link failure.
Fix this by making the driver depend on OF rather than selecting it,
odds are that was the original intent.
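
A simplified sketch of the declaration pattern in question (an
approximation of the include/linux/of.h arrangement, not a verbatim
quote):

	#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
	extern int of_node_to_nid(struct device_node *np);
	#else
	static inline int of_node_to_nid(struct device_node *np)
	{
		return NUMA_NO_NODE;
	}
	#endif

With CONFIG_NUMA set (as is typical on ia64), selecting CONFIG_OF picks
the extern declaration while no definition is ever built, so callers of
of_node_to_nid() fail at link time.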

Link: https://lists.01.org/pipermail/kbuild-all/2018-March/045172.html
Fixes: cc6b741c6f63 ("drm: sti: remove useless fields from vtg structure")
Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Cc: Vincent Abriou <vincent.abriou@st.com>
Cc: David Airlie <airlied@linux.ie>
Cc: dri-devel@lists.freedesktop.org
Cc: linux-ia64@vger.kernel.org
Cc: stable@vger.kernel.org
Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180403053401.30045-1-oohall@gmail.com
---
 drivers/gpu/drm/sti/Kconfig | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index cca4b3c9aeb5..1963cc1b1cc5 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,6 +1,6 @@
 config DRM_STI
 	tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
-	depends on DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
+	depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
 	select RESET_CONTROLLER
 	select DRM_KMS_HELPER
 	select DRM_GEM_CMA_HELPER
@@ -8,6 +8,5 @@ config DRM_STI
 	select DRM_PANEL
 	select FW_LOADER
 	select SND_SOC_HDMI_CODEC if SND_SOC
-	select OF
 	help
 	  Choose this option to enable DRM on STM stiH4xx chipset

From 40da1d310e8a155a09cd64b5b3fd3548a9dcfda0 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 5 Apr 2018 13:37:14 +0100
Subject: [PATCH 0212/1286] drm/i915: Only call finish_reset after a
 prepare_reset
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If we skip the intel_prepare_reset(), we should also skip the
intel_finish_reset(). If we use a flag set by intel_prepare_reset()
then we do not have to second guess based on external user controlled
state whether or not the prepare was called before deciding to finish
it after the reset. igt/gem_eio is one such example that may tweak
i915.reset faster than the code is expecting, leading to

[  190.233528] =====================================
[  190.233534] WARNING: bad unlock balance detected!
[  190.233540] 4.16.0-rc7-g335ef9849310-drmtip_10+ #1 Tainted: G     U
[  190.233547] -------------------------------------
[  190.233553] gem_eio/1348 is trying to release lock (crtc_ww_class_acquire) at:
[  190.233569] [<ffffffff895c7810>] drm_modeset_acquire_fini+0x0/0x60
[  190.233575] but there are no more locks to release!
[  190.233580]
               other info that might help us debug this:
[  190.233588] 3 locks held by gem_eio/1348:
[  190.233592]  #0:  (&f->f_pos_lock){+.+.}, at: [<00000000ab90c784>] __fdget_pos+0x3a/0x50
[  190.233607]  #1:  (sb_writers#11){.+.+}, at: [<00000000e1529265>] vfs_write+0x188/0x1a0
[  190.233622]  #2:  (&attr->mutex){+.+.}, at: [<0000000011f40afe>] simple_attr_write+0x36/0xd0
[  190.233635]
               stack backtrace:
[  190.233644] CPU: 0 PID: 1348 Comm: gem_eio Tainted: G     U           4.16.0-rc7-g335ef9849310-drmtip_10+ #1
[  190.233655] Hardware name: Dell Inc.                 OptiPlex GX280               /0G8310, BIOS A04 02/09/2005
[  190.233664] Call Trace:
[  190.233674]  dump_stack+0x67/0x95
[  190.233682]  ? drm_modeset_backoff+0x1b0/0x1b0
[  190.233690]  print_unlock_imbalance_bug+0xd2/0xe0
[  190.233698]  ? drm_modeset_backoff+0x1b0/0x1b0
[  190.233704]  lock_release+0x23e/0x300
[  190.233712]  drm_modeset_acquire_fini+0x16/0x60
[  190.233835]  intel_finish_reset+0x72/0x160 [i915]
[  190.233894]  i915_reset_device+0x1e9/0x240 [i915]
[  190.233953]  ? __intel_get_crtc_scanline+0x1c0/0x1c0 [i915]
[  190.233962]  ? work_on_cpu_safe+0x50/0x50
[  190.234020]  i915_handle_error+0x1f2/0x470 [i915]
[  190.234031]  ? __might_fault+0x39/0x90
[  190.234037]  ? __might_fault+0x39/0x90
[  190.234099]  i915_wedged_set+0x7f/0xc0 [i915]
[  190.234107]  simple_attr_write+0xb0/0xd0
[  190.234117]  full_proxy_write+0x51/0x80
[  190.234125]  __vfs_write+0x21/0x140
[  190.234133]  ? rcu_read_lock_sched_held+0x6f/0x80
[  190.234140]  ? rcu_sync_lockdep_assert+0x29/0x50
[  190.234147]  ? __sb_start_write+0x152/0x1f0
[  190.234152]  ? __sb_start_write+0x168/0x1f0
[  190.234159]  vfs_write+0xbd/0x1a0
[  190.234166]  SyS_write+0x40/0xa0
[  190.234173]  ? do_syscall_64+0x19/0x1b0
[  190.234180]  do_syscall_64+0x6b/0x1b0
[  190.234188]  entry_SYSCALL_64_after_hwframe+0x42/0xb7
[  190.234196] RIP: 0033:0x7f84c1b392b7
[  190.234201] RSP: 002b:00007f84b6755b00 EFLAGS: 00000293 ORIG_RAX: 0000000000000001
[  190.234211] RAX: ffffffffffffffda RBX: 0000000000000046 RCX: 00007f84c1b392b7
[  190.234218] RDX: 0000000000000002 RSI: 000055ec20abc8d6 RDI: 0000000000000046
[  190.234225] RBP: 000055ec20abc8d6 R08: 0000000000000000 R09: 0000000000000000
[  190.234231] R10: 0000000000000000 R11: 0000000000000293 R12: 0000000000000002
[  190.234238] R13: 0000000000000000 R14: 00007f84b0000b20 R15: 000055ec20ce4eb8

Testcase: igt/gem_eio
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405123714.3638-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_display.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3acd75753a31..6de2e1b1a4a7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3677,7 +3677,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	struct drm_atomic_state *state;
 	int ret;
 
-
 	/* reset doesn't touch the display */
 	if (!i915_modparams.force_reset_modeset_test &&
 	    !gpu_reset_clobbers_display(dev_priv))
@@ -3731,19 +3730,17 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
-	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+	struct drm_atomic_state *state;
 	int ret;
 
 	/* reset doesn't touch the display */
-	if (!i915_modparams.force_reset_modeset_test &&
-	    !gpu_reset_clobbers_display(dev_priv))
+	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
 		return;
 
+	state = fetch_and_zero(&dev_priv->modeset_restore_state);
 	if (!state)
 		goto unlock;
 
-	dev_priv->modeset_restore_state = NULL;
-
 	/* reset doesn't touch the display */
 	if (!gpu_reset_clobbers_display(dev_priv)) {
 		/* for testing only restore the display */

From 46a67c4d16ec86ca84a93f7dc92c54138e38a129 Mon Sep 17 00:00:00 2001
From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
Date: Wed, 4 Apr 2018 15:59:57 -0700
Subject: [PATCH 0213/1286] drm/i915: Fix memory leak in intel_hdcp auth

Static code analysis tool reported memory leak in
intel_hdcp_auth_downstream. Fixing the memory leak.

v2: Rebase, move free to a cleanup label(Jani)

Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Anusha Srivatsa <anusha.srivatsa@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
Reviewed-by: Anusha Srivatsa <anusha.srivatsa@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180404225957.7457-1-radhakrishna.sripada@intel.com
---
 drivers/gpu/drm/i915/intel_hdcp.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 98a9c81e2dc1..2db5da550a1c 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -435,7 +435,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 
 	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
 	if (ret)
-		return ret;
+		goto err;
 
 	/*
 	 * When V prime mismatches, DP Spec mandates re-read of
@@ -451,12 +451,15 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
 
 	if (i == tries) {
 		DRM_ERROR("V Prime validation failed.(%d)\n", ret);
-		return ret;
+		goto err;
 	}
 
 	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
 		      num_downstream);
-	return 0;
+	ret = 0;
+err:
+	kfree(ksv_fifo);
+	return ret;
 }
 
 /* Implements Part 1 of the HDCP authorization procedure */

From b651bd2a3ae33f6a98d6ea36ef2518f5211bdc4f Mon Sep 17 00:00:00 2001
From: Gaurav K Singh <gaurav.k.singh@intel.com>
Date: Thu, 5 Apr 2018 22:12:24 +0530
Subject: [PATCH 0214/1286] drm/i915/audio: Fix audio enumeration issue on BXT

On Apollolake, with a stress-test warm reboot, the audio card was not
getting enumerated after reboot. This was a sporadic issue on
Apollolake. The HW codec and HD audio controller link were going out of
sync; a fix for this exists in the i915 driver but was not being
invoked for BXT. Extend this fix to cover BXT as well.

Tested on apollolake chromebook by stress test warm reboot with 2500
iterations.

Bspec: 21829

Signed-off-by: Gaurav K Singh <gaurav.k.singh@intel.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1522946544-11524-1-git-send-email-gaurav.k.singh@intel.com
---
 drivers/gpu/drm/i915/intel_audio.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 709d6ca68074..656f6c931341 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
-	if (!IS_GEN9_BC(dev_priv))
+	if (!IS_GEN9_BC(dev_priv) && !IS_BROXTON(dev_priv))
 		return;
 
 	i915_audio_component_get_power(kdev);

From 028666793a0291b63eb61bae7252345821326a1b Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 30 Mar 2018 14:18:01 +0100
Subject: [PATCH 0215/1286] drm/i915/selftests: Avoid repeatedly harming the
 same innocent context
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We don't handle resetting the kernel context very well, or presumably any
context executing its breadcrumb commands in the ring as opposed to the
batchbuffer and flush. If we trigger a device reset twice in quick
succession while the kernel context is executing, we may end up skipping
the breadcrumb.  This is really only a problem for the selftest as
normally there is a large interlude between resets (hangcheck), or we
focus on resetting just one engine and so avoid repeatedly resetting
innocents.

Something to try would be a preempt-to-idle to quiesce the engine before
reset, so that innocent contexts would be spared the reset.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
CC: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180330131801.18327-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.c               |  3 ++
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 48 +++++++++++++++++--
 2 files changed, 47 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d354627882e3..684060ed8db6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1886,6 +1886,8 @@ void i915_reset(struct drm_i915_private *i915)
 	int ret;
 	int i;
 
+	GEM_TRACE("flags=%lx\n", error->flags);
+
 	might_sleep();
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -2016,6 +2018,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	struct i915_request *active_request;
 	int ret;
 
+	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
 
 	active_request = i915_gem_reset_prepare_engine(engine);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 9e4e0ad62724..d03abe7f8a53 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -979,6 +979,23 @@ unlock:
 	return err;
 }
 
+static int wait_for_others(struct drm_i915_private *i915,
+			   struct intel_engine_cs *exclude)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, i915, id) {
+		if (engine == exclude)
+			continue;
+
+		if (wait_for(intel_engine_is_idle(engine), 10))
+			return -EIO;
+	}
+
+	return 0;
+}
+
 static int igt_reset_queue(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -1027,13 +1044,36 @@ static int igt_reset_queue(void *arg)
 			i915_request_get(rq);
 			__i915_request_add(rq, true);
 
+			/*
+			 * XXX We don't handle resetting the kernel context
+			 * very well. If we trigger a device reset twice in
+			 * quick succession while the kernel context is
+			 * executing, we may end up skipping the breadcrumb.
+			 * This is really only a problem for the selftest as
+			 * normally there is a large interlude between resets
+			 * (hangcheck), or we focus on resetting just one
+			 * engine and so avoid repeatedly resetting innocents.
+			 */
+			err = wait_for_others(i915, engine);
+			if (err) {
+				pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
+				       __func__, engine->name);
+				i915_request_put(rq);
+				i915_request_put(prev);
+
+				GEM_TRACE_DUMP();
+				i915_gem_set_wedged(i915);
+				goto fini;
+			}
+
 			if (!wait_for_hang(&h, prev)) {
 				struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-				pr_err("%s: Failed to start request %x, at %x\n",
-				       __func__, prev->fence.seqno, hws_seqno(&h, prev));
-				intel_engine_dump(prev->engine, &p,
-						  "%s\n", prev->engine->name);
+				pr_err("%s(%s): Failed to start request %x, at %x\n",
+				       __func__, engine->name,
+				       prev->fence.seqno, hws_seqno(&h, prev));
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
 
 				i915_request_put(rq);
 				i915_request_put(prev);

From fd5ff5f6f697db774964c7100fc686fcc2f8ea78 Mon Sep 17 00:00:00 2001
From: Kevin Rogovin <kevin.rogovin@intel.com>
Date: Fri, 6 Apr 2018 11:05:55 +0300
Subject: [PATCH 0216/1286] drm/i915: Narration overview on GEM

Add a narration to i915.rst about Intel GEN GPU's: engines,
driver context and relocation. Also do minor reorder to improve
narration.

v5:
  More type fixes.
  Flow bullet list so lines are not too long.

Signed-off-by: Kevin Rogovin <kevin.rogovin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
[Joonas: correcting the patch title]
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523001957-6427-2-git-send-email-kevin.rogovin@intel.com
---
 Documentation/gpu/i915.rst | 120 ++++++++++++++++++++++++++++++-------
 1 file changed, 97 insertions(+), 23 deletions(-)

diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 7ecad7134677..cd2d796d23dd 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -249,6 +249,103 @@ Memory Management and Command Submission
 This sections covers all things related to the GEM implementation in the
 i915 driver.
 
+Intel GPU Basics
+----------------
+
+An Intel GPU has multiple engines. There are several engine types.
+
+- The RCS engine is for rendering 3D and performing compute; it is named
+  `I915_EXEC_RENDER` in user space.
+- BCS is a blitting (copy) engine; it is named `I915_EXEC_BLT` in user
+  space.
+- VCS is a video encode and decode engine; it is named `I915_EXEC_BSD`
+  in user space.
+- VECS is a video enhancement engine; it is named `I915_EXEC_VEBOX` in
+  user space.
+- The enumeration `I915_EXEC_DEFAULT` does not refer to a specific engine;
+  instead it is to be used by user space to specify a default rendering
+  engine (for 3D) that may or may not be the same as RCS.
+
+The Intel GPU family is a family of integrated GPUs using Unified
+Memory Access. To have the GPU "do work", user space feeds the GPU
+batch buffers via one of the ioctls `DRM_IOCTL_I915_GEM_EXECBUFFER2`
+or `DRM_IOCTL_I915_GEM_EXECBUFFER2_WR`. Most such batchbuffers will
+instruct the GPU to perform work (for example rendering) and that work
+needs memory from which to read and memory to which to write. All memory
+is encapsulated within GEM buffer objects (usually created with the ioctl
+`DRM_IOCTL_I915_GEM_CREATE`). An ioctl providing a batchbuffer for the GPU
+to execute will also list all GEM buffer objects that the batchbuffer reads
+and/or writes. For implementation details of memory management see
+`GEM BO Management Implementation Details`_.
+
+The i915 driver allows user space to create a context via the ioctl
+`DRM_IOCTL_I915_GEM_CONTEXT_CREATE`; the context is identified by a
+32-bit integer. Such a context should be viewed by user space as loosely
+analogous to the idea of a CPU process of an operating system. The i915
+driver guarantees that commands issued to a fixed context are to be
+executed so that writes of a previously issued command are seen by
+reads of following commands. Actions issued between different contexts
+(even if from the same file descriptor) are NOT given that guarantee,
+and the only way to synchronize across contexts (even from the same
+file descriptor) is through the use of fences. At least as far back as
+Gen4, a context also carries with it a GPU HW context; the HW context
+is essentially (most of, at least) the state of the GPU. In addition
+to the ordering guarantees, the kernel restores GPU state via the HW
+context when commands are issued to a context; this saves user space
+the need to restore (most of, at least) the GPU state at the start of
+each batchbuffer. The non-deprecated ioctls to submit batchbuffer work
+can pass that ID (in the lower bits of drm_i915_gem_execbuffer2::rsvd1)
+to identify what context to use with the command.
+
+The GPU has its own memory management and address space. The kernel
+driver maintains the memory translation table for the GPU. For older
+GPUs (i.e. those before Gen8), there is a single global such translation
+table, a global Graphics Translation Table (GTT). For newer generation
+GPUs each context has its own translation table, called Per-Process
+Graphics Translation Table (PPGTT). Of important note is that, although
+PPGTT is named per-process, it is actually per-context. When user space
+submits a batchbuffer, the kernel walks the list of GEM buffer objects
+used by the batchbuffer and guarantees that not only is the memory of
+each such GEM buffer object resident but it is also present in the
+(PP)GTT. If the GEM buffer object is not yet placed in the (PP)GTT,
+then it is given an address. Two consequences of this are: the kernel
+needs to edit the batchbuffer submitted to write the correct value of
+the GPU address when a GEM BO is assigned a GPU address and the kernel
+might evict a different GEM BO from the (PP)GTT to make address room
+for another GEM BO. Consequently, the ioctls submitting a batchbuffer
+for execution also include a list of all locations within buffers that
+refer to GPU-addresses so that the kernel can edit the buffer correctly.
+This process is dubbed relocation.
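+
+As a deliberately simplified sketch of this flow (the uapi structures,
+ioctls and flags below are the real ones from `i915_drm.h`, used via
+libdrm's `drmIoctl()`, but error handling, buffer mapping and the actual
+batch commands are omitted, and `target_bo_handle` stands in for an
+existing GEM handle), a minimal user space submission looks roughly
+like::
+
+    int fd = open("/dev/dri/renderD128", O_RDWR);
+
+    struct drm_i915_gem_context_create ctx = {};
+    struct drm_i915_gem_create batch = { .size = 4096 };
+    struct drm_i915_gem_relocation_entry reloc = {};
+    struct drm_i915_gem_exec_object2 obj[2] = {};
+    struct drm_i915_gem_execbuffer2 eb = {};
+
+    drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &ctx);
+    drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &batch);
+
+    /* The BO referenced by the batch, then the batch BO itself (the
+     * batchbuffer is the last object listed by default). */
+    reloc.target_handle = target_bo_handle;
+    reloc.offset = 0;      /* where the address lives in the batch */
+    reloc.read_domains = I915_GEM_DOMAIN_RENDER;
+    obj[0].handle = target_bo_handle;
+    obj[1].handle = batch.handle;
+    obj[1].relocation_count = 1;
+    obj[1].relocs_ptr = (uintptr_t)&reloc;
+
+    eb.buffers_ptr = (uintptr_t)obj;
+    eb.buffer_count = 2;
+    eb.batch_len = 4096;
+    eb.flags = I915_EXEC_RENDER;
+    i915_execbuffer2_set_context_id(eb, ctx.ctx_id);
+
+    drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);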
+
+GEM BO Management Implementation Details
+----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma.h
+   :doc: Virtual Memory Address
+
+Buffer Object Eviction
+----------------------
+
+This section documents the interface functions for evicting buffer
+objects to make space available in the virtual gpu address spaces. Note
+that this is mostly orthogonal to shrinking buffer objects caches, which
+has the goal to make main memory (shared with the gpu through the
+unified memory architecture) available.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_evict.c
+   :internal:
+
+Buffer Object Memory Shrinking
+------------------------------
+
+This section documents the interface function for shrinking memory usage
+of buffer object caches. Shrinking is used to make main memory
+available. Note that this is mostly orthogonal to evicting buffer
+objects, which has the goal to make space in gpu virtual address spaces.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_shrinker.c
+   :internal:
+
 Batchbuffer Parsing
 -------------------
 
@@ -312,29 +409,6 @@ Object Tiling IOCTLs
 .. kernel-doc:: drivers/gpu/drm/i915/i915_gem_tiling.c
    :doc: buffer object tiling
 
-Buffer Object Eviction
-----------------------
-
-This section documents the interface functions for evicting buffer
-objects to make space available in the virtual gpu address spaces. Note
-that this is mostly orthogonal to shrinking buffer objects caches, which
-has the goal to make main memory (shared with the gpu through the
-unified memory architecture) available.
-
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_evict.c
-   :internal:
-
-Buffer Object Memory Shrinking
-------------------------------
-
-This section documents the interface function for shrinking memory usage
-of buffer object caches. Shrinking is used to make main memory
-available. Note that this is mostly orthogonal to evicting buffer
-objects, which has the goal to make space in gpu virtual address spaces.
-
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_shrinker.c
-   :internal:
-
 WOPCM
 =====
 

From 4d42db1805abde398a5852dc62bfaccd9f0269bb Mon Sep 17 00:00:00 2001
From: Kevin Rogovin <kevin.rogovin@intel.com>
Date: Fri, 6 Apr 2018 11:05:56 +0300
Subject: [PATCH 0217/1286] drm/i915: Add link to documentation in
 i915_gem_execbuffer.c

Add the documentation of "DOC: User command execution" of
i915_gem_execbuffer.c into a new section in i915.rst.

Signed-off-by: Kevin Rogovin <kevin.rogovin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
[Joonas: correcting the patch title]
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523001957-6427-3-git-send-email-kevin.rogovin@intel.com
---
 Documentation/gpu/i915.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index cd2d796d23dd..34d22f275708 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -364,6 +364,12 @@ Batchbuffer Pools
 .. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
    :internal:
 
+User Batchbuffer Execution
+--------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_execbuffer.c
+   :doc: User command execution
+
 Logical Rings, Logical Ring Contexts and Execlists
 --------------------------------------------------
 

From 99d7e4eeea778374ecea279d0379fbecb0b297bf Mon Sep 17 00:00:00 2001
From: Kevin Rogovin <kevin.rogovin@intel.com>
Date: Fri, 6 Apr 2018 11:05:57 +0300
Subject: [PATCH 0218/1286] drm/i915: Describe the bottom of stack in
 processing a batchbuffer

Now that "DOC: User command execution" of i915_gem_execbuffer.c is included
in the i915.rst, it is beneficial (for new developers) to read what happens
at the bottom of the driver stack (in terms of bytes written to be read
by the GPU) when processing a user-space batchbuffer.

v5:
  Typo correction: add a missing double backtick.

Signed-off-by: Kevin Rogovin <kevin.rogovin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
[Joonas: correcting the patch title]
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523001957-6427-4-git-send-email-kevin.rogovin@intel.com
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 29 ++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0414228cd2b5..c74f5df3fb5a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -81,6 +81,35 @@ enum {
  * but this remains just a hint as the kernel may choose a new location for
  * any object in the future.
  *
+ * At the level of talking to the hardware, submitting a batchbuffer for the
+ * GPU to execute is a matter of adding content to a buffer from which the HW
+ * command streamer is reading (a simplified sketch follows the list below).
+ *
+ * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
+ *    Execlists, this command is not placed on the same buffer as the
+ *    remaining items.
+ *
+ * 2. Add a command to invalidate caches to the buffer.
+ *
+ * 3. Add a batchbuffer start command to the buffer; the start command is
+ *    essentially a token together with the GPU address of the batchbuffer
+ *    to be executed.
+ *
+ * 4. Add a pipeline flush to the buffer.
+ *
+ * 5. Add a memory write command to the buffer to record when the GPU
+ *    is done executing the batchbuffer. The memory write writes the
+ *    global sequence number of the request, ``i915_request::global_seqno``;
+ *    the i915 driver uses the current value at that memory location to
+ *    determine if the GPU has completed the batchbuffer.
+ *
+ * 6. Add a user interrupt command to the buffer. This command instructs
+ *    the GPU to issue an interrupt when the command, pipeline flush and
+ *    memory write are completed.
+ *
+ * 7. Inform the hardware of the additional commands added to the buffer
+ *    (by updating the tail pointer).
+ *
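+ * As a rough, generation-agnostic illustration (not the literal emission
+ * code; the exact commands vary by platform and engine), the resulting
+ * ring contents for one request might look like::
+ *
+ *	<context load / ELSP submission>   (execlists; separate from the ring)
+ *	PIPE_CONTROL or MI_FLUSH_DW        (invalidate caches)
+ *	MI_BATCH_BUFFER_START              <GPU address of the user batch>
+ *	PIPE_CONTROL or MI_FLUSH_DW        (post-batch flush)
+ *	MI_STORE_DWORD_IMM                 <seqno address> <global_seqno>
+ *	MI_USER_INTERRUPT
+ *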
  * Processing an execbuf ioctl is conceptually split up into a few phases.
  *
  * 1. Validation - Ensure all the pointers, handles and flags are valid.

From e34b0345e6a531f980a6560fdc3b651de9cfcc67 Mon Sep 17 00:00:00 2001
From: Michel Thierry <michel.thierry@intel.com>
Date: Thu, 5 Apr 2018 17:00:48 +0300
Subject: [PATCH 0219/1286] drm/i915/icl: Add reset control register changes

The bits used to reset the different engines/domains have changed in
GEN11, this patch maps the reset engine mask bits with the new bits
in the reset control register.

v2: Use shift-left instead of BIT macro to match the file style (Paulo).
v3: Reuse gen8_reset_engines (Daniele).
v4: Do not call intel_uncore_forcewake_reset after reset, we may be
using the forcewake to read protected registers elsewhere and those
results may be clobbered by the concurrent dropping of forcewake.

bspec: 19212
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405140052.10682-1-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/i915_reg.h     | 11 ++++++
 drivers/gpu/drm/i915/intel_uncore.c | 53 +++++++++++++++++++++++++++--
 2 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 176dca6554f4..b2a2d8fbbc68 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -301,6 +301,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN6_GRDOM_VECS		(1 << 4)
 #define  GEN9_GRDOM_GUC			(1 << 5)
 #define  GEN8_GRDOM_MEDIA2		(1 << 7)
+/* GEN11 changed all bit defs except for FULL & RENDER */
+#define  GEN11_GRDOM_FULL		GEN6_GRDOM_FULL
+#define  GEN11_GRDOM_RENDER		GEN6_GRDOM_RENDER
+#define  GEN11_GRDOM_BLT		(1 << 2)
+#define  GEN11_GRDOM_GUC		(1 << 3)
+#define  GEN11_GRDOM_MEDIA		(1 << 5)
+#define  GEN11_GRDOM_MEDIA2		(1 << 6)
+#define  GEN11_GRDOM_MEDIA3		(1 << 7)
+#define  GEN11_GRDOM_MEDIA4		(1 << 8)
+#define  GEN11_GRDOM_VECS		(1 << 13)
+#define  GEN11_GRDOM_VECS2		(1 << 14)
 
 #define RING_PP_DIR_BASE(engine)	_MMIO((engine)->mmio_base+0x228)
 #define RING_PP_DIR_BASE_READ(engine)	_MMIO((engine)->mmio_base+0x518)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e7540bb9786c..d6e20f0f4c28 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1909,6 +1909,50 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
 	return gen6_hw_domain_reset(dev_priv, hw_mask);
 }
 
+/**
+ * gen11_reset_engines - reset individual engines
+ * @dev_priv: i915 device
+ * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
+ *
+ * This function will reset the individual engines that are set in engine_mask.
+ * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
+ *
+ * Note: It is the responsibility of the caller to handle the difference
+ * between asking for a full domain reset versus a reset of all available
+ * individual engines.
+ *
+ * Returns 0 on success, nonzero on error.
+ */
+static int gen11_reset_engines(struct drm_i915_private *dev_priv,
+			       unsigned engine_mask)
+{
+	struct intel_engine_cs *engine;
+	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+		[RCS] = GEN11_GRDOM_RENDER,
+		[BCS] = GEN11_GRDOM_BLT,
+		[VCS] = GEN11_GRDOM_MEDIA,
+		[VCS2] = GEN11_GRDOM_MEDIA2,
+		[VCS3] = GEN11_GRDOM_MEDIA3,
+		[VCS4] = GEN11_GRDOM_MEDIA4,
+		[VECS] = GEN11_GRDOM_VECS,
+		[VECS2] = GEN11_GRDOM_VECS2,
+	};
+	u32 hw_mask;
+
+	BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
+
+	if (engine_mask == ALL_ENGINES) {
+		hw_mask = GEN11_GRDOM_FULL;
+	} else {
+		unsigned int tmp;
+
+		hw_mask = 0;
+		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+			hw_mask |= hw_engine_mask[engine->id];
+	}
+
+	return gen6_hw_domain_reset(dev_priv, hw_mask);
+}
+
 /**
  * __intel_wait_for_register_fw - wait until register matches expected state
  * @dev_priv: the i915 device
@@ -2057,7 +2101,10 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
 		if (gen8_reset_engine_start(engine))
 			goto not_ready;
 
-	return gen6_reset_engines(dev_priv, engine_mask);
+	if (INTEL_GEN(dev_priv) >= 11)
+		return gen11_reset_engines(dev_priv, engine_mask);
+	else
+		return gen6_reset_engines(dev_priv, engine_mask);
 
 not_ready:
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
@@ -2160,12 +2207,14 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
 
 int intel_reset_guc(struct drm_i915_private *dev_priv)
 {
+	u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC :
+						     GEN9_GRDOM_GUC;
 	int ret;
 
 	GEM_BUG_ON(!HAS_GUC(dev_priv));
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
+	ret = gen6_hw_domain_reset(dev_priv, guc_domain);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
 	return ret;

From f744dbc2a64d5de0d9b3f883b536c007b1e98fab Mon Sep 17 00:00:00 2001
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Fri, 6 Apr 2018 12:31:45 +0300
Subject: [PATCH 0220/1286] drm/i915/icl: Use hw engine class, instance to find
 irq handler

Interrupt identity register we already read from hardware
contains engine class and instance fields. Leverage
these fields to find correct engine to handle the interrupt.

v3: rebase on top of rps intr
    use correct class / instance limits (Michel)
v4: split engine/other handling
v5: empty iir is not err (Daniele, Michel)

Suggested-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180406093145.14389-1-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/i915_irq.c | 99 ++++++++++++++++++---------------
 drivers/gpu/drm/i915/i915_reg.h |  4 +-
 2 files changed, 56 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 27aee25429b7..45f72a0ece04 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2732,47 +2732,9 @@ static void __fini_wedge(struct wedge_me *w)
 	     (W)->i915;							\
 	     __fini_wedge((W)))
 
-static void
-gen11_gt_engine_irq_handler(struct drm_i915_private * const i915,
-			    const unsigned int bank,
-			    const unsigned int engine_n,
-			    const u16 iir)
-{
-	struct intel_engine_cs ** const engine = i915->engine;
-
-	switch (bank) {
-	case 0:
-		switch (engine_n) {
-
-		case GEN11_RCS0:
-			return gen8_cs_irq_handler(engine[RCS], iir);
-
-		case GEN11_BCS:
-			return gen8_cs_irq_handler(engine[BCS], iir);
-		}
-	case 1:
-		switch (engine_n) {
-
-		case GEN11_VCS(0):
-			return gen8_cs_irq_handler(engine[_VCS(0)], iir);
-		case GEN11_VCS(1):
-			return gen8_cs_irq_handler(engine[_VCS(1)], iir);
-		case GEN11_VCS(2):
-			return gen8_cs_irq_handler(engine[_VCS(2)], iir);
-		case GEN11_VCS(3):
-			return gen8_cs_irq_handler(engine[_VCS(3)], iir);
-
-		case GEN11_VECS(0):
-			return gen8_cs_irq_handler(engine[_VECS(0)], iir);
-		case GEN11_VECS(1):
-			return gen8_cs_irq_handler(engine[_VECS(1)], iir);
-		}
-	}
-}
-
 static u32
-gen11_gt_engine_intr(struct drm_i915_private * const i915,
-		     const unsigned int bank, const unsigned int bit)
+gen11_gt_engine_identity(struct drm_i915_private * const i915,
+			 const unsigned int bank, const unsigned int bit)
 {
 	void __iomem * const regs = i915->regs;
 	u32 timeout_ts;
@@ -2799,7 +2761,54 @@ gen11_gt_engine_intr(struct drm_i915_private * const i915,
 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
 		      GEN11_INTR_DATA_VALID);
 
-	return ident & GEN11_INTR_ENGINE_MASK;
+	return ident;
+}
+
+static void
+gen11_other_irq_handler(struct drm_i915_private * const i915,
+			const u8 instance, const u16 iir)
+{
+	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+		  instance, iir);
+}
+
+static void
+gen11_engine_irq_handler(struct drm_i915_private * const i915,
+			 const u8 class, const u8 instance, const u16 iir)
+{
+	struct intel_engine_cs *engine;
+
+	if (instance <= MAX_ENGINE_INSTANCE)
+		engine = i915->engine_class[class][instance];
+	else
+		engine = NULL;
+
+	if (likely(engine))
+		return gen8_cs_irq_handler(engine, iir);
+
+	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
+		  class, instance);
+}
+
+static void
+gen11_gt_identity_handler(struct drm_i915_private * const i915,
+			  const u32 identity)
+{
+	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
+	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
+	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
+
+	if (unlikely(!intr))
+		return;
+
+	if (class <= COPY_ENGINE_CLASS)
+		return gen11_engine_irq_handler(i915, class, instance, intr);
+
+	if (class == OTHER_CLASS)
+		return gen11_other_irq_handler(i915, instance, intr);
+
+	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
+		  class, instance, intr);
 }
 
 static void
@@ -2824,12 +2833,10 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
 		}
 
 		for_each_set_bit(bit, &intr_dw, 32) {
-			const u16 iir = gen11_gt_engine_intr(i915, bank, bit);
+			const u32 ident = gen11_gt_engine_identity(i915,
+								   bank, bit);
 
-			if (unlikely(!iir))
-				continue;
-
-			gen11_gt_engine_irq_handler(i915, bank, bit, iir);
+			gen11_gt_identity_handler(i915, ident);
 		}
 
 		/* Clear must be after shared has been served for engine */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b2a2d8fbbc68..d4b5fba7a2dc 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6998,7 +6998,9 @@ enum {
 #define GEN11_INTR_IDENTITY_REG0	_MMIO(0x190060)
 #define GEN11_INTR_IDENTITY_REG1	_MMIO(0x190064)
 #define  GEN11_INTR_DATA_VALID		(1 << 31)
-#define  GEN11_INTR_ENGINE_MASK		(0xffff)
+#define  GEN11_INTR_ENGINE_CLASS(x)	(((x) & GENMASK(18, 16)) >> 16)
+#define  GEN11_INTR_ENGINE_INSTANCE(x)	(((x) & GENMASK(25, 20)) >> 20)
+#define  GEN11_INTR_ENGINE_INTR(x)	((x) & 0xffff)
 
 #define GEN11_INTR_IDENTITY_REG(x)	_MMIO(0x190060 + (x * 4))
 

From d02b98b8e28278f9f0c0f4dd3f172ffcc20fbdbd Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Thu, 5 Apr 2018 17:00:50 +0300
Subject: [PATCH 0221/1286] drm/i915/icl: Handle RPS interrupts correctly for
 Gen11

Using the new hierarchical interrupt infrastructure.

v2: Rebase
v3: Rebase
v4: use class/instance handler (Mika)

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405140052.10682-3-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/i915_irq.c  | 73 +++++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_reg.h  |  1 +
 drivers/gpu/drm/i915/intel_drv.h |  1 +
 drivers/gpu/drm/i915/intel_pm.c  |  6 +--
 4 files changed, 67 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 45f72a0ece04..36a635475a74 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -308,17 +308,29 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 
 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
 {
+	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
+
 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 }
 
 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
 {
-	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
+	if (INTEL_GEN(dev_priv) >= 11)
+		return GEN11_GPM_WGBOXPERF_INTR_MASK;
+	else if (INTEL_GEN(dev_priv) >= 8)
+		return GEN8_GT_IMR(2);
+	else
+		return GEN6_PMIMR;
 }
 
 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
 {
-	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
+	if (INTEL_GEN(dev_priv) >= 11)
+		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+	else if (INTEL_GEN(dev_priv) >= 8)
+		return GEN8_GT_IER(2);
+	else
+		return GEN6_PMIER;
 }
 
 /**
@@ -400,6 +412,32 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
 	/* though a barrier is missing here, but don't really need a one */
 }
 
+static u32
+gen11_gt_engine_identity(struct drm_i915_private * const i915,
+			 const unsigned int bank, const unsigned int bit);
+
+void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+	u32 dw;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	/*
+	 * According to the BSpec, DW_IIR bits cannot be cleared without
+	 * first servicing the Selector & Shared IIR registers.
+	 */
+	dw = I915_READ_FW(GEN11_GT_INTR_DW0);
+	while (dw & BIT(GEN11_GTPM)) {
+		gen11_gt_engine_identity(dev_priv, 0, GEN11_GTPM);
+		I915_WRITE_FW(GEN11_GT_INTR_DW0, BIT(GEN11_GTPM));
+		dw = I915_READ_FW(GEN11_GT_INTR_DW0);
+	}
+
+	dev_priv->gt_pm.rps.pm_iir = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq_lock);
@@ -415,12 +453,12 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 	if (READ_ONCE(rps->interrupts_enabled))
 		return;
 
-	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
-		return;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON_ONCE(rps->pm_iir);
-	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+	if (INTEL_GEN(dev_priv) >= 11)
+		WARN_ON_ONCE(I915_READ_FW(GEN11_GT_INTR_DW0) & BIT(GEN11_GTPM));
+	else
+		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
 	rps->interrupts_enabled = true;
 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
@@ -434,9 +472,6 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	if (!READ_ONCE(rps->interrupts_enabled))
 		return;
 
-	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
-		return;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 	rps->interrupts_enabled = false;
 
@@ -453,7 +488,10 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	 * state of the worker can be discarded.
 	 */
 	cancel_work_sync(&rps->work);
-	gen6_reset_rps_interrupts(dev_priv);
+	if (INTEL_GEN(dev_priv) >= 11)
+		gen11_reset_rps_interrupts(dev_priv);
+	else
+		gen6_reset_rps_interrupts(dev_priv);
 }
 
 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
@@ -2768,6 +2806,9 @@ static void
 gen11_other_irq_handler(struct drm_i915_private * const i915,
 			const u8 instance, const u16 iir)
 {
+	if (instance == OTHER_GTPM_INSTANCE)
+		return gen6_rps_irq_handler(i915, iir);
+
 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
 		  instance, iir);
 }
@@ -3330,6 +3371,9 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~0);
 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~0);
 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~0);
+
+	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
 }
 
 static void gen11_irq_reset(struct drm_device *dev)
@@ -3868,7 +3912,14 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~(irqs | irqs << 16));
 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~(irqs | irqs << 16));
 
-	dev_priv->pm_imr = 0xffffffff; /* TODO */
+	/*
+	 * RPS interrupts will get enabled/disabled on demand when RPS itself
+	 * is enabled/disabled.
+	 */
+	dev_priv->pm_ier = 0x0;
+	dev_priv->pm_imr = ~dev_priv->pm_ier;
+	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
 }
 
 static int gen11_irq_postinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d4b5fba7a2dc..b3a6428aa71d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -188,6 +188,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OTHER_CLASS		4
 #define MAX_ENGINE_CLASS	4
 
+#define OTHER_GTPM_INSTANCE	1
 #define MAX_ENGINE_INSTANCE    3
 
 /* PCI config space */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d1452fd2a58d..85e483e9a45b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1329,6 +1329,7 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 19e82aaa9863..a018c9abc2b9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8028,10 +8028,10 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
 	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
 	intel_disable_gt_powersave(dev_priv);
 
-	if (INTEL_GEN(dev_priv) < 11)
-		gen6_reset_rps_interrupts(dev_priv);
+	if (INTEL_GEN(dev_priv) >= 11)
+		gen11_reset_rps_interrupts(dev_priv);
 	else
-		WARN_ON_ONCE(1);
+		gen6_reset_rps_interrupts(dev_priv);
 }
 
 static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)

From 96606f3beb8668de2c936d7719a7e385cff9ff01 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Fri, 6 Apr 2018 12:32:37 +0300
Subject: [PATCH 0222/1286] drm/i915/icl: Deal with GT INT DW correctly

BSpec says:

"Second level interrupt events are stored in the GT INT DW. GT INT DW is
a double buffered structure. A snapshot of events is taken when SW reads
GT INT DW. From the time of read to the time of SW completely clearing
GT INT DW (to indicate end of service), all incoming interrupts are logged
in a secondary storage structure. this guarantees that the record of
interrupts SW is servicing will not change while under service".

We read GT INT DW in several places now:

- The IRQ handler (banks 0 and 1) where, hopefully, it is completely
  cleared (operation now covered with the irq lock).
- The 'reset' interrupts functions for RPS and GuC logs, where we clear
  the bit we are interested in and leave the others for the normal
  interrupt handler.
- The 'enable' interrupts functions for RPS and GuC logs, as a measure
  of precaution. Here we could relax a bit and don't check GT INT DW
  at all or, if we do, at least we should clear the offending bit
  (which is what this patch does).

Note that, if every bit is cleared on reading GT INT DW, the register
won't be locked. Also note that, according to the BSpec, GT INT DW
cannot be cleared without first servicing the Selector & Shared IIR
registers.

v2:
  - Remove some code duplication (Tvrtko)
  - Make sure GT_INTR_DW are protected by the irq spinlock, since it's a
    global resource (Tvrtko)

v3: Optimize the spinlock (Tvrtko)

v4: Rebase.
v5:
  - take spinlock on outer scope to please sparse (Mika)
  - use raw_reg accessors (Mika)
v6: omit the continue in looping banks (Michel)

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> (v4)
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180406093237.14548-1-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/i915_irq.c | 116 +++++++++++++++++++++-----------
 1 file changed, 76 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 36a635475a74..c2f878ace0ea 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -243,6 +243,41 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+static u32
+gen11_gt_engine_identity(struct drm_i915_private * const i915,
+			 const unsigned int bank, const unsigned int bit);
+
+static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+				const unsigned int bank,
+				const unsigned int bit)
+{
+	void __iomem * const regs = i915->regs;
+	u32 dw;
+
+	lockdep_assert_held(&i915->irq_lock);
+
+	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+	if (dw & BIT(bit)) {
+		/*
+		 * According to the BSpec, DW_IIR bits cannot be cleared without
+		 * first servicing the Selector & Shared IIR registers.
+		 */
+		gen11_gt_engine_identity(i915, bank, bit);
+
+		/*
+		 * We locked GT INT DW by reading it. If we want to (try
+		 * to) recover from this succesfully, we need to clear
+		 * to) recover from this successfully, we need to clear
+		 * everybody.
+		 */
+		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+
+		return true;
+	}
+
+	return false;
+}
+
 /**
  * ilk_update_display_irq - update DEIMR
  * @dev_priv: driver private
@@ -412,26 +447,12 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
 	/* though a barrier is missing here, but don't really need a one */
 }
 
-static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
-			 const unsigned int bank, const unsigned int bit);
-
 void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	u32 dw;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 
-	/*
-	 * According to the BSpec, DW_IIR bits cannot be cleared without
-	 * first servicing the Selector & Shared IIR registers.
-	 */
-	dw = I915_READ_FW(GEN11_GT_INTR_DW0);
-	while (dw & BIT(GEN11_GTPM)) {
-		gen11_gt_engine_identity(dev_priv, 0, GEN11_GTPM);
-		I915_WRITE_FW(GEN11_GT_INTR_DW0, BIT(GEN11_GTPM));
-		dw = I915_READ_FW(GEN11_GT_INTR_DW0);
-	}
+	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
+		;
 
 	dev_priv->gt_pm.rps.pm_iir = 0;
 
@@ -455,10 +476,12 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON_ONCE(rps->pm_iir);
+
 	if (INTEL_GEN(dev_priv) >= 11)
-		WARN_ON_ONCE(I915_READ_FW(GEN11_GT_INTR_DW0) & BIT(GEN11_GTPM));
+		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
 	else
 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+
 	rps->interrupts_enabled = true;
 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
@@ -2778,6 +2801,8 @@ gen11_gt_engine_identity(struct drm_i915_private * const i915,
 	u32 timeout_ts;
 	u32 ident;
 
+	lockdep_assert_held(&i915->irq_lock);
+
 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
 
 	/*
@@ -2852,37 +2877,48 @@ gen11_gt_identity_handler(struct drm_i915_private * const i915,
 		  class, instance, intr);
 }
 
+static void
+gen11_gt_bank_handler(struct drm_i915_private * const i915,
+		      const unsigned int bank)
+{
+	void __iomem * const regs = i915->regs;
+	unsigned long intr_dw;
+	unsigned int bit;
+
+	lockdep_assert_held(&i915->irq_lock);
+
+	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+
+	if (unlikely(!intr_dw)) {
+		DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
+		return;
+	}
+
+	for_each_set_bit(bit, &intr_dw, 32) {
+		const u32 ident = gen11_gt_engine_identity(i915,
+							   bank, bit);
+
+		gen11_gt_identity_handler(i915, ident);
+	}
+
+	/* Clear must be after shared has been served for engine */
+	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+}
+
 static void
 gen11_gt_irq_handler(struct drm_i915_private * const i915,
 		     const u32 master_ctl)
 {
-	void __iomem * const regs = i915->regs;
 	unsigned int bank;
 
+	spin_lock(&i915->irq_lock);
+
 	for (bank = 0; bank < 2; bank++) {
-		unsigned long intr_dw;
-		unsigned int bit;
-
-		if (!(master_ctl & GEN11_GT_DW_IRQ(bank)))
-			continue;
-
-		intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
-
-		if (unlikely(!intr_dw)) {
-			DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
-			continue;
-		}
-
-		for_each_set_bit(bit, &intr_dw, 32) {
-			const u32 ident = gen11_gt_engine_identity(i915,
-								   bank, bit);
-
-			gen11_gt_identity_handler(i915, ident);
-		}
-
-		/* Clear must be after shared has been served for engine */
-		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+		if (master_ctl & GEN11_GT_DW_IRQ(bank))
+			gen11_gt_bank_handler(i915, bank);
 	}
+
+	spin_unlock(&i915->irq_lock);
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)

From 2b2874efe2413c43f9e330023a03ec5203b372b2 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Thu, 5 Apr 2018 17:00:52 +0300
Subject: [PATCH 0223/1286] drm/i915/icl: Enable RC6 and RPS in Gen11

AFAICT, once the new interrupt is in place, the rest should behave the
same as Gen10.

v2: Update ring frequencies (Sagar)
v3: Rebase.

Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405140052.10682-5-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c | 10 +++++-----
 drivers/gpu/drm/i915/intel_pm.c     | 10 ++++------
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1dba2c451255..785b710e4ee4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1215,20 +1215,20 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
 			    rp_state_cap >> 16) & 0xff;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
-			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
+			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
-			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
+			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
 			    rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
-			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
+			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1811,7 +1811,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
 	min_gpu_freq = rps->min_freq;
 	max_gpu_freq = rps->max_freq;
-	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq /= GEN9_FREQ_SCALER;
 		max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -1827,7 +1827,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
 			   intel_gpu_freq(dev_priv, (gpu_freq *
 						     (IS_GEN9_BC(dev_priv) ||
-						      IS_CANNONLAKE(dev_priv) ?
+						      INTEL_GEN(dev_priv) >= 10 ?
 						      GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a018c9abc2b9..0d25e413ec0b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6572,7 +6572,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 
 	rps->efficient_freq = rps->rp1_freq;
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
-	    IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+	    IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
 		u32 ddcc_status = 0;
 
 		if (sandybridge_pcode_read(dev_priv,
@@ -6585,7 +6585,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 					rps->max_freq);
 	}
 
-	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
 		/* Store the frequency values in 16.66 MHZ units, which is
 		 * the natural hardware unit for SKL
 		 */
@@ -6923,7 +6923,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 
 	min_gpu_freq = rps->min_freq;
 	max_gpu_freq = rps->max_freq;
-	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq /= GEN9_FREQ_SCALER;
 		max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -6938,7 +6938,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 		const int diff = max_gpu_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
-		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+		if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
 			/*
 			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
 			 * No floor required for ring frequency on SKL.
@@ -8144,8 +8144,6 @@ static void intel_enable_rps(struct drm_i915_private *dev_priv)
 		cherryview_enable_rps(dev_priv);
 	} else if (IS_VALLEYVIEW(dev_priv)) {
 		valleyview_enable_rps(dev_priv);
-	} else if (WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11)) {
-		/* TODO */
 	} else if (INTEL_GEN(dev_priv) >= 9) {
 		gen9_enable_rps(dev_priv);
 	} else if (IS_BROADWELL(dev_priv)) {

From 29991d533f6902757de7ffdb933c196421914b08 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 6 Apr 2018 11:09:50 +0100
Subject: [PATCH 0224/1286] drm/i915/selftests: Rename wait_for_hang() to
 wait_until_running()

Tvrtko mentioned that wait_for_hang() was confusing as it does not
actually wait for the aforementioned hang, just until the request is
running and we are *ready* to inject a hang. A quick
s/wait_for_hang/wait_until_running/ removes that confusion without
having to rethink the naming scheme, immediately at least.

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180406100950.19033-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index d03abe7f8a53..8650853c8cb3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -322,7 +322,7 @@ static void hang_fini(struct hang *h)
 	flush_test(h->i915, I915_WAIT_LOCKED);
 }
 
-static bool wait_for_hang(struct hang *h, struct i915_request *rq)
+static bool wait_until_running(struct hang *h, struct i915_request *rq)
 {
 	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
 					       rq->fence.seqno),
@@ -504,7 +504,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 				__i915_request_add(rq, true);
 				mutex_unlock(&i915->drm.struct_mutex);
 
-				if (!wait_for_hang(&h, rq)) {
+				if (!wait_until_running(&h, rq)) {
 					struct drm_printer p = drm_info_printer(i915->drm.dev);
 
 					pr_err("%s: Failed to start request %x, at %x\n",
@@ -747,7 +747,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 				__i915_request_add(rq, true);
 				mutex_unlock(&i915->drm.struct_mutex);
 
-				if (!wait_for_hang(&h, rq)) {
+				if (!wait_until_running(&h, rq)) {
 					struct drm_printer p = drm_info_printer(i915->drm.dev);
 
 					pr_err("%s: Failed to start request %x, at %x\n",
@@ -935,7 +935,7 @@ static int igt_wait_reset(void *arg)
 	i915_request_get(rq);
 	__i915_request_add(rq, true);
 
-	if (!wait_for_hang(&h, rq)) {
+	if (!wait_until_running(&h, rq)) {
 		struct drm_printer p = drm_info_printer(i915->drm.dev);
 
 		pr_err("%s: Failed to start request %x, at %x\n",
@@ -1066,7 +1066,7 @@ static int igt_reset_queue(void *arg)
 				goto fini;
 			}
 
-			if (!wait_for_hang(&h, prev)) {
+			if (!wait_until_running(&h, prev)) {
 				struct drm_printer p = drm_info_printer(i915->drm.dev);
 
 				pr_err("%s(%s): Failed to start request %x, at %x\n",
@@ -1177,7 +1177,7 @@ static int igt_handle_error(void *arg)
 	i915_request_get(rq);
 	__i915_request_add(rq, true);
 
-	if (!wait_for_hang(&h, rq)) {
+	if (!wait_until_running(&h, rq)) {
 		struct drm_printer p = drm_info_printer(i915->drm.dev);
 
 		pr_err("%s: Failed to start request %x, at %x\n",

From e4d2006f8f040825fa371e774a5debacdbf20b08 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 6 Apr 2018 16:51:44 +0100
Subject: [PATCH 0225/1286] drm/i915: Split out parking from the idle worker
 for reuse

We will want to park GEM before disengaging the drive^W^W^W unwedging.
Since we already do the work for idling, expose the guts as a new
function that we can then reuse.

v2: Just skip if already parked; makes it more forgiving to use by
future callers.
v3: Extract mark_busy, rename it to i915_gem_unpark and place it next to
i915_gem_park so that we can evaluate it for symmetry more easily.
Calling GEM from inside i915_request looks to be a bit of a layering
violation; for the moment I am imagining them as being notify_cb.
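
For reference, a condensed sketch of the resulting call pattern, taken
from the reserve_engine()/unreserve_engine() hunks below:

    /* first request wakes the GT */
    if (!i915->gt.active_requests++)
        i915_gem_unpark(i915);

    /* last request parks it again, deferred via the idle worker */
    if (!--i915->gt.active_requests)
        i915_gem_park(i915);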

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> #v1
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180406155144.27791-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c     | 123 +++++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_gem.h     |   5 ++
 drivers/gpu/drm/i915/i915_request.c |  52 +-----------
 3 files changed, 103 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9650a7b10c5f..a69dc19a0bdb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -136,6 +136,100 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	return 0;
 }
 
+static u32 __i915_gem_park(struct drm_i915_private *i915)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	GEM_BUG_ON(i915->gt.active_requests);
+
+	if (!i915->gt.awake)
+		return I915_EPOCH_INVALID;
+
+	GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+
+	/*
+	 * Be paranoid and flush a concurrent interrupt to make sure
+	 * we don't reactivate any irq tasklets after parking.
+	 *
+	 * FIXME: Note that even though we have waited for execlists to be idle,
+	 * there may still be an in-flight interrupt even though the CSB
+	 * is now empty. synchronize_irq() makes sure that a residual interrupt
+	 * is completed before we continue, but it doesn't prevent the HW from
+	 * raising a spurious interrupt later. To complete the shield we should
+	 * coordinate disabling the CS irq with flushing the interrupts.
+	 */
+	synchronize_irq(i915->drm.irq);
+
+	intel_engines_park(i915);
+	i915_gem_timelines_park(i915);
+
+	i915_pmu_gt_parked(i915);
+
+	i915->gt.awake = false;
+
+	if (INTEL_GEN(i915) >= 6)
+		gen6_rps_idle(i915);
+
+	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
+
+	intel_runtime_pm_put(i915);
+
+	return i915->gt.epoch;
+}
+
+void i915_gem_park(struct drm_i915_private *i915)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	GEM_BUG_ON(i915->gt.active_requests);
+
+	if (!i915->gt.awake)
+		return;
+
+	/* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
+	mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
+}
+
+void i915_gem_unpark(struct drm_i915_private *i915)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	GEM_BUG_ON(!i915->gt.active_requests);
+
+	if (i915->gt.awake)
+		return;
+
+	intel_runtime_pm_get_noresume(i915);
+
+	/*
+	 * It seems that the DMC likes to transition between the DC states a lot
+	 * when there are no connected displays (no active power domains) during
+	 * command submission.
+	 *
+	 * This activity has negative impact on the performance of the chip with
+	 * huge latencies observed in the interrupt handler and elsewhere.
+	 *
+	 * Work around it by grabbing a GT IRQ power domain whilst there is any
+	 * GT activity, preventing any DC state transitions.
+	 */
+	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+
+	i915->gt.awake = true;
+	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
+		i915->gt.epoch = 1;
+
+	intel_enable_gt_powersave(i915);
+	i915_update_gfx_val(i915);
+	if (INTEL_GEN(i915) >= 6)
+		gen6_rps_busy(i915);
+	i915_pmu_gt_unparked(i915);
+
+	intel_engines_unpark(i915);
+
+	i915_queue_hangcheck(i915);
+
+	queue_delayed_work(i915->wq,
+			   &i915->gt.retire_work,
+			   round_jiffies_up_relative(HZ));
+}
+
 int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
@@ -3496,36 +3590,9 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	if (new_requests_since_last_retire(dev_priv))
 		goto out_unlock;
 
-	/*
-	 * Be paranoid and flush a concurrent interrupt to make sure
-	 * we don't reactivate any irq tasklets after parking.
-	 *
-	 * FIXME: Note that even though we have waited for execlists to be idle,
-	 * there may still be an in-flight interrupt even though the CSB
-	 * is now empty. synchronize_irq() makes sure that a residual interrupt
-	 * is completed before we continue, but it doesn't prevent the HW from
-	 * raising a spurious interrupt later. To complete the shield we should
-	 * coordinate disabling the CS irq with flushing the interrupts.
-	 */
-	synchronize_irq(dev_priv->drm.irq);
+	epoch = __i915_gem_park(dev_priv);
 
-	intel_engines_park(dev_priv);
-	i915_gem_timelines_park(dev_priv);
-
-	i915_pmu_gt_parked(dev_priv);
-
-	GEM_BUG_ON(!dev_priv->gt.awake);
-	dev_priv->gt.awake = false;
-	epoch = dev_priv->gt.epoch;
-	GEM_BUG_ON(epoch == I915_EPOCH_INVALID);
 	rearm_hangcheck = false;
-
-	if (INTEL_GEN(dev_priv) >= 6)
-		gen6_rps_idle(dev_priv);
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
-
-	intel_runtime_pm_put(dev_priv);
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 8922344fc21b..deaf78d2ae8b 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -27,6 +27,8 @@
 
 #include <linux/bug.h>
 
+struct drm_i915_private;
+
 #ifdef CONFIG_DRM_I915_DEBUG_GEM
 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) {	\
 		pr_err("%s:%d GEM_BUG_ON(%s)\n", \
@@ -61,4 +63,7 @@
 
 #define I915_NUM_ENGINES 8
 
+void i915_gem_park(struct drm_i915_private *i915);
+void i915_gem_unpark(struct drm_i915_private *i915);
+
 #endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 585242831974..a9d0bde16443 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -255,47 +255,6 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 	return reset_all_global_seqno(i915, seqno - 1);
 }
 
-static void mark_busy(struct drm_i915_private *i915)
-{
-	if (i915->gt.awake)
-		return;
-
-	GEM_BUG_ON(!i915->gt.active_requests);
-
-	intel_runtime_pm_get_noresume(i915);
-
-	/*
-	 * It seems that the DMC likes to transition between the DC states a lot
-	 * when there are no connected displays (no active power domains) during
-	 * command submission.
-	 *
-	 * This activity has negative impact on the performance of the chip with
-	 * huge latencies observed in the interrupt handler and elsewhere.
-	 *
-	 * Work around it by grabbing a GT IRQ power domain whilst there is any
-	 * GT activity, preventing any DC state transitions.
-	 */
-	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
-
-	i915->gt.awake = true;
-	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
-		i915->gt.epoch = 1;
-
-	intel_enable_gt_powersave(i915);
-	i915_update_gfx_val(i915);
-	if (INTEL_GEN(i915) >= 6)
-		gen6_rps_busy(i915);
-	i915_pmu_gt_unparked(i915);
-
-	intel_engines_unpark(i915);
-
-	i915_queue_hangcheck(i915);
-
-	queue_delayed_work(i915->wq,
-			   &i915->gt.retire_work,
-			   round_jiffies_up_relative(HZ));
-}
-
 static int reserve_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
@@ -313,7 +272,7 @@ static int reserve_engine(struct intel_engine_cs *engine)
 	}
 
 	if (!i915->gt.active_requests++)
-		mark_busy(i915);
+		i915_gem_unpark(i915);
 
 	return 0;
 }
@@ -322,13 +281,8 @@ static void unreserve_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
 
-	if (!--i915->gt.active_requests) {
-		/* Cancel the mark_busy() from our reserve_engine() */
-		GEM_BUG_ON(!i915->gt.awake);
-		mod_delayed_work(i915->wq,
-				 &i915->gt.idle_work,
-				 msecs_to_jiffies(100));
-	}
+	if (!--i915->gt.active_requests)
+		i915_gem_park(i915);
 
 	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
 	engine->timeline->inflight_seqnos--;

From bba0869b18e44ff2f713c98575ddad8c7c5e9b10 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 6 Apr 2018 23:03:53 +0100
Subject: [PATCH 0226/1286] drm/i915: Treat i915_reset_engine() as guilty until
 proven innocent

If we are resetting just one engine, we know it has stalled, so we can
pass the stalled parameter directly to i915_gem_reset_engine(). This
removes the need to poke at the generic engine->hangcheck.stalled magic
variable, leaving that under the control of hangcheck as its name
implies. Besides simplifying this path by removing the indirect
parameter, it also allows us to introduce new reset mechanisms that run
independently of hangcheck.
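
Condensed from the hunk below, the per-engine reset now pardons a
request only if it has in fact completed, and otherwise trusts the
caller's verdict:

    if (i915_request_completed(request))
        stalled = false;	/* seqno moved on, pardon the request */

    if (stalled) {
        i915_gem_context_mark_guilty(request->ctx);
        skip_request(request);
    }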

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180406220354.18911-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.c               |  2 +-
 drivers/gpu/drm/i915/i915_drv.h               |  3 +-
 drivers/gpu/drm/i915/i915_gem.c               | 36 +++++++++----------
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  9 -----
 4 files changed, 20 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 684060ed8db6..7ce229c6f424 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2050,7 +2050,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	 * active request and can drop it, adjust head to skip the offending
 	 * request to resume executing remaining requests in the queue.
 	 */
-	i915_gem_reset_engine(engine, active_request);
+	i915_gem_reset_engine(engine, active_request, true);
 
 	/*
 	 * The engine and its registers (and workarounds in case of render)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5373b171bb96..6b3f2f651def 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3132,7 +3132,8 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_reset_engine(struct intel_engine_cs *engine,
-			   struct i915_request *request);
+			   struct i915_request *request,
+			   bool stalled);
 
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a69dc19a0bdb..306d7a805eb7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2990,20 +2990,6 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	return active;
 }
 
-static bool engine_stalled(struct intel_engine_cs *engine)
-{
-	if (!engine->hangcheck.stalled)
-		return false;
-
-	/* Check for possible seqno movement after hang declaration */
-	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
-		DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
-		return false;
-	}
-
-	return true;
-}
-
 /*
  * Ensure irq handler finishes, and not run again.
  * Also return the active request so that we only search for it once.
@@ -3142,7 +3128,8 @@ static void engine_skip_context(struct i915_request *request)
 /* Returns the request if it was guilty of the hang */
 static struct i915_request *
 i915_gem_reset_request(struct intel_engine_cs *engine,
-		       struct i915_request *request)
+		       struct i915_request *request,
+		       bool stalled)
 {
 	/* The guilty request will get skipped on a hung engine.
 	 *
@@ -3165,7 +3152,15 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 	 * subsequent hangs.
 	 */
 
-	if (engine_stalled(engine)) {
+	if (i915_request_completed(request)) {
+		GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
+			  engine->name, request->global_seqno,
+			  request->fence.context, request->fence.seqno,
+			  intel_engine_get_seqno(engine));
+		stalled = false;
+	}
+
+	if (stalled) {
 		i915_gem_context_mark_guilty(request->ctx);
 		skip_request(request);
 
@@ -3196,7 +3191,8 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 }
 
 void i915_gem_reset_engine(struct intel_engine_cs *engine,
-			   struct i915_request *request)
+			   struct i915_request *request,
+			   bool stalled)
 {
 	/*
 	 * Make sure this write is visible before we re-enable the interrupt
@@ -3206,7 +3202,7 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
 	smp_store_mb(engine->irq_posted, 0);
 
 	if (request)
-		request = i915_gem_reset_request(engine, request);
+		request = i915_gem_reset_request(engine, request, stalled);
 
 	if (request) {
 		DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
@@ -3229,7 +3225,9 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 	for_each_engine(engine, dev_priv, id) {
 		struct i915_gem_context *ctx;
 
-		i915_gem_reset_engine(engine, engine->hangcheck.active_request);
+		i915_gem_reset_engine(engine,
+				      engine->hangcheck.active_request,
+				      engine->hangcheck.stalled);
 		ctx = fetch_and_zero(&engine->last_retired_context);
 		if (ctx)
 			engine->context_unpin(engine, ctx);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 8650853c8cb3..acfb4dcc9fb5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -522,9 +522,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 				i915_request_put(rq);
 			}
 
-			engine->hangcheck.stalled = true;
-			engine->hangcheck.seqno = seqno;
-
 			err = i915_reset_engine(engine, NULL);
 			if (err) {
 				pr_err("i915_reset_engine failed\n");
@@ -545,8 +542,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 				err = -EINVAL;
 				break;
 			}
-
-			engine->hangcheck.stalled = false;
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 
@@ -764,9 +759,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 				seqno = rq->global_seqno - 1;
 			}
 
-			engine->hangcheck.stalled = true;
-			engine->hangcheck.seqno = seqno;
-
 			err = i915_reset_engine(engine, NULL);
 			if (err) {
 				pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
@@ -774,7 +766,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 				break;
 			}
 
-			engine->hangcheck.stalled = false;
 			count++;
 
 			if (rq) {

From d0667e9ce52eb2d5d32db4f16976226e78f88784 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 6 Apr 2018 23:03:54 +0100
Subject: [PATCH 0227/1286] drm/i915: Pass the set of guilty engines to
 i915_reset()

Currently, we rely on inspecting the hangcheck state from within the
i915_reset() routines to determine which engines were guilty of the
hang. This is problematic for cases where we want to run
i915_handle_error() and call i915_reset() independently of hangcheck.
Instead of relying on that indirect parameter passing, turn it into an
explicit parameter providing the set of stalled engines, which are then
treated as guilty until proven innocent.
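
Condensed from the hunks below, the new calling convention threads the
guilty set all the way down to the per-engine reset:

    void i915_reset(struct drm_i915_private *i915,
                    unsigned int stalled_mask,
                    const char *reason);

    /* i915_gem_reset() then fans the mask out per engine: */
    i915_gem_reset_engine(engine,
                          engine->hangcheck.active_request,
                          stalled_mask & ENGINE_MASK(id));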

While we are removing the implicit stalled parameter, also make the
reason into an explicit parameter to i915_reset(). We still need a
back-channel for i915_handle_error() to hand over the task to the locked
waiter, but let's keep that its own channel rather than incriminate
another.
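
The back-channel itself, condensed from the i915_reset_device() hunk
below: the mask and reason are published before the handoff bit is set,
and the locked waiter simply reads them back when it performs the reset:

    error->reason = reason;
    error->stalled_mask = engine_mask;

    smp_mb__before_atomic();	/* publish before the handoff bit */
    set_bit(I915_RESET_HANDOFF, &error->flags);
    wake_up_all(&error->wait_queue);

    /* ...and in the waiter: */
    i915_reset(request->i915, error->stalled_mask, error->reason);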

This leaves stalled/seqno as being private to hangcheck, with no more
nefarious snooping by reset, be it whole-device or per-engine. \o/

The only real issue now is that this makes it crystal clear that we
don't actually do any testing of hangcheck per se in
drv_selftest/live_hangcheck, merely of resets!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180406220354.18911-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.c               | 13 ++++----
 drivers/gpu/drm/i915/i915_drv.h               | 10 +++++--
 drivers/gpu/drm/i915/i915_gem.c               |  5 ++--
 drivers/gpu/drm/i915/i915_gpu_error.h         |  3 ++
 drivers/gpu/drm/i915/i915_irq.c               | 12 +++++---
 drivers/gpu/drm/i915/i915_request.c           |  6 ++--
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 30 +++++++++----------
 7 files changed, 47 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7ce229c6f424..f770be18b2d7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1866,6 +1866,8 @@ static int i915_resume_switcheroo(struct drm_device *dev)
 /**
  * i915_reset - reset chip after a hang
  * @i915: #drm_i915_private to reset
+ * @stalled_mask: mask of the stalled engines with the guilty requests
+ * @reason: user error message for why we are resetting
  *
  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
  * on failure.
@@ -1880,7 +1882,9 @@ static int i915_resume_switcheroo(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-void i915_reset(struct drm_i915_private *i915)
+void i915_reset(struct drm_i915_private *i915,
+		unsigned int stalled_mask,
+		const char *reason)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	int ret;
@@ -1899,9 +1903,8 @@ void i915_reset(struct drm_i915_private *i915)
 	if (!i915_gem_unset_wedged(i915))
 		goto wakeup;
 
-	if (error->reason)
-		dev_notice(i915->drm.dev,
-			   "Resetting chip for %s\n", error->reason);
+	if (reason)
+		dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
 	error->reset_count++;
 
 	disable_irq(i915->drm.irq);
@@ -1944,7 +1947,7 @@ void i915_reset(struct drm_i915_private *i915)
 		goto error;
 	}
 
-	i915_gem_reset(i915);
+	i915_gem_reset(i915, stalled_mask);
 	intel_overlay_reset(i915);
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6b3f2f651def..9bca104c409e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2701,8 +2701,11 @@ extern void i915_driver_unload(struct drm_device *dev);
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
 
-extern void i915_reset(struct drm_i915_private *i915);
-extern int i915_reset_engine(struct intel_engine_cs *engine, const char *msg);
+extern void i915_reset(struct drm_i915_private *i915,
+		       unsigned int stalled_mask,
+		       const char *reason);
+extern int i915_reset_engine(struct intel_engine_cs *engine,
+			     const char *reason);
 
 extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
 extern int intel_reset_guc(struct drm_i915_private *dev_priv);
@@ -3126,7 +3129,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
 struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
-void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_reset(struct drm_i915_private *dev_priv,
+		    unsigned int stalled_mask);
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 306d7a805eb7..28ab0beff86c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3213,7 +3213,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
 	engine->reset_hw(engine, request);
 }
 
-void i915_gem_reset(struct drm_i915_private *dev_priv)
+void i915_gem_reset(struct drm_i915_private *dev_priv,
+		    unsigned int stalled_mask)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -3227,7 +3228,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 		i915_gem_reset_engine(engine,
 				      engine->hangcheck.active_request,
-				      engine->hangcheck.stalled);
+				      stalled_mask & ENGINE_MASK(id));
 		ctx = fetch_and_zero(&engine->last_retired_context);
 		if (ctx)
 			engine->context_unpin(engine, ctx);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index ac5760673cc9..c05b6034d718 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -269,6 +269,9 @@ struct i915_gpu_error {
 	/** Number of times an engine has been reset */
 	u32 reset_engine_count[I915_NUM_ENGINES];
 
+	/** Set of stalled engines with guilty requests, in the current reset */
+	u32 stalled_mask;
+
 	/** Reason for the current *global* reset */
 	const char *reason;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c2f878ace0ea..b03d18561b55 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2961,7 +2961,8 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 }
 
 static void i915_reset_device(struct drm_i915_private *dev_priv,
-			      const char *msg)
+			      u32 engine_mask,
+			      const char *reason)
 {
 	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
@@ -2979,9 +2980,11 @@ static void i915_reset_device(struct drm_i915_private *dev_priv,
 	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
 		intel_prepare_reset(dev_priv);
 
-		error->reason = msg;
+		error->reason = reason;
+		error->stalled_mask = engine_mask;
 
 		/* Signal that locked waiters should reset the GPU */
+		smp_mb__before_atomic();
 		set_bit(I915_RESET_HANDOFF, &error->flags);
 		wake_up_all(&error->wait_queue);
 
@@ -2990,7 +2993,7 @@ static void i915_reset_device(struct drm_i915_private *dev_priv,
 		 */
 		do {
 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-				i915_reset(dev_priv);
+				i915_reset(dev_priv, engine_mask, reason);
 				mutex_unlock(&dev_priv->drm.struct_mutex);
 			}
 		} while (wait_on_bit_timeout(&error->flags,
@@ -2998,6 +3001,7 @@ static void i915_reset_device(struct drm_i915_private *dev_priv,
 					     TASK_UNINTERRUPTIBLE,
 					     1));
 
+		error->stalled_mask = 0;
 		error->reason = NULL;
 
 		intel_finish_reset(dev_priv);
@@ -3122,7 +3126,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 				    TASK_UNINTERRUPTIBLE);
 	}
 
-	i915_reset_device(dev_priv, msg);
+	i915_reset_device(dev_priv, engine_mask, msg);
 
 	for_each_engine(engine, dev_priv, tmp) {
 		clear_bit(I915_RESET_ENGINE + engine->id,
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a9d0bde16443..629f3e860592 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1185,11 +1185,13 @@ static bool __i915_spin_request(const struct i915_request *rq,
 
 static bool __i915_wait_request_check_and_reset(struct i915_request *request)
 {
-	if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
+	struct i915_gpu_error *error = &request->i915->gpu_error;
+
+	if (likely(!i915_reset_handoff(error)))
 		return false;
 
 	__set_current_state(TASK_RUNNING);
-	i915_reset(request->i915);
+	i915_reset(request->i915, error->stalled_mask, error->reason);
 	return true;
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index acfb4dcc9fb5..24f913f26a7b 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -437,7 +437,7 @@ static int igt_global_reset(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	reset_count = i915_reset_count(&i915->gpu_error);
 
-	i915_reset(i915);
+	i915_reset(i915, ALL_ENGINES, NULL);
 
 	if (i915_reset_count(&i915->gpu_error) == reset_count) {
 		pr_err("No GPU reset recorded!\n");
@@ -881,17 +881,18 @@ static int igt_reset_engines(void *arg)
 	return 0;
 }
 
-static u32 fake_hangcheck(struct i915_request *rq)
+static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
 {
-	u32 reset_count;
+	struct i915_gpu_error *error = &rq->i915->gpu_error;
+	u32 reset_count = i915_reset_count(error);
 
-	rq->engine->hangcheck.stalled = true;
-	rq->engine->hangcheck.seqno = intel_engine_get_seqno(rq->engine);
+	error->stalled_mask = mask;
 
-	reset_count = i915_reset_count(&rq->i915->gpu_error);
+	/* set_bit() must be after we have setup the backchannel (mask) */
+	smp_mb__before_atomic();
+	set_bit(I915_RESET_HANDOFF, &error->flags);
 
-	set_bit(I915_RESET_HANDOFF, &rq->i915->gpu_error.flags);
-	wake_up_all(&rq->i915->gpu_error.wait_queue);
+	wake_up_all(&error->wait_queue);
 
 	return reset_count;
 }
@@ -939,7 +940,7 @@ static int igt_wait_reset(void *arg)
 		goto out_rq;
 	}
 
-	reset_count = fake_hangcheck(rq);
+	reset_count = fake_hangcheck(rq, ALL_ENGINES);
 
 	timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
 	if (timeout < 0) {
@@ -1075,9 +1076,9 @@ static int igt_reset_queue(void *arg)
 				goto fini;
 			}
 
-			reset_count = fake_hangcheck(prev);
+			reset_count = fake_hangcheck(prev, ENGINE_MASK(id));
 
-			i915_reset(i915);
+			i915_reset(i915, ENGINE_MASK(id), NULL);
 
 			GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
 					    &i915->gpu_error.flags));
@@ -1150,7 +1151,7 @@ static int igt_handle_error(void *arg)
 	if (!intel_has_reset_engine(i915))
 		return 0;
 
-	if (!intel_engine_can_store_dword(i915->engine[RCS]))
+	if (!engine || !intel_engine_can_store_dword(engine))
 		return 0;
 
 	mutex_lock(&i915->drm.struct_mutex);
@@ -1186,10 +1187,7 @@ static int igt_handle_error(void *arg)
 	/* Temporarily disable error capture */
 	error = xchg(&i915->gpu_error.first_error, (void *)-1);
 
-	engine->hangcheck.stalled = true;
-	engine->hangcheck.seqno = intel_engine_get_seqno(engine);
-
-	i915_handle_error(i915, intel_engine_flag(engine), 0, NULL);
+	i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL);
 
 	xchg(&i915->gpu_error.first_error, error);
 

From be1c63c8017bb00a4041abace6cc1e9f0bf26aa9 Mon Sep 17 00:00:00 2001
From: Lyude Paul <lyude@redhat.com>
Date: Fri, 6 Apr 2018 21:10:53 -0400
Subject: [PATCH 0228/1286] drm/i915/dp: Send DPCD ON for MST before phy_up
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When doing a modeset where the sink is transitioning from D3 to D0, it
would sometimes be possible for the initial power_up_phy() to start
timing out. This would only be observed if the last action before the
sink went into D3 mode was intel_dp_sink_dpms(DRM_MODE_DPMS_OFF). We
originally thought this might be an issue with us accidentally shutting
off the aux block when putting the sink into D3, but since the DP spec
mandates that sinks must wake up within 1ms while we have 100ms to
respond to an ESI irq, this didn't really add up. Turns out that the
problem is more subtle than that:

The timeout comes from us not enabling DPMS on the MST hub before
actually trying to initiate sideband communications. This would cause
the first sideband communication (power_up_phy()) to start timing out
because the sink wasn't ready to respond. Afterwards, we would call
intel_dp_sink_dpms(DRM_MODE_DPMS_ON) in intel_ddi_pre_enable_dp(), which
would actually result in waking up the sink so that sideband requests
would work again.

Since DPMS is what lets us actually bring the hub up into a state where
sideband communications become functional again, we just need to make
sure to enable DPMS on the display before attempting to perform sideband
communications.
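
Condensed from the MST hunks below: the sink is woken before the first
sideband transaction on the first active link, and powered down again
only once the last link has gone away:

    /* intel_mst_pre_enable_dp() */
    if (intel_dp->active_mst_links == 0)
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);

    drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);

    /* intel_mst_post_disable_dp(), after active_mst_links is decremented */
    if (intel_dp->active_mst_links == 0)
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);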

Changes since v1:
- Remove comment above if (!intel_dp->is_mst) - vsryjala
- Move intel_dp_sink_dpms() for MST into intel_dp_post_disable_mst() to
  keep enable/disable paths symmetrical
- Improve commit message - dhnkrn
Changes since v2:
- Only send DPMS off when we're disabling the last sink, and only send
  DPMS on when we're enabling the first sink - dhnkrn
Changes since v3:
- Check against is_mst, not intel_dp->is_mst - dhnkrn/vsyrjala

Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: stable@vger.kernel.org
Fixes: ad260ab32a4d9 ("drm/i915/dp: Write to SET_POWER dpcd to enable MST hub.")
Link: https://patchwork.freedesktop.org/patch/msgid/20180407011053.22437-1-lyude@redhat.com
---
 drivers/gpu/drm/i915/intel_ddi.c    | 8 ++++++--
 drivers/gpu/drm/i915/intel_dp_mst.c | 8 +++++++-
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index a6672a9abd85..92cb26b18a9b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2324,7 +2324,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 		intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
 	intel_ddi_init_dp_buf_reg(encoder);
-	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	if (!is_mst)
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_start_link_train(intel_dp);
 	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
 		intel_dp_stop_link_train(intel_dp);
@@ -2422,12 +2423,15 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_dp *intel_dp = &dig_port->dp;
+	bool is_mst = intel_crtc_has_type(old_crtc_state,
+					  INTEL_OUTPUT_DP_MST);
 
 	/*
 	 * Power down sink before disabling the port, otherwise we end
 	 * up getting interrupts from the sink on detecting link loss.
 	 */
-	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	if (!is_mst)
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 
 	intel_disable_ddi_buf(encoder);
 
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index c3de0918ee13..9e6956c08688 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -180,9 +180,11 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
 	intel_dp->active_mst_links--;
 
 	intel_mst->connector = NULL;
-	if (intel_dp->active_mst_links == 0)
+	if (intel_dp->active_mst_links == 0) {
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 		intel_dig_port->base.post_disable(&intel_dig_port->base,
 						  old_crtc_state, NULL);
+	}
 
 	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 }
@@ -223,7 +225,11 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 
 	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 
+	if (intel_dp->active_mst_links == 0)
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
 	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+
 	if (intel_dp->active_mst_links == 0)
 		intel_dig_port->base.pre_enable(&intel_dig_port->base,
 						pipe_config, NULL);

From 5e7086eecc32b95288bc76f2a22aadeb368e25ed Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:32:58 -0400
Subject: [PATCH 0229/1286] drm/amdgpu: Remove unused interface from kfd2kgd
 interface

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 10 ----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 10 ----------
 drivers/gpu/drm/amd/include/kgd_kfd_interface.h   |  5 -----
 3 files changed, 25 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea54e53172b9..0ff36d45a597 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -98,8 +98,6 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
 static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
 					unsigned int vmid);
 
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
-				uint32_t hpd_size, uint64_t hpd_gpu_addr);
 static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
 static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 			uint32_t queue_id, uint32_t __user *wptr,
@@ -183,7 +181,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
-	.init_pipeline = kgd_init_pipeline,
 	.init_interrupts = kgd_init_interrupts,
 	.hqd_load = kgd_hqd_load,
 	.hqd_sdma_load = kgd_hqd_sdma_load,
@@ -309,13 +306,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
 	return 0;
 }
 
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
-				uint32_t hpd_size, uint64_t hpd_gpu_addr)
-{
-	/* amdgpu owns the per-pipe state */
-	return 0;
-}
-
 static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 89264c9a5e9f..6ef9762b4b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -57,8 +57,6 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
 		uint32_t sh_mem_bases);
 static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
 		unsigned int vmid);
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
-		uint32_t hpd_size, uint64_t hpd_gpu_addr);
 static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
 static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 			uint32_t queue_id, uint32_t __user *wptr,
@@ -141,7 +139,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
-	.init_pipeline = kgd_init_pipeline,
 	.init_interrupts = kgd_init_interrupts,
 	.hqd_load = kgd_hqd_load,
 	.hqd_sdma_load = kgd_hqd_sdma_load,
@@ -270,13 +267,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
 	return 0;
 }
 
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
-				uint32_t hpd_size, uint64_t hpd_gpu_addr)
-{
-	/* amdgpu owns the per-pipe state */
-	return 0;
-}
-
 static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 286cfe7068c1..7cf35068866f 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -173,8 +173,6 @@ struct tile_config {
  * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
  * scheduling mode. Only used for no cp scheduling mode.
  *
- * @init_pipeline: Initialized the compute pipelines.
- *
  * @hqd_load: Loads the mqd structure to a H/W hqd slot. used only for no cp
  * sceduling mode.
  *
@@ -274,9 +272,6 @@ struct kfd2kgd_calls {
 	int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
 					unsigned int vmid);
 
-	int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
-				uint32_t hpd_size, uint64_t hpd_gpu_addr);
-
 	int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
 
 	int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,

From cf05fb8b144dae55d094b0fa7991e985a9b4561e Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:32:59 -0400
Subject: [PATCH 0230/1286] drm/amd: Update GFXv9 SDMA MQD structure

This matches what the HWS firmware expects on GFXv9 chips.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 MAINTAINERS                              |  1 +
 drivers/gpu/drm/amd/include/v9_structs.h | 48 ++++++++++++------------
 2 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 92be777d060a..dc929dc9ce9b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -772,6 +772,7 @@ F:	drivers/gpu/drm/amd/amdkfd/
 F:	drivers/gpu/drm/amd/include/cik_structs.h
 F:	drivers/gpu/drm/amd/include/kgd_kfd_interface.h
 F:	drivers/gpu/drm/amd/include/vi_structs.h
+F:	drivers/gpu/drm/amd/include/v9_structs.h
 F:	include/uapi/linux/kfd_ioctl.h
 
 AMD SEATTLE DEVICE TREE SUPPORT
diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h
index 2fb25abaf7c8..ceaf4932258d 100644
--- a/drivers/gpu/drm/amd/include/v9_structs.h
+++ b/drivers/gpu/drm/amd/include/v9_structs.h
@@ -29,10 +29,10 @@ struct v9_sdma_mqd {
 	uint32_t sdmax_rlcx_rb_base;
 	uint32_t sdmax_rlcx_rb_base_hi;
 	uint32_t sdmax_rlcx_rb_rptr;
+	uint32_t sdmax_rlcx_rb_rptr_hi;
 	uint32_t sdmax_rlcx_rb_wptr;
+	uint32_t sdmax_rlcx_rb_wptr_hi;
 	uint32_t sdmax_rlcx_rb_wptr_poll_cntl;
-	uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi;
-	uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo;
 	uint32_t sdmax_rlcx_rb_rptr_addr_hi;
 	uint32_t sdmax_rlcx_rb_rptr_addr_lo;
 	uint32_t sdmax_rlcx_ib_cntl;
@@ -44,29 +44,29 @@ struct v9_sdma_mqd {
 	uint32_t sdmax_rlcx_skip_cntl;
 	uint32_t sdmax_rlcx_context_status;
 	uint32_t sdmax_rlcx_doorbell;
-	uint32_t sdmax_rlcx_virtual_addr;
-	uint32_t sdmax_rlcx_ape1_cntl;
+	uint32_t sdmax_rlcx_status;
 	uint32_t sdmax_rlcx_doorbell_log;
-	uint32_t reserved_22;
-	uint32_t reserved_23;
-	uint32_t reserved_24;
-	uint32_t reserved_25;
-	uint32_t reserved_26;
-	uint32_t reserved_27;
-	uint32_t reserved_28;
-	uint32_t reserved_29;
-	uint32_t reserved_30;
-	uint32_t reserved_31;
-	uint32_t reserved_32;
-	uint32_t reserved_33;
-	uint32_t reserved_34;
-	uint32_t reserved_35;
-	uint32_t reserved_36;
-	uint32_t reserved_37;
-	uint32_t reserved_38;
-	uint32_t reserved_39;
-	uint32_t reserved_40;
-	uint32_t reserved_41;
+	uint32_t sdmax_rlcx_watermark;
+	uint32_t sdmax_rlcx_doorbell_offset;
+	uint32_t sdmax_rlcx_csa_addr_lo;
+	uint32_t sdmax_rlcx_csa_addr_hi;
+	uint32_t sdmax_rlcx_ib_sub_remain;
+	uint32_t sdmax_rlcx_preempt;
+	uint32_t sdmax_rlcx_dummy_reg;
+	uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi;
+	uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo;
+	uint32_t sdmax_rlcx_rb_aql_cntl;
+	uint32_t sdmax_rlcx_minor_ptr_update;
+	uint32_t sdmax_rlcx_midcmd_data0;
+	uint32_t sdmax_rlcx_midcmd_data1;
+	uint32_t sdmax_rlcx_midcmd_data2;
+	uint32_t sdmax_rlcx_midcmd_data3;
+	uint32_t sdmax_rlcx_midcmd_data4;
+	uint32_t sdmax_rlcx_midcmd_data5;
+	uint32_t sdmax_rlcx_midcmd_data6;
+	uint32_t sdmax_rlcx_midcmd_data7;
+	uint32_t sdmax_rlcx_midcmd_data8;
+	uint32_t sdmax_rlcx_midcmd_cntl;
 	uint32_t reserved_42;
 	uint32_t reserved_43;
 	uint32_t reserved_44;

From ab88bded7522dafc1a6beb251092365519a01c4e Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:00 -0400
Subject: [PATCH 0231/1286] drm/amdgpu: Add GFXv9 TLB invalidation packet
 definition

Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
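(Illustration only, not part of this patch: the field macros added below
would typically be combined with the PACKET3() header macro when
building the PM4 command, along the lines of

    amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
    amdgpu_ring_write(ring,
                      PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
                      PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
                      PACKET3_INVALIDATE_TLBS_PASID(pasid));

with the exact emit path left to the consumer of these definitions.)
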
 drivers/gpu/drm/amd/amdgpu/soc15d.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index 7f408f85fdb6..f22f7a88ce0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -268,6 +268,11 @@
 			 * x=1: tmz_end
 			 */
 
+#define	PACKET3_INVALIDATE_TLBS				0x98
+#              define PACKET3_INVALIDATE_TLBS_DST_SEL(x)     ((x) << 0)
+#              define PACKET3_INVALIDATE_TLBS_ALL_HUB(x)     ((x) << 4)
+#              define PACKET3_INVALIDATE_TLBS_PASID(x)       ((x) << 5)
+#              define PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(x)  ((x) << 29)
 #define PACKET3_SET_RESOURCES				0xA0
 /* 1. header
  * 2. CONTROL

From d5a114a6c5f7fa41da338e0134fccf3f25723fbd Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:01 -0400
Subject: [PATCH 0232/1286] drm/amdgpu: Add GFXv9 kfd2kgd interface functions

Signed-off-by: John Bridgman <john.bridgman@amd.com>
Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 MAINTAINERS                                   |    1 +
 drivers/gpu/drm/amd/amdgpu/Makefile           |    3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c    |    4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h    |    1 +
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1043 +++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c         |    1 +
 6 files changed, 1052 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c

diff --git a/MAINTAINERS b/MAINTAINERS
index dc929dc9ce9b..051f2fee58f9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -766,6 +766,7 @@ F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
 F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
 F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
 F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
 F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
 F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
 F:	drivers/gpu/drm/amd/amdkfd/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 2ca2b5154d52..f3002020df6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -130,7 +130,8 @@ amdgpu-y += \
 	 amdgpu_amdkfd.o \
 	 amdgpu_amdkfd_fence.o \
 	 amdgpu_amdkfd_gpuvm.o \
-	 amdgpu_amdkfd_gfx_v8.o
+	 amdgpu_amdkfd_gfx_v8.o \
+	 amdgpu_amdkfd_gfx_v9.o
 
 # add cgs
 amdgpu-y += amdgpu_cgs.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4d36203ffb11..fcd10dbd121c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -92,6 +92,10 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
 		break;
+	case CHIP_VEGA10:
+	case CHIP_RAVEN:
+		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
+		break;
 	default:
 		dev_dbg(adev->dev, "kfd not supported on this ASIC\n");
 		return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index c3024b143f3d..12367a9951e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -122,6 +122,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
new file mode 100644
index 000000000000..8f37991df61b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -0,0 +1,1043 @@
+/*
+ * Copyright 2014-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "kfd2kgd: " fmt
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "soc15_hw_ip.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "vega10_enum.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "sdma0/sdma0_4_0_sh_mask.h"
+#include "sdma1/sdma1_4_0_offset.h"
+#include "sdma1/sdma1_4_0_sh_mask.h"
+#include "athub/athub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+#include "soc15_common.h"
+#include "v9_structs.h"
+#include "soc15.h"
+#include "soc15d.h"
+
+/* HACK: MMHUB and GC both have VM-related register with the same
+ * names but different offsets. Define the MMHUB register we need here
+ * with a prefix. A proper solution would be to move the functions
+ * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
+ * respectively.
+ */
+#define mmMMHUB_VM_INVALIDATE_ENG16_REQ				0x06f3
+#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX		0
+
+#define mmMMHUB_VM_INVALIDATE_ENG16_ACK				0x0705
+#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX		0
+
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32		0x072b
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX	0
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32		0x072c
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX	0
+
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32		0x074b
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX	0
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32		0x074c
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX	0
+
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32		0x076b
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX	0
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32		0x076c
+#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX	0
+
+#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32		0x0727
+#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX	0
+#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32		0x0728
+#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX	0
+
+#define V9_PIPE_PER_MEC		(4)
+#define V9_QUEUES_PER_PIPE_MEC	(8)
+
+enum hqd_dequeue_request_type {
+	NO_ACTION = 0,
+	DRAIN_PIPE,
+	RESET_WAVES
+};
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,
+		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+		uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+		unsigned int vmid);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr,
+			uint32_t wptr_shift, uint32_t wptr_mask,
+			struct mm_struct *mm);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+			     uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+			     uint32_t engine_id, uint32_t queue_id,
+			     uint32_t (**dump)[2], uint32_t *n_regs);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+		uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+				enum kfd_preempt_type reset_type,
+				unsigned int utimeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int utimeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t page_table_base);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+					uint64_t va, uint32_t vmid);
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+
+/* Because REG_GET_FIELD() is used, this function is kept in the
+ * ASIC-specific file.
+ */
+static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+		struct tile_config *config)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	config->gb_addr_config = adev->gfx.config.gb_addr_config;
+
+	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+	config->num_tile_configs =
+			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+	config->macro_tile_config_ptr =
+			adev->gfx.config.macrotile_mode_array;
+	config->num_macro_tile_configs =
+			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+	return 0;
+}
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_local_mem_info = get_local_mem_info,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.alloc_pasid = amdgpu_pasid_alloc,
+	.free_pasid = amdgpu_pasid_free,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_dump = kgd_hqd_dump,
+	.hqd_sdma_dump = kgd_hqd_sdma_dump,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid =
+			get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid =
+			get_atc_vmid_pasid_mapping_valid,
+	.get_fw_version = get_fw_version,
+	.set_scratch_backing_va = set_scratch_backing_va,
+	.get_tile_config = amdgpu_amdkfd_get_tile_config,
+	.get_cu_info = get_cu_info,
+	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
+	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+	.set_vm_context_page_table_base = set_vm_context_page_table_base,
+	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
+	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
+	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
+	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
+	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
+	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
+	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+	.invalidate_tlbs = invalidate_tlbs,
+	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+	.submit_ib = amdgpu_amdkfd_submit_ib,
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	mutex_lock(&adev->srbm_mutex);
+	soc15_grbm_select(adev, mec, pipe, queue, vmid);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	soc15_grbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static uint32_t get_queue_mask(struct amdgpu_device *adev,
+			       uint32_t pipe_id, uint32_t queue_id)
+{
+	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
+			    queue_id) & 31;
+
+	return ((uint32_t)1) << bit;
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
+	/* APE1 no longer exists on GFX9 */
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished
+	 * and the SW cleared it.
+	 * So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+			ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	/*
+	 * We need to do this twice, once for GFX and once for MMHUB.
+	 * For the ATC, add 16 to the VMID for MMHUB; the IH uses
+	 * different registers. ATC_VMID0..15 are separate from ATC_VMID16..31.
+	 */
+
+	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
+	       pasid_mapping);
+
+	while (!(RREG32(SOC15_REG_OFFSET(
+				ATHUB, 0,
+				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
+		 (1U << vmid)))
+		cpu_relax();
+
+	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
+	       1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
+	       pasid_mapping);
+
+	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
+	       pasid_mapping);
+
+	while (!(RREG32(SOC15_REG_OFFSET(
+				ATHUB, 0,
+				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
+		 (1U << (vmid + 16))))
+		cpu_relax();
+
+	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
+	       1U << (vmid + 16));
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
+	       pasid_mapping);
+	return 0;
+}
+
+/* TODO: the RING0 form of the field is obsolete; it seems to date back
+ * to SI but still works.
+ */
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
+		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+				unsigned int engine_id,
+				unsigned int queue_id)
+{
+	uint32_t base[2] = {
+		SOC15_REG_OFFSET(SDMA0, 0,
+				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
+		SOC15_REG_OFFSET(SDMA1, 0,
+				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
+	};
+	uint32_t retval;
+
+	retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
+					       mmSDMA0_RLC0_RB_CNTL);
+
+	pr_debug("sdma base address: 0x%x\n", retval);
+
+	return retval;
+}
+
+static inline struct v9_mqd *get_mqd(void *mqd)
+{
+	return (struct v9_mqd *)mqd;
+}
+
+static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+	return (struct v9_sdma_mqd *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr,
+			uint32_t wptr_shift, uint32_t wptr_mask,
+			struct mm_struct *mm)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct v9_mqd *m;
+	uint32_t *mqd_hqd;
+	uint32_t reg, hqd_base, data;
+
+	m = get_mqd(mqd);
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	/* HIQ is set during driver init period with vmid set to 0 */
+	if (m->cp_hqd_vmid == 0) {
+		uint32_t value, mec, pipe;
+
+		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+			mec, pipe, queue_id);
+		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
+		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
+			((mec << 5) | (pipe << 3) | queue_id | 0x80));
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
+	}
+
+	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+	mqd_hqd = &m->cp_mqd_base_addr_lo;
+	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
+
+	for (reg = hqd_base;
+	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
+		WREG32(reg, mqd_hqd[reg - hqd_base]);
+
+
+	/* Activate doorbell logic before triggering WPTR poll. */
+	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
+
+	if (wptr) {
+		/* Don't read wptr with get_user because the user
+		 * context may not be accessible (if this function
+		 * runs in a work queue). Instead trigger a one-shot
+		 * polling read from memory in the CP. This assumes
+		 * that wptr is GPU-accessible in the queue's VMID via
+		 * ATC or SVM. WPTR must equal RPTR before starting the
+		 * poll so that the CP starts fetching new commands from
+		 * the right place.
+		 *
+		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
+		 * tricky. Assume that the queue didn't overflow. The
+		 * number of valid bits in the 32-bit RPTR depends on
+		 * the queue size. The remaining bits are taken from
+		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
+		 * queue size.
+		 */
+		uint32_t queue_size =
+			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
+					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
+		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
+
+		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
+			guessed_wptr += queue_size;
+		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
+		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
+
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
+		       lower_32_bits(guessed_wptr));
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+		       upper_32_bits(guessed_wptr));
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+		       lower_32_bits((uint64_t)wptr));
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+		       upper_32_bits((uint64_t)wptr));
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
+		       get_queue_mask(adev, pipe_id, queue_id));
+	}
+
+	/* Start the EOP fetcher */
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
+	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
+			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+
+	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
+
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t (**dump)[2], uint32_t *n_regs)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t i = 0, reg;
+#define HQD_N_REGS 56
+#define DUMP_REG(addr) do {				\
+		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
+			break;				\
+		(*dump)[i][0] = (addr) << 2;		\
+		(*dump)[i++][1] = RREG32(addr);		\
+	} while (0)
+
+	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+	if (*dump == NULL)
+		return -ENOMEM;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
+	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
+		DUMP_REG(reg);
+
+	release_queue(kgd);
+
+	WARN_ON_ONCE(i != HQD_N_REGS);
+	*n_regs = i;
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+			     uint32_t __user *wptr, struct mm_struct *mm)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct v9_sdma_mqd *m;
+	uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+	unsigned long end_jiffies;
+	uint32_t data;
+	uint64_t data64;
+	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+					    m->sdma_queue_id);
+	sdmax_gfx_context_cntl = m->sdma_engine_id ?
+		SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
+		SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+	end_jiffies = msecs_to_jiffies(2000) + jiffies;
+	while (true) {
+		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+			break;
+		if (time_after(jiffies, end_jiffies))
+			return -ETIME;
+		usleep_range(500, 1000);
+	}
+	data = RREG32(sdmax_gfx_context_cntl);
+	data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+			     RESUME_CTX, 0);
+	WREG32(sdmax_gfx_context_cntl, data);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+	       m->sdmax_rlcx_doorbell_offset);
+
+	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+			     ENABLE, 1);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+				m->sdmax_rlcx_rb_rptr_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+	if (read_user_wptr(mm, wptr64, data64)) {
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+		       lower_32_bits(data64));
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+		       upper_32_bits(data64));
+	} else {
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+		       m->sdmax_rlcx_rb_rptr);
+		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+		       m->sdmax_rlcx_rb_rptr_hi);
+	}
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+			m->sdmax_rlcx_rb_base_hi);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+			m->sdmax_rlcx_rb_rptr_addr_lo);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+			m->sdmax_rlcx_rb_rptr_addr_hi);
+
+	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+			     RB_ENABLE, 1);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+			     uint32_t engine_id, uint32_t queue_id,
+			     uint32_t (**dump)[2], uint32_t *n_regs)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+	uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+6+7+10)
+
+	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+	if (*dump == NULL)
+		return -ENOMEM;
+
+	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+		DUMP_REG(sdma_base_addr + reg);
+	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
+		DUMP_REG(sdma_base_addr + reg);
+	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
+	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
+		DUMP_REG(sdma_base_addr + reg);
+	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
+	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
+		DUMP_REG(sdma_base_addr + reg);
+
+	WARN_ON_ONCE(i != HQD_N_REGS);
+	*n_regs = i;
+
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
+		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct v9_sdma_mqd *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+					    m->sdma_queue_id);
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+				enum kfd_preempt_type reset_type,
+				unsigned int utimeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	enum hqd_dequeue_request_type type;
+	unsigned long end_jiffies;
+	uint32_t temp;
+	struct v9_mqd *m = get_mqd(mqd);
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	if (m->cp_hqd_vmid == 0)
+		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
+
+	switch (reset_type) {
+	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+		type = DRAIN_PIPE;
+		break;
+	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+		type = RESET_WAVES;
+		break;
+	default:
+		type = DRAIN_PIPE;
+		break;
+	}
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
+
+	end_jiffies = (utimeout * HZ / 1000) + jiffies;
+	while (true) {
+		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+			break;
+		if (time_after(jiffies, end_jiffies)) {
+			pr_err("cp queue preemption time out.\n");
+			release_queue(kgd);
+			return -ETIME;
+		}
+		usleep_range(500, 1000);
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int utimeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct v9_sdma_mqd *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+					    m->sdma_queue_id);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+			break;
+		if (time_after(jiffies, end_jiffies))
+			return -ETIME;
+		usleep_range(500, 1000);
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+	m->sdmax_rlcx_rb_rptr_hi =
+		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+
+	return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+		     + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+								uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+		     + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	uint32_t req = (1 << vmid) |
+		(0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
+		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
+		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
+		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
+		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
+		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;
+
+	mutex_lock(&adev->srbm_mutex);
+
+	/* Use legacy mode tlb invalidation.
+	 *
+	 * Currently on Raven the code below is broken for anything but
+	 * legacy mode due to a MMHUB power gating problem. A workaround
+	 * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
+	 * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
+	 * bit.
+	 *
+	 * TODO 1: agree on the right set of invalidation registers for
+	 * KFD use. Use the last one for now. Invalidate both GC and
+	 * MMHUB.
+	 *
+	 * TODO 2: support range-based invalidation, requires kfd2kgd
+	 * interface change
+	 */
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
+				0xffffffff);
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
+				0x0000001f);
+
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
+				mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
+				0xffffffff);
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
+				mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
+				0x0000001f);
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);
+
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
+				req);
+
+	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
+					(1 << vmid)))
+		cpu_relax();
+
+	while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
+					mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
+					(1 << vmid)))
+		cpu_relax();
+
+	mutex_unlock(&adev->srbm_mutex);
+
+}
+
+static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
+{
+	signed long r;
+	uint32_t seq;
+	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+
+	spin_lock(&adev->gfx.kiq.ring_lock);
+	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs packet */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+	amdgpu_ring_write(ring,
+			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+			PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
+			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(0)); /* legacy */
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock(&adev->gfx.kiq.ring_lock);
+
+	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+	if (r < 1) {
+		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	int vmid;
+	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+
+	if (ring->ready)
+		return invalidate_tlbs_with_kiq(adev, pasid);
+
+	for (vmid = 0; vmid < 16; vmid++) {
+		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
+			continue;
+		if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
+			if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
+				== pasid) {
+				write_vmid_invalidate_request(kgd, vmid);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+		pr_err("non kfd vmid %d\n", vmid);
+		return 0;
+	}
+
+	write_vmid_invalidate_request(kgd, vmid);
+	return 0;
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
+
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		INSTANCE_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		SH_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		SE_BROADCAST_WRITES, 1);
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return 0;
+}
+
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+					uint64_t va, uint32_t vmid)
+{
+	/* No longer needed on GFXv9. The scratch base address is
+	 * passed to the shader by the CP. It's the user mode driver's
+	 * responsibility.
+	 */
+}
+
+/* FIXME: Does this need to be ASIC-specific code? */
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
+
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t page_table_base)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint64_t base = (uint64_t)page_table_base << PAGE_SHIFT |
+		AMDGPU_PTE_VALID;
+
+	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+		pr_err("trying to set page table base for wrong VMID %u\n",
+		       vmid);
+		return;
+	}
+
+	/* TODO: take advantage of per-process address space size. For
+	 * now, all processes share the same address space size, like
+	 * on GFX8 and older.
+	 */
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
+
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
+			lower_32_bits(adev->vm_manager.max_pfn - 1));
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
+			upper_32_bits(adev->vm_manager.max_pfn - 1));
+
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
+	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
+			lower_32_bits(adev->vm_manager.max_pfn - 1));
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
+			upper_32_bits(adev->vm_manager.max_pfn - 1));
+
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
+	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9d39fd5b1822..e5962e61beb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4686,6 +4686,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
 
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
+	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
 
 	return 0;
 }
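
The 64-bit WPTR guess in kgd_hqd_load() above is the trickiest piece of
arithmetic in this patch, so a minimal stand-alone sketch of the same
computation follows. The helper name and free-standing parameters are
invented for illustration; in the driver the inputs come from the v9_mqd
fields (cp_hqd_pq_rptr, cp_hqd_pq_wptr_lo/hi) and the queue size decoded
from cp_hqd_pq_control.

#include <stdint.h>
#include <stdio.h>

/*
 * Reconstruct a 64-bit write pointer from the 32-bit read pointer saved
 * in the MQD, assuming the queue did not overflow: keep the RPTR offset
 * within the ring, add one queue size if the saved WPTR wrapped past it,
 * and take the remaining high bits from the saved 64-bit WPTR.
 */
static uint64_t guess_wptr(uint32_t rptr, uint32_t wptr_lo, uint32_t wptr_hi,
			   uint32_t queue_size /* dwords, power of two */)
{
	uint64_t guessed = rptr & (queue_size - 1);

	if ((wptr_lo & (queue_size - 1)) < guessed)
		guessed += queue_size;
	guessed += wptr_lo & ~(queue_size - 1);
	guessed += (uint64_t)wptr_hi << 32;

	return guessed;
}

int main(void)
{
	/* 1024-dword ring: RPTR offset 0x10, saved WPTR 0x1010 -> 0x1010. */
	printf("0x%llx\n",
	       (unsigned long long)guess_wptr(0x10, 0x1010, 0, 1024));
	return 0;
}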

From 642a0e80262af8e9d5b8129e2149c670ab3bb4b8 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:02 -0400
Subject: [PATCH 0233/1286] drm/amdgpu: Add doorbell routing info to
 kgd2kfd_shared_resources

This is needed for Vega10 and later ASICs to let KFD know which
doorbells can be used for SDMA and CP queues respectively.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c    | 22 +++++++++++++++++++
 .../gpu/drm/amd/include/kgd_kfd_interface.h   | 15 +++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fcd10dbd121c..cd0e8f192e6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -179,6 +179,28 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				&gpu_resources.doorbell_physical_address,
 				&gpu_resources.doorbell_aperture_size,
 				&gpu_resources.doorbell_start_offset);
+		if (adev->asic_type >= CHIP_VEGA10) {
+			/* On SOC15 the BIF is involved in routing
+			 * doorbells using the low 12 bits of the
+			 * address. Communicate the assignments to
+			 * KFD. KFD uses two doorbell pages per
+			 * process in case of 64-bit doorbells so we
+			 * can use each doorbell assignment twice.
+			 */
+			gpu_resources.sdma_doorbell[0][0] =
+				AMDGPU_DOORBELL64_sDMA_ENGINE0;
+			gpu_resources.sdma_doorbell[0][1] =
+				AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
+			gpu_resources.sdma_doorbell[1][0] =
+				AMDGPU_DOORBELL64_sDMA_ENGINE1;
+			gpu_resources.sdma_doorbell[1][1] =
+				AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
+			/* Doorbells 0x0f0-0x0ff and 0x2f0-0x2ff are reserved for
+			 * SDMA, IH and VCN. So don't use them for the CP.
+			 */
+			gpu_resources.reserved_doorbell_mask = 0x1f0;
+			gpu_resources.reserved_doorbell_val  = 0x0f0;
+		}
 
 		kgd2kfd->device_init(adev->kfd, &gpu_resources);
 	}
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 7cf35068866f..5733fbee07f7 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -100,6 +100,21 @@ struct kgd2kfd_shared_resources {
 	/* Bit n == 1 means Queue n is available for KFD */
 	DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
 
+	/* Doorbell assignments (SOC15 and later chips only). Only
+	 * specific doorbells are routed to each SDMA engine. Others
+	 * are routed to IH and VCN. They are not usable by the CP.
+	 *
+	 * Any doorbell number D that satisfies the following condition
+	 * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
+	 *
+	 * KFD currently uses 1024 (0x000-0x3ff) doorbells per process. If
+	 * doorbells 0x0f0-0x0f7 and 0x2f0-0x2f7 are reserved, that means
+	 * mask would be set to 0x1f8 and val set to 0x0f0.
+	 */
+	unsigned int sdma_doorbell[2][2];
+	unsigned int reserved_doorbell_mask;
+	unsigned int reserved_doorbell_val;
+
 	/* Base address of doorbell aperture. */
 	phys_addr_t doorbell_physical_address;
 

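The reserved-doorbell rule documented above is a plain mask/compare. The
stand-alone sketch below (the helper name is invented here) applies the
mask 0x1f0 and value 0x0f0 that amdgpu_amdkfd_device_init() programs and
counts the doorbells reserved out of the per-process range 0x000-0x3ff.

#include <stdbool.h>
#include <stdio.h>

/* A doorbell index D is reserved iff (D & mask) == val. */
static bool doorbell_is_reserved(unsigned int db, unsigned int mask,
				 unsigned int val)
{
	return (db & mask) == val;
}

int main(void)
{
	unsigned int db, count = 0;

	for (db = 0; db < 0x400; db++)
		if (doorbell_is_reserved(db, 0x1f0, 0x0f0))
			count++;

	/* Reserves 0x0f0-0x0ff and 0x2f0-0x2ff: 32 doorbells in total. */
	printf("%u doorbells reserved\n", count);
	return 0;
}
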
From ada2b29c4a79efbdc5bf5eed876bad6b00f43536 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:03 -0400
Subject: [PATCH 0234/1286] drm/amdkfd: Make doorbell size ASIC-dependent

This prepares for GFXv9 (Vega10), which has 64-bit doorbells.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c   | 10 +++++
 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 48 ++++++++++++-----------
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h     |  7 ++--
 3 files changed, 39 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7b5799530c0f..f563acbc1ad7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -41,6 +41,7 @@ static const struct kfd_device_info kaveri_device_info = {
 	.max_pasid_bits = 16,
 	/* max num of queues for KV.TODO should be a dynamic value */
 	.max_no_of_hqd	= 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -55,6 +56,7 @@ static const struct kfd_device_info carrizo_device_info = {
 	.max_pasid_bits = 16,
 	/* max num of queues for CZ.TODO should be a dynamic value */
 	.max_no_of_hqd	= 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -70,6 +72,7 @@ static const struct kfd_device_info hawaii_device_info = {
 	.max_pasid_bits = 16,
 	/* max num of queues for KV.TODO should be a dynamic value */
 	.max_no_of_hqd	= 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -83,6 +86,7 @@ static const struct kfd_device_info tonga_device_info = {
 	.asic_family = CHIP_TONGA,
 	.max_pasid_bits = 16,
 	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -96,6 +100,7 @@ static const struct kfd_device_info tonga_vf_device_info = {
 	.asic_family = CHIP_TONGA,
 	.max_pasid_bits = 16,
 	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -109,6 +114,7 @@ static const struct kfd_device_info fiji_device_info = {
 	.asic_family = CHIP_FIJI,
 	.max_pasid_bits = 16,
 	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -122,6 +128,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
 	.asic_family = CHIP_FIJI,
 	.max_pasid_bits = 16,
 	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -136,6 +143,7 @@ static const struct kfd_device_info polaris10_device_info = {
 	.asic_family = CHIP_POLARIS10,
 	.max_pasid_bits = 16,
 	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -149,6 +157,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
 	.asic_family = CHIP_POLARIS10,
 	.max_pasid_bits = 16,
 	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
@@ -162,6 +171,7 @@ static const struct kfd_device_info polaris11_device_info = {
 	.asic_family = CHIP_POLARIS11,
 	.max_pasid_bits = 16,
 	.max_no_of_hqd  = 24,
+	.doorbell_size  = 4,
 	.ih_ring_entry_size = 4 * sizeof(uint32_t),
 	.event_interrupt_class = &event_interrupt_class_cik,
 	.num_of_watch_points = 4,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index ebb4da14e3df..484031423d1f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -33,7 +33,6 @@
 
 static DEFINE_IDA(doorbell_ida);
 static unsigned int max_doorbell_slices;
-#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
 
 /*
  * Each device exposes a doorbell aperture, a PCI MMIO aperture that
@@ -50,9 +49,9 @@ static unsigned int max_doorbell_slices;
  */
 
 /* # of doorbell bytes allocated for each process. */
-static inline size_t doorbell_process_allocation(void)
+static size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
 {
-	return roundup(KFD_SIZE_OF_DOORBELL_IN_BYTES *
+	return roundup(kfd->device_info->doorbell_size *
 			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
 			PAGE_SIZE);
 }
@@ -72,16 +71,16 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
 
 	doorbell_start_offset =
 			roundup(kfd->shared_resources.doorbell_start_offset,
-					doorbell_process_allocation());
+					kfd_doorbell_process_slice(kfd));
 
 	doorbell_aperture_size =
 			rounddown(kfd->shared_resources.doorbell_aperture_size,
-					doorbell_process_allocation());
+					kfd_doorbell_process_slice(kfd));
 
 	if (doorbell_aperture_size > doorbell_start_offset)
 		doorbell_process_limit =
 			(doorbell_aperture_size - doorbell_start_offset) /
-						doorbell_process_allocation();
+						kfd_doorbell_process_slice(kfd);
 	else
 		return -ENOSPC;
 
@@ -95,7 +94,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
 	kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
 
 	kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
-						doorbell_process_allocation());
+					   kfd_doorbell_process_slice(kfd));
 
 	if (!kfd->doorbell_kernel_ptr)
 		return -ENOMEM;
@@ -132,16 +131,16 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
 	phys_addr_t address;
 	struct kfd_dev *dev;
 
+	/* Find kfd device according to gpu id */
+	dev = kfd_device_by_id(vma->vm_pgoff);
+	if (!dev)
+		return -EINVAL;
+
 	/*
 	 * For simplicitly we only allow mapping of the entire doorbell
 	 * allocation of a single device & process.
 	 */
-	if (vma->vm_end - vma->vm_start != doorbell_process_allocation())
-		return -EINVAL;
-
-	/* Find kfd device according to gpu id */
-	dev = kfd_device_by_id(vma->vm_pgoff);
-	if (!dev)
+	if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev))
 		return -EINVAL;
 
 	/* Calculate physical address of doorbell */
@@ -158,19 +157,19 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
 		 "     vm_flags            == 0x%04lX\n"
 		 "     size                == 0x%04lX\n",
 		 (unsigned long long) vma->vm_start, address, vma->vm_flags,
-		 doorbell_process_allocation());
+		 kfd_doorbell_process_slice(dev));
 
 
 	return io_remap_pfn_range(vma,
 				vma->vm_start,
 				address >> PAGE_SHIFT,
-				doorbell_process_allocation(),
+				kfd_doorbell_process_slice(dev),
 				vma->vm_page_prot);
 }
 
 
 /* get kernel iomem pointer for a doorbell */
-u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 					unsigned int *doorbell_off)
 {
 	u32 inx;
@@ -185,6 +184,8 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 	if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 		return NULL;
 
+	inx *= kfd->device_info->doorbell_size / sizeof(u32);
+
 	/*
 	 * Calculating the kernel doorbell offset using the first
 	 * doorbell page.
@@ -210,7 +211,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
 	mutex_unlock(&kfd->doorbell_mutex);
 }
 
-inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
+void write_kernel_doorbell(void __iomem *db, u32 value)
 {
 	if (db) {
 		writel(value, db);
@@ -228,20 +229,21 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
 {
 	/*
 	 * doorbell_id_offset accounts for doorbells taken by KGD.
-	 * index * doorbell_process_allocation/sizeof(u32) adjusts to
-	 * the process's doorbells.
+	 * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to
+	 * the process's doorbells. The offset returned is in dword
+	 * units regardless of the ASIC-dependent doorbell size.
 	 */
 	return kfd->doorbell_id_offset +
 		process->doorbell_index
-		* doorbell_process_allocation() / sizeof(u32) +
-		queue_id;
+		* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
+		queue_id * kfd->device_info->doorbell_size / sizeof(u32);
 }
 
 uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
 {
 	uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size -
 				kfd->shared_resources.doorbell_start_offset) /
-					doorbell_process_allocation() + 1;
+					kfd_doorbell_process_slice(kfd) + 1;
 
 	return num_of_elems;
 
@@ -251,7 +253,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
 					struct kfd_process *process)
 {
 	return dev->doorbell_base +
-		process->doorbell_index * doorbell_process_allocation();
+		process->doorbell_index * kfd_doorbell_process_slice(dev);
 }
 
 int kfd_alloc_process_doorbells(struct kfd_process *process)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 4d5c49ef2dc5..d9c0fe126429 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -160,6 +160,7 @@ struct kfd_device_info {
 	const struct kfd_event_interrupt_class *event_interrupt_class;
 	unsigned int max_pasid_bits;
 	unsigned int max_no_of_hqd;
+	unsigned int doorbell_size;
 	size_t ih_ring_entry_size;
 	uint8_t num_of_watch_points;
 	uint16_t mqd_size_aligned;
@@ -364,7 +365,7 @@ struct queue_properties {
 	uint32_t queue_percent;
 	uint32_t *read_ptr;
 	uint32_t *write_ptr;
-	uint32_t __iomem *doorbell_ptr;
+	void __iomem *doorbell_ptr;
 	uint32_t doorbell_off;
 	bool is_interop;
 	bool is_evicted;
@@ -728,11 +729,11 @@ void kfd_pasid_free(unsigned int pasid);
 int kfd_doorbell_init(struct kfd_dev *kfd);
 void kfd_doorbell_fini(struct kfd_dev *kfd);
 int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
-u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 					unsigned int *doorbell_off);
 void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
 u32 read_kernel_doorbell(u32 __iomem *db);
-void write_kernel_doorbell(u32 __iomem *db, u32 value);
+void write_kernel_doorbell(void __iomem *db, u32 value);
 unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
 					struct kfd_process *process,
 					unsigned int queue_id);

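To see what the ASIC-dependent doorbell size changes in practice, here is
a small stand-alone sketch of the offset math in
kfd_doorbell_process_slice() and kfd_queue_id_to_doorbell(). The 1024
queues-per-process limit, the 4 KiB page size and the zero
doorbell_id_offset are assumptions for this example; as in the driver,
the returned offsets are in dword units regardless of the doorbell size.

#include <stddef.h>
#include <stdio.h>

#define MAX_QUEUES_PER_PROCESS	1024
#define PAGE_SIZE_BYTES		4096

static size_t roundup_to(size_t x, size_t align)
{
	return ((x + align - 1) / align) * align;
}

/* Bytes of doorbell space per process, rounded up to a whole page. */
static size_t doorbell_process_slice(size_t doorbell_size)
{
	return roundup_to(doorbell_size * MAX_QUEUES_PER_PROCESS,
			  PAGE_SIZE_BYTES);
}

/* Doorbell offset in dword units; the strides scale with doorbell_size. */
static unsigned int queue_id_to_doorbell(unsigned int id_offset,
					 unsigned int process_index,
					 unsigned int queue_id,
					 size_t doorbell_size)
{
	return id_offset +
	       process_index * doorbell_process_slice(doorbell_size) / 4 +
	       queue_id * doorbell_size / 4;
}

int main(void)
{
	/* Queue 3 of the second process: 4-byte vs 8-byte doorbells. */
	printf("4-byte doorbells: %u\n", queue_id_to_doorbell(0, 1, 3, 4));
	printf("8-byte doorbells: %u\n", queue_id_to_doorbell(0, 1, 3, 8));
	return 0;
}
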
From df03ef9342ce09985210679a734f88a269c19ff5 Mon Sep 17 00:00:00 2001
From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
Date: Tue, 10 Apr 2018 17:33:04 -0400
Subject: [PATCH 0235/1286] drm/amdkfd: Clean up KFD_MMAP_ offset handling

Use bit-rotate for better clarity and remove _MASK from the #defines as
these represent mmap types.

Centralize all the parsing of the mmap offset in kfd_mmap and add a
device parameter to the doorbell and reserved_mem map functions.

Encode gpu_id into upper bits of vm_pgoff. This frees up the lower bits
for encoding the doorbell ID on Vega10.

Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c  | 35 ++++++++++++++-------
 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c |  9 ++----
 drivers/gpu/drm/amd/amdkfd/kfd_events.c   |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h     | 38 ++++++++++++++++++-----
 drivers/gpu/drm/amd/amdkfd/kfd_process.c  |  8 ++---
 5 files changed, 59 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index cd679cf1fd30..519c7b1854b2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -292,7 +292,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
 
 	/* Return gpu_id as doorbell offset for mmap usage */
-	args->doorbell_offset = (KFD_MMAP_DOORBELL_MASK | args->gpu_id);
+	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
+	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
 	args->doorbell_offset <<= PAGE_SHIFT;
 
 	mutex_unlock(&p->mutex);
@@ -1644,23 +1645,33 @@ err_i1:
 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct kfd_process *process;
+	struct kfd_dev *dev = NULL;
+	unsigned long vm_pgoff;
+	unsigned int gpu_id;
 
 	process = kfd_get_process(current);
 	if (IS_ERR(process))
 		return PTR_ERR(process);
 
-	if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) ==
-			KFD_MMAP_DOORBELL_MASK) {
-		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_DOORBELL_MASK;
-		return kfd_doorbell_mmap(process, vma);
-	} else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) ==
-			KFD_MMAP_EVENTS_MASK) {
-		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK;
+	vm_pgoff = vma->vm_pgoff;
+	vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
+	gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
+	if (gpu_id)
+		dev = kfd_device_by_id(gpu_id);
+
+	switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
+	case KFD_MMAP_TYPE_DOORBELL:
+		if (!dev)
+			return -ENODEV;
+		return kfd_doorbell_mmap(dev, process, vma);
+
+	case KFD_MMAP_TYPE_EVENTS:
 		return kfd_event_mmap(process, vma);
-	} else if ((vma->vm_pgoff & KFD_MMAP_RESERVED_MEM_MASK) ==
-			KFD_MMAP_RESERVED_MEM_MASK) {
-		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_RESERVED_MEM_MASK;
-		return kfd_reserved_mem_mmap(process, vma);
+
+	case KFD_MMAP_TYPE_RESERVED_MEM:
+		if (!dev)
+			return -ENODEV;
+		return kfd_reserved_mem_mmap(dev, process, vma);
 	}
 
 	return -EFAULT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 484031423d1f..efc59dea563f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -126,15 +126,10 @@ void kfd_doorbell_fini(struct kfd_dev *kfd)
 		iounmap(kfd->doorbell_kernel_ptr);
 }
 
-int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
+int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+		      struct vm_area_struct *vma)
 {
 	phys_addr_t address;
-	struct kfd_dev *dev;
-
-	/* Find kfd device according to gpu id */
-	dev = kfd_device_by_id(vma->vm_pgoff);
-	if (!dev)
-		return -EINVAL;
 
 	/*
 	 * For simplicitly we only allow mapping of the entire doorbell
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 4890a90f1e44..bccf2f761177 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -345,7 +345,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
 	case KFD_EVENT_TYPE_DEBUG:
 		ret = create_signal_event(devkfd, p, ev);
 		if (!ret) {
-			*event_page_offset = KFD_MMAP_EVENTS_MASK;
+			*event_page_offset = KFD_MMAP_TYPE_EVENTS;
 			*event_page_offset <<= PAGE_SHIFT;
 			*event_slot_index = ev->event_id;
 		}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d9c0fe126429..2d575c014651 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -41,9 +41,33 @@
 
 #define KFD_SYSFS_FILE_MODE 0444
 
-#define KFD_MMAP_DOORBELL_MASK 0x8000000000000ull
-#define KFD_MMAP_EVENTS_MASK 0x4000000000000ull
-#define KFD_MMAP_RESERVED_MEM_MASK 0x2000000000000ull
+/* GPU ID hash width in bits */
+#define KFD_GPU_ID_HASH_WIDTH 16
+
+/* Use upper bits of mmap offset to store KFD driver specific information.
+ * BITS[63:62] - Encode MMAP type
+ * BITS[61:46] - Encode gpu_id. Identifies which GPU the offset belongs to
+ * BITS[45:0]  - MMAP offset value
+ *
+ * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
+ *  defines are w.r.t. PAGE_SIZE
+ */
+#define KFD_MMAP_TYPE_SHIFT	(62 - PAGE_SHIFT)
+#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
+#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
+#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
+#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
+
+#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
+#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
+				<< KFD_MMAP_GPU_ID_SHIFT)
+#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
+				& KFD_MMAP_GPU_ID_MASK)
+#define KFD_MMAP_GPU_ID_GET(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
+				>> KFD_MMAP_GPU_ID_SHIFT)
+
+#define KFD_MMAP_OFFSET_VALUE_MASK	(0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
+#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
 
 /*
  * When working with cp scheduler we should assign the HIQ manually or via
@@ -55,9 +79,6 @@
 #define KFD_CIK_HIQ_PIPE 4
 #define KFD_CIK_HIQ_QUEUE 0
 
-/* GPU ID hash width in bits */
-#define KFD_GPU_ID_HASH_WIDTH 16
-
 /* Macro for allocating structures */
 #define kfd_alloc_struct(ptr_to_struct)	\
 	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
@@ -698,7 +719,7 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
 struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 							struct kfd_process *p);
 
-int kfd_reserved_mem_mmap(struct kfd_process *process,
+int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
 			  struct vm_area_struct *vma);
 
 /* KFD process API for creating and translating handles */
@@ -728,7 +749,8 @@ void kfd_pasid_free(unsigned int pasid);
 /* Doorbells */
 int kfd_doorbell_init(struct kfd_dev *kfd);
 void kfd_doorbell_fini(struct kfd_dev *kfd);
-int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
+int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+		      struct vm_area_struct *vma);
 void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 					unsigned int *doorbell_off);
 void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 2791e72c2058..131fe2a1b589 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -451,7 +451,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
 			continue;
 
-		offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
+		offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
+			<< PAGE_SHIFT;
 		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
 			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
 			MAP_SHARED, offset);
@@ -989,15 +990,12 @@ int kfd_resume_all_processes(void)
 	return ret;
 }
 
-int kfd_reserved_mem_mmap(struct kfd_process *process,
+int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
 			  struct vm_area_struct *vma)
 {
-	struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
 	struct kfd_process_device *pdd;
 	struct qcm_process_device *qpd;
 
-	if (!dev)
-		return -EINVAL;
 	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
 		pr_err("Incorrect CWSR mapping size.\n");
 		return -EINVAL;

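The new mmap offset layout is easier to follow with the bits written out
in byte units, as user mode sees them after the PAGE_SHIFT applied in
kfd_ioctl_create_queue(): bits 63:62 carry the mapping type, bits 61:46
the gpu_id and bits 45:0 the offset value. The encode helper and the
sample gpu_id below are made up for this sketch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MMAP_TYPE_SHIFT		62
#define MMAP_TYPE_DOORBELL	3ULL
#define MMAP_TYPE_EVENTS	2ULL
#define MMAP_TYPE_RESERVED_MEM	1ULL
#define MMAP_GPU_ID_SHIFT	46
#define MMAP_GPU_ID_MASK	((((uint64_t)1 << 16) - 1) << MMAP_GPU_ID_SHIFT)
#define MMAP_OFFSET_VALUE_MASK	(((uint64_t)1 << 46) - 1)

static uint64_t encode_mmap_offset(uint64_t type, uint32_t gpu_id,
				   uint64_t value)
{
	return (type << MMAP_TYPE_SHIFT) |
	       (((uint64_t)gpu_id << MMAP_GPU_ID_SHIFT) & MMAP_GPU_ID_MASK) |
	       (value & MMAP_OFFSET_VALUE_MASK);
}

int main(void)
{
	uint64_t off = encode_mmap_offset(MMAP_TYPE_DOORBELL, 0x1234, 0);

	printf("offset 0x%016" PRIx64 "\n", off);
	printf("type   %" PRIu64 "\n", off >> MMAP_TYPE_SHIFT);
	printf("gpu_id 0x%" PRIx64 "\n",
	       (off & MMAP_GPU_ID_MASK) >> MMAP_GPU_ID_SHIFT);
	printf("value  0x%" PRIx64 "\n", off & MMAP_OFFSET_VALUE_MASK);
	return 0;
}
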
From ef568db792e66216b48fd1567ff4a9d3bf9af866 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:05 -0400
Subject: [PATCH 0236/1286] drm/amdkfd: Implement doorbell allocation for SOC15

Allocate doorbells according to the doorbell routing information on
SOC15 ASICs (Vega10 and later). On older ASICs we continue to use the
queue_id as the doorbell ID to maintain compatibility with the Thunk.

Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c      |  7 ++
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 82 ++++++++++++++++++-
 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c     | 12 +--
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         | 11 ++-
 drivers/gpu/drm/amd/amdkfd/kfd_process.c      | 32 ++++++++
 .../amd/amdkfd/kfd_process_queue_manager.c    | 12 ++-
 6 files changed, 139 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 519c7b1854b2..5694fbead9a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -295,6 +295,13 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
 	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
 	args->doorbell_offset <<= PAGE_SHIFT;
+	if (KFD_IS_SOC15(dev->device_info->asic_family))
+		/* On SOC15 ASICs, doorbell allocation must be
+		 * per-device, and independent from the per-process
+		 * queue_id. Return the doorbell offset within the
+		 * doorbell aperture to user mode.
+		 */
+		args->doorbell_offset |= q_properties.doorbell_off;
 
 	mutex_unlock(&p->mutex);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d55d29d31da4..e9c72d8f0935 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -110,6 +110,57 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 						qpd->sh_mem_bases);
 }
 
+static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
+{
+	struct kfd_dev *dev = qpd->dqm->dev;
+
+	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
+		/* On pre-SOC15 chips we need to use the queue ID to
+		 * preserve the user mode ABI.
+		 */
+		q->doorbell_id = q->properties.queue_id;
+	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+		/* For SDMA queues on SOC15, use static doorbell
+		 * assignments based on the engine and queue.
+		 */
+		q->doorbell_id = dev->shared_resources.sdma_doorbell
+			[q->properties.sdma_engine_id]
+			[q->properties.sdma_queue_id];
+	} else {
+		/* For CP queues on SOC15 reserve a free doorbell ID */
+		unsigned int found;
+
+		found = find_first_zero_bit(qpd->doorbell_bitmap,
+					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
+			pr_debug("No doorbells available");
+			return -EBUSY;
+		}
+		set_bit(found, qpd->doorbell_bitmap);
+		q->doorbell_id = found;
+	}
+
+	q->properties.doorbell_off =
+		kfd_doorbell_id_to_offset(dev, q->process,
+					  q->doorbell_id);
+
+	return 0;
+}
+
+static void deallocate_doorbell(struct qcm_process_device *qpd,
+				struct queue *q)
+{
+	unsigned int old;
+	struct kfd_dev *dev = qpd->dqm->dev;
+
+	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
+	    q->properties.type == KFD_QUEUE_TYPE_SDMA)
+		return;
+
+	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
+	WARN_ON(!old);
+}
+
 static int allocate_vmid(struct device_queue_manager *dqm,
 			struct qcm_process_device *qpd,
 			struct queue *q)
@@ -301,10 +352,14 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	if (retval)
 		return retval;
 
+	retval = allocate_doorbell(qpd, q);
+	if (retval)
+		goto out_deallocate_hqd;
+
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
 	if (retval)
-		goto out_deallocate_hqd;
+		goto out_deallocate_doorbell;
 
 	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
 			q->pipe, q->queue);
@@ -324,6 +379,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 
 out_uninit_mqd:
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_doorbell:
+	deallocate_doorbell(qpd, q);
 out_deallocate_hqd:
 	deallocate_hqd(dqm, q);
 
@@ -357,6 +414,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	}
 	dqm->total_queue_count--;
 
+	deallocate_doorbell(qpd, q);
+
 	retval = mqd->destroy_mqd(mqd, q->mqd,
 				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
 				KFD_UNMAP_LATENCY_MS,
@@ -861,6 +920,10 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
 	q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
 
+	retval = allocate_doorbell(qpd, q);
+	if (retval)
+		goto out_deallocate_sdma_queue;
+
 	pr_debug("SDMA id is:    %d\n", q->sdma_id);
 	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
 	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
@@ -869,7 +932,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
 	if (retval)
-		goto out_deallocate_sdma_queue;
+		goto out_deallocate_doorbell;
 
 	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
 	if (retval)
@@ -879,6 +942,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 
 out_uninit_mqd:
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_doorbell:
+	deallocate_doorbell(qpd, q);
 out_deallocate_sdma_queue:
 	deallocate_sdma_queue(dqm, q->sdma_id);
 
@@ -1070,12 +1135,17 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 		q->properties.sdma_engine_id =
 			q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
 	}
+
+	retval = allocate_doorbell(qpd, q);
+	if (retval)
+		goto out_deallocate_sdma_queue;
+
 	mqd = dqm->ops.get_mqd_manager(dqm,
 			get_mqd_type_from_queue_type(q->properties.type));
 
 	if (!mqd) {
 		retval = -ENOMEM;
-		goto out_deallocate_sdma_queue;
+		goto out_deallocate_doorbell;
 	}
 	/*
 	 * Eviction state logic: we only mark active queues as evicted
@@ -1093,7 +1163,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
 	if (retval)
-		goto out_deallocate_sdma_queue;
+		goto out_deallocate_doorbell;
 
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
@@ -1117,6 +1187,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	mutex_unlock(&dqm->lock);
 	return retval;
 
+out_deallocate_doorbell:
+	deallocate_doorbell(qpd, q);
 out_deallocate_sdma_queue:
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q->sdma_id);
@@ -1257,6 +1329,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 		goto failed;
 	}
 
+	deallocate_doorbell(qpd, q);
+
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
 		dqm->sdma_queue_count--;
 		deallocate_sdma_queue(dqm, q->sdma_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index efc59dea563f..36c9269ea7c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -49,7 +49,7 @@ static unsigned int max_doorbell_slices;
  */
 
 /* # of doorbell bytes allocated for each process. */
-static size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
+size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
 {
 	return roundup(kfd->device_info->doorbell_size *
 			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
@@ -214,13 +214,9 @@ void write_kernel_doorbell(void __iomem *db, u32 value)
 	}
 }
 
-/*
- * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1
- * to doorbells with the process's doorbell page
- */
-unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
+unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
 					struct kfd_process *process,
-					unsigned int queue_id)
+					unsigned int doorbell_id)
 {
 	/*
 	 * doorbell_id_offset accounts for doorbells taken by KGD.
@@ -231,7 +227,7 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
 	return kfd->doorbell_id_offset +
 		process->doorbell_index
 		* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
-		queue_id * kfd->device_info->doorbell_size / sizeof(u32);
+		doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
 }
 
 uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 2d575c014651..ddb3c8cdfb7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -169,6 +169,8 @@ enum cache_policy {
 	cache_policy_noncoherent
 };
 
+#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
+
 struct kfd_event_interrupt_class {
 	bool (*interrupt_isr)(struct kfd_dev *dev,
 				const uint32_t *ih_ring_entry);
@@ -449,6 +451,7 @@ struct queue {
 	uint32_t queue;
 
 	unsigned int sdma_id;
+	unsigned int doorbell_id;
 
 	struct kfd_process	*process;
 	struct kfd_dev		*device;
@@ -523,6 +526,9 @@ struct qcm_process_device {
 	/* IB memory */
 	uint64_t ib_base;
 	void *ib_kaddr;
+
+	/* doorbell resources per process per device */
+	unsigned long *doorbell_bitmap;
 };
 
 /* KFD Memory Eviction */
@@ -747,6 +753,7 @@ unsigned int kfd_pasid_alloc(void);
 void kfd_pasid_free(unsigned int pasid);
 
 /* Doorbells */
+size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
 int kfd_doorbell_init(struct kfd_dev *kfd);
 void kfd_doorbell_fini(struct kfd_dev *kfd);
 int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
@@ -756,9 +763,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
 u32 read_kernel_doorbell(u32 __iomem *db);
 void write_kernel_doorbell(void __iomem *db, u32 value);
-unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
+unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
 					struct kfd_process *process,
-					unsigned int queue_id);
+					unsigned int doorbell_id);
 phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
 					struct kfd_process *process);
 int kfd_alloc_process_doorbells(struct kfd_process *process);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 131fe2a1b589..1d80b4f7c681 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -332,6 +332,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
 			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
 				get_order(KFD_CWSR_TBA_TMA_SIZE));
 
+		kfree(pdd->qpd.doorbell_bitmap);
 		idr_destroy(&pdd->alloc_idr);
 
 		kfree(pdd);
@@ -586,6 +587,31 @@ err_alloc_process:
 	return ERR_PTR(err);
 }
 
+static int init_doorbell_bitmap(struct qcm_process_device *qpd,
+			struct kfd_dev *dev)
+{
+	unsigned int i;
+
+	if (!KFD_IS_SOC15(dev->device_info->asic_family))
+		return 0;
+
+	qpd->doorbell_bitmap =
+		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+				     BITS_PER_BYTE), GFP_KERNEL);
+	if (!qpd->doorbell_bitmap)
+		return -ENOMEM;
+
+	/* Mask out any reserved doorbells */
+	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++)
+		if ((dev->shared_resources.reserved_doorbell_mask & i) ==
+		    dev->shared_resources.reserved_doorbell_val) {
+			set_bit(i, qpd->doorbell_bitmap);
+			pr_debug("reserved doorbell 0x%03x\n", i);
+		}
+
+	return 0;
+}
+
 struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
 							struct kfd_process *p)
 {
@@ -607,6 +633,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 	if (!pdd)
 		return NULL;
 
+	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
+		pr_err("Failed to init doorbell for process\n");
+		kfree(pdd);
+		return NULL;
+	}
+
 	pdd->dev = dev;
 	INIT_LIST_HEAD(&pdd->qpd.queues_list);
 	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7817e327ea6d..3045aebdc3f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -119,9 +119,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
 	/* Doorbell initialized in user space*/
 	q_properties->doorbell_ptr = NULL;
 
-	q_properties->doorbell_off =
-			kfd_queue_id_to_doorbell(dev, pqm->process, qid);
-
 	/* let DQM handle it*/
 	q_properties->vmid = 0;
 	q_properties->queue_id = qid;
@@ -248,6 +245,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		goto err_create_queue;
 	}
 
+	if (q)
+		/* Return the doorbell offset within the doorbell page
+		 * to the caller so it can be passed up to user mode
+		 * (in bytes).
+		 */
+		properties->doorbell_off =
+			(q->properties.doorbell_off * sizeof(uint32_t)) &
+			(kfd_doorbell_process_slice(dev) - 1);
+
 	pr_debug("PQM After DQM create queue\n");
 
 	list_add(&pqn->process_queue_list, &pqm->queues);

From f6e27ff19d9db90e55576dea5aef98feb3d0ce5e Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:06 -0400
Subject: [PATCH 0237/1286] drm/amdkfd: Move packet writer functions into
 ASIC-specific file

This is in preparation for GFXv9 (Vega10), which uses PM4 packet
formats that are incompatible with those of previous ASIC generations.
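
For illustration only, the dispatch pattern this patch introduces (an
ASIC-specific table of packet-writer functions and packet sizes,
selected once at init time) can be sketched as a standalone C program.
The names below are simplified stand-ins, not the actual kfd
structures or functions:

    #include <stddef.h>
    #include <stdio.h>

    struct pmf {
            /* per-ASIC packet writer plus its packet size in bytes */
            int (*runlist)(unsigned int *buffer, size_t ib_size_in_dwords);
            size_t runlist_size;
    };

    static int runlist_vi(unsigned int *buffer, size_t ib_size_in_dwords)
    {
            /* pretend PM4 type-3 header followed by the IB size */
            buffer[0] = 3u << 30;
            buffer[1] = (unsigned int)ib_size_in_dwords;
            return 0;
    }

    static const struct pmf vi_funcs = {
            .runlist      = runlist_vi,
            .runlist_size = 2 * sizeof(unsigned int),
    };

    int main(void)
    {
            unsigned int buf[2];
            /* in the driver this choice is made per ASIC family at init */
            const struct pmf *pmf = &vi_funcs;

            if (!pmf->runlist(buf, pmf->runlist_size / sizeof(unsigned int)))
                    printf("built a %zu-byte runlist packet\n",
                           pmf->runlist_size);
            return 0;
    }

Callers only ever go through the table, so adding a new packet format
amounts to providing another table instance.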

Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 .../drm/amd/amdkfd/kfd_device_queue_manager.c |  10 +-
 .../gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c  | 310 ++++++++++++++
 .../gpu/drm/amd/amdkfd/kfd_packet_manager.c   | 381 ++++--------------
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  35 +-
 4 files changed, 420 insertions(+), 316 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e9c72d8f0935..500f022d089d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -196,15 +196,19 @@ static int allocate_vmid(struct device_queue_manager *dqm,
 static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
 				struct qcm_process_device *qpd)
 {
-	uint32_t len;
+	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
+	int ret;
 
 	if (!qpd->ib_kaddr)
 		return -ENOMEM;
 
-	len = pm_create_release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
+	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
+	if (ret)
+		return ret;
 
 	return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
-				qpd->ib_base, (uint32_t *)qpd->ib_kaddr, len);
+				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
+				pmf->release_mem_size / sizeof(uint32_t));
 }
 
 static void deallocate_vmid(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
index f1d48281e322..7ee326fa486d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
@@ -22,6 +22,9 @@
  */
 
 #include "kfd_kernel_queue.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers_vi.h"
+#include "kfd_pm4_opcodes.h"
 
 static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
 			enum kfd_queue_type type, unsigned int queue_size);
@@ -54,3 +57,310 @@ static void uninitialize_vi(struct kernel_queue *kq)
 {
 	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
 }
+
+static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
+{
+	union PM4_MES_TYPE_3_HEADER header;
+
+	header.u32All = 0;
+	header.opcode = opcode;
+	header.count = packet_size / 4 - 2;
+	header.type = PM4_TYPE_3;
+
+	return header.u32All;
+}
+
+static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
+				struct qcm_process_device *qpd)
+{
+	struct pm4_mes_map_process *packet;
+
+	packet = (struct pm4_mes_map_process *)buffer;
+
+	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+
+	packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
+					sizeof(struct pm4_mes_map_process));
+	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+	packet->bitfields2.process_quantum = 1;
+	packet->bitfields2.pasid = qpd->pqm->process->pasid;
+	packet->bitfields3.page_table_base = qpd->page_table_base;
+	packet->bitfields10.gds_size = qpd->gds_size;
+	packet->bitfields10.num_gws = qpd->num_gws;
+	packet->bitfields10.num_oac = qpd->num_oac;
+	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+
+	packet->sh_mem_config = qpd->sh_mem_config;
+	packet->sh_mem_bases = qpd->sh_mem_bases;
+	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
+	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+
+	packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
+
+	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+
+	return 0;
+}
+
+static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t ib, size_t ib_size_in_dwords, bool chain)
+{
+	struct pm4_mes_runlist *packet;
+	int concurrent_proc_cnt = 0;
+	struct kfd_dev *kfd = pm->dqm->dev;
+
+	if (WARN_ON(!ib))
+		return -EFAULT;
+
+	/* Determine the number of processes to map together to HW:
+	 * it can not exceed the number of VMIDs available to the
+	 * scheduler, and it is determined by the smaller of the number
+	 * of processes in the runlist and kfd module parameter
+	 * hws_max_conc_proc.
+	 * Note: the arbitration between the number of VMIDs and
+	 * hws_max_conc_proc has been done in
+	 * kgd2kfd_device_init().
+	 */
+	concurrent_proc_cnt = min(pm->dqm->processes_count,
+			kfd->max_proc_per_quantum);
+
+	packet = (struct pm4_mes_runlist *)buffer;
+
+	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+	packet->header.u32All = build_pm4_header(IT_RUN_LIST,
+						sizeof(struct pm4_mes_runlist));
+
+	packet->bitfields4.ib_size = ib_size_in_dwords;
+	packet->bitfields4.chain = chain ? 1 : 0;
+	packet->bitfields4.offload_polling = 0;
+	packet->bitfields4.valid = 1;
+	packet->bitfields4.process_cnt = concurrent_proc_cnt;
+	packet->ordinal2 = lower_32_bits(ib);
+	packet->bitfields3.ib_base_hi = upper_32_bits(ib);
+
+	return 0;
+}
+
+static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+				struct scheduling_resources *res)
+{
+	struct pm4_mes_set_resources *packet;
+
+	packet = (struct pm4_mes_set_resources *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
+
+	packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
+					sizeof(struct pm4_mes_set_resources));
+
+	packet->bitfields2.queue_type =
+			queue_type__mes_set_resources__hsa_interface_queue_hiq;
+	packet->bitfields2.vmid_mask = res->vmid_mask;
+	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+	packet->bitfields7.oac_mask = res->oac_mask;
+	packet->bitfields8.gds_heap_base = res->gds_heap_base;
+	packet->bitfields8.gds_heap_size = res->gds_heap_size;
+
+	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+	packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+
+	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+
+	return 0;
+}
+
+static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+		struct queue *q, bool is_static)
+{
+	struct pm4_mes_map_queues *packet;
+	bool use_static = is_static;
+
+	packet = (struct pm4_mes_map_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+	packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+					sizeof(struct pm4_mes_map_queues));
+	packet->bitfields2.alloc_format =
+		alloc_format__mes_map_queues__one_per_pipe_vi;
+	packet->bitfields2.num_queues = 1;
+	packet->bitfields2.queue_sel =
+		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+	packet->bitfields2.engine_sel =
+		engine_sel__mes_map_queues__compute_vi;
+	packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_compute_vi;
+
+	switch (q->properties.type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+		if (use_static)
+			packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_latency_static_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.queue_type =
+			queue_type__mes_map_queues__debug_interface_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
+				engine_sel__mes_map_queues__sdma0_vi;
+		use_static = false; /* no static queues under SDMA */
+		break;
+	default:
+		WARN(1, "queue type %d", q->properties.type);
+		return -EINVAL;
+	}
+	packet->bitfields3.doorbell_offset =
+			q->properties.doorbell_off;
+
+	packet->mqd_addr_lo =
+			lower_32_bits(q->gart_mqd_addr);
+
+	packet->mqd_addr_hi =
+			upper_32_bits(q->gart_mqd_addr);
+
+	packet->wptr_addr_lo =
+			lower_32_bits((uint64_t)q->properties.write_ptr);
+
+	packet->wptr_addr_hi =
+			upper_32_bits((uint64_t)q->properties.write_ptr);
+
+	return 0;
+}
+
+static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+			enum kfd_queue_type type,
+			enum kfd_unmap_queues_filter filter,
+			uint32_t filter_param, bool reset,
+			unsigned int sdma_engine)
+{
+	struct pm4_mes_unmap_queues *packet;
+
+	packet = (struct pm4_mes_unmap_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
+
+	packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
+					sizeof(struct pm4_mes_unmap_queues));
+	switch (type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.engine_sel =
+			engine_sel__mes_unmap_queues__compute;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+		packet->bitfields2.engine_sel =
+			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+		break;
+	default:
+		WARN(1, "queue type %d", type);
+		return -EINVAL;
+	}
+
+	if (reset)
+		packet->bitfields2.action =
+			action__mes_unmap_queues__reset_queues;
+	else
+		packet->bitfields2.action =
+			action__mes_unmap_queues__preempt_queues;
+
+	switch (filter) {
+	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
+		packet->bitfields2.num_queues = 1;
+		packet->bitfields3b.doorbell_offset0 = filter_param;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+		packet->bitfields3a.pasid = filter_param;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__unmap_all_queues;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
+		/* in this case, we do not preempt static queues */
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
+		break;
+	default:
+		WARN(1, "filter %d", filter);
+		return -EINVAL;
+	}
+
+	return 0;
+
+}
+
+static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t fence_address,	uint32_t fence_value)
+{
+	struct pm4_mes_query_status *packet;
+
+	packet = (struct pm4_mes_query_status *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_query_status));
+
+	packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
+					sizeof(struct pm4_mes_query_status));
+
+	packet->bitfields2.context_id = 0;
+	packet->bitfields2.interrupt_sel =
+			interrupt_sel__mes_query_status__completion_status;
+	packet->bitfields2.command =
+			command__mes_query_status__fence_only_after_write_ack;
+
+	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
+	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
+	packet->data_hi = upper_32_bits((uint64_t)fence_value);
+	packet->data_lo = lower_32_bits((uint64_t)fence_value);
+
+	return 0;
+}
+
+static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
+{
+	struct pm4_mec_release_mem *packet;
+
+	packet = (struct pm4_mec_release_mem *)buffer;
+	memset(buffer, 0, sizeof(*packet));
+
+	packet->header.u32All = build_pm4_header(IT_RELEASE_MEM,
+						 sizeof(*packet));
+
+	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
+	packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
+	packet->bitfields2.tcl1_action_ena = 1;
+	packet->bitfields2.tc_action_ena = 1;
+	packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
+	packet->bitfields2.atc = 0;
+
+	packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
+	packet->bitfields3.int_sel =
+		int_sel___release_mem__send_interrupt_after_write_confirm;
+
+	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
+	packet->address_hi = upper_32_bits(gpu_addr);
+
+	packet->data_lo = 0;
+
+	return 0;
+}
+
+const struct packet_manager_funcs kfd_vi_pm_funcs = {
+	.map_process		= pm_map_process_vi,
+	.runlist		= pm_runlist_vi,
+	.set_resources		= pm_set_resources_vi,
+	.map_queues		= pm_map_queues_vi,
+	.unmap_queues		= pm_unmap_queues_vi,
+	.query_status		= pm_query_status_vi,
+	.release_mem		= pm_release_mem_vi,
+	.map_process_size	= sizeof(struct pm4_mes_map_process),
+	.runlist_size		= sizeof(struct pm4_mes_runlist),
+	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
+	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
+	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
+	.query_status_size	= sizeof(struct pm4_mes_query_status),
+	.release_mem_size	= sizeof(struct pm4_mec_release_mem)
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 89ba4c670ec5..860ff2481747 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -26,8 +26,6 @@
 #include "kfd_device_queue_manager.h"
 #include "kfd_kernel_queue.h"
 #include "kfd_priv.h"
-#include "kfd_pm4_headers_vi.h"
-#include "kfd_pm4_opcodes.h"
 
 static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
 				unsigned int buffer_size_bytes)
@@ -39,18 +37,6 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
 	*wptr = temp;
 }
 
-static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
-{
-	union PM4_MES_TYPE_3_HEADER header;
-
-	header.u32All = 0;
-	header.opcode = opcode;
-	header.count = packet_size / 4 - 2;
-	header.type = PM4_TYPE_3;
-
-	return header.u32All;
-}
-
 static void pm_calc_rlib_size(struct packet_manager *pm,
 				unsigned int *rlib_size,
 				bool *over_subscription)
@@ -80,9 +66,9 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 		pr_debug("Over subscribed runlist\n");
 	}
 
-	map_queue_size = sizeof(struct pm4_mes_map_queues);
+	map_queue_size = pm->pmf->map_queues_size;
 	/* calculate run list ib allocation size */
-	*rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
+	*rlib_size = process_count * pm->pmf->map_process_size +
 		     queue_count * map_queue_size;
 
 	/*
@@ -90,7 +76,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	 * when over subscription
 	 */
 	if (*over_subscription)
-		*rlib_size += sizeof(struct pm4_mes_runlist);
+		*rlib_size += pm->pmf->runlist_size;
 
 	pr_debug("runlist ib size %d\n", *rlib_size);
 }
@@ -124,137 +110,6 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 	return retval;
 }
 
-static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
-			uint64_t ib, size_t ib_size_in_dwords, bool chain)
-{
-	struct pm4_mes_runlist *packet;
-	int concurrent_proc_cnt = 0;
-	struct kfd_dev *kfd = pm->dqm->dev;
-
-	if (WARN_ON(!ib))
-		return -EFAULT;
-
-	/* Determine the number of processes to map together to HW:
-	 * it can not exceed the number of VMIDs available to the
-	 * scheduler, and it is determined by the smaller of the number
-	 * of processes in the runlist and kfd module parameter
-	 * hws_max_conc_proc.
-	 * Note: the arbitration between the number of VMIDs and
-	 * hws_max_conc_proc has been done in
-	 * kgd2kfd_device_init().
-	 */
-	concurrent_proc_cnt = min(pm->dqm->processes_count,
-			kfd->max_proc_per_quantum);
-
-	packet = (struct pm4_mes_runlist *)buffer;
-
-	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
-	packet->header.u32All = build_pm4_header(IT_RUN_LIST,
-						sizeof(struct pm4_mes_runlist));
-
-	packet->bitfields4.ib_size = ib_size_in_dwords;
-	packet->bitfields4.chain = chain ? 1 : 0;
-	packet->bitfields4.offload_polling = 0;
-	packet->bitfields4.valid = 1;
-	packet->bitfields4.process_cnt = concurrent_proc_cnt;
-	packet->ordinal2 = lower_32_bits(ib);
-	packet->bitfields3.ib_base_hi = upper_32_bits(ib);
-
-	return 0;
-}
-
-static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
-				struct qcm_process_device *qpd)
-{
-	struct pm4_mes_map_process *packet;
-
-	packet = (struct pm4_mes_map_process *)buffer;
-
-	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
-
-	packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
-					sizeof(struct pm4_mes_map_process));
-	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
-	packet->bitfields2.process_quantum = 1;
-	packet->bitfields2.pasid = qpd->pqm->process->pasid;
-	packet->bitfields3.page_table_base = qpd->page_table_base;
-	packet->bitfields10.gds_size = qpd->gds_size;
-	packet->bitfields10.num_gws = qpd->num_gws;
-	packet->bitfields10.num_oac = qpd->num_oac;
-	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
-
-	packet->sh_mem_config = qpd->sh_mem_config;
-	packet->sh_mem_bases = qpd->sh_mem_bases;
-	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
-	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
-
-	packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
-
-	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
-	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
-
-	return 0;
-}
-
-static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
-		struct queue *q, bool is_static)
-{
-	struct pm4_mes_map_queues *packet;
-	bool use_static = is_static;
-
-	packet = (struct pm4_mes_map_queues *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
-
-	packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
-						sizeof(struct pm4_mes_map_queues));
-	packet->bitfields2.alloc_format =
-		alloc_format__mes_map_queues__one_per_pipe_vi;
-	packet->bitfields2.num_queues = 1;
-	packet->bitfields2.queue_sel =
-		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
-
-	packet->bitfields2.engine_sel =
-		engine_sel__mes_map_queues__compute_vi;
-	packet->bitfields2.queue_type =
-		queue_type__mes_map_queues__normal_compute_vi;
-
-	switch (q->properties.type) {
-	case KFD_QUEUE_TYPE_COMPUTE:
-		if (use_static)
-			packet->bitfields2.queue_type =
-		queue_type__mes_map_queues__normal_latency_static_queue_vi;
-		break;
-	case KFD_QUEUE_TYPE_DIQ:
-		packet->bitfields2.queue_type =
-			queue_type__mes_map_queues__debug_interface_queue_vi;
-		break;
-	case KFD_QUEUE_TYPE_SDMA:
-		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
-				engine_sel__mes_map_queues__sdma0_vi;
-		use_static = false; /* no static queues under SDMA */
-		break;
-	default:
-		WARN(1, "queue type %d", q->properties.type);
-		return -EINVAL;
-	}
-	packet->bitfields3.doorbell_offset =
-			q->properties.doorbell_off;
-
-	packet->mqd_addr_lo =
-			lower_32_bits(q->gart_mqd_addr);
-
-	packet->mqd_addr_hi =
-			upper_32_bits(q->gart_mqd_addr);
-
-	packet->wptr_addr_lo =
-			lower_32_bits((uint64_t)q->properties.write_ptr);
-
-	packet->wptr_addr_hi =
-			upper_32_bits((uint64_t)q->properties.write_ptr);
-
-	return 0;
-}
-
 static int pm_create_runlist_ib(struct packet_manager *pm,
 				struct list_head *queues,
 				uint64_t *rl_gpu_addr,
@@ -292,12 +147,12 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			return -ENOMEM;
 		}
 
-		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
+		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
 		if (retval)
 			return retval;
 
 		proccesses_mapped++;
-		inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process),
+		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
 				alloc_size_bytes);
 
 		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
@@ -307,7 +162,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
 				kq->queue->queue, qpd->is_debug);
 
-			retval = pm_create_map_queue(pm,
+			retval = pm->pmf->map_queues(pm,
 						&rl_buffer[rl_wptr],
 						kq->queue,
 						qpd->is_debug);
@@ -315,7 +170,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 				return retval;
 
 			inc_wptr(&rl_wptr,
-				sizeof(struct pm4_mes_map_queues),
+				pm->pmf->map_queues_size,
 				alloc_size_bytes);
 		}
 
@@ -326,7 +181,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
 				q->queue, qpd->is_debug);
 
-			retval = pm_create_map_queue(pm,
+			retval = pm->pmf->map_queues(pm,
 						&rl_buffer[rl_wptr],
 						q,
 						qpd->is_debug);
@@ -335,7 +190,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 				return retval;
 
 			inc_wptr(&rl_wptr,
-				sizeof(struct pm4_mes_map_queues),
+				pm->pmf->map_queues_size,
 				alloc_size_bytes);
 		}
 	}
@@ -343,7 +198,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 	pr_debug("Finished map process and queues to runlist\n");
 
 	if (is_over_subscription)
-		retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
+		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
 					*rl_gpu_addr,
 					alloc_size_bytes / sizeof(uint32_t),
 					true);
@@ -355,45 +210,25 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 	return retval;
 }
 
-/* pm_create_release_mem - Create a RELEASE_MEM packet and return the size
- *     of this packet
- *     @gpu_addr - GPU address of the packet. It's a virtual address.
- *     @buffer - buffer to fill up with the packet. It's a CPU kernel pointer
- *     Return - length of the packet
- */
-uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer)
-{
-	struct pm4_mec_release_mem *packet;
-
-	WARN_ON(!buffer);
-
-	packet = (struct pm4_mec_release_mem *)buffer;
-	memset(buffer, 0, sizeof(*packet));
-
-	packet->header.u32All = build_pm4_header(IT_RELEASE_MEM,
-						 sizeof(*packet));
-
-	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
-	packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
-	packet->bitfields2.tcl1_action_ena = 1;
-	packet->bitfields2.tc_action_ena = 1;
-	packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
-	packet->bitfields2.atc = 0;
-
-	packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
-	packet->bitfields3.int_sel =
-		int_sel___release_mem__send_interrupt_after_write_confirm;
-
-	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
-	packet->address_hi = upper_32_bits(gpu_addr);
-
-	packet->data_lo = 0;
-
-	return sizeof(*packet) / sizeof(unsigned int);
-}
-
 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 {
+	switch (dqm->dev->device_info->asic_family) {
+	case CHIP_KAVERI:
+	case CHIP_HAWAII:
+		/* PM4 packet structures on CIK are the same as on VI */
+	case CHIP_CARRIZO:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+		pm->pmf = &kfd_vi_pm_funcs;
+		break;
+	default:
+		WARN(1, "Unexpected ASIC family %u",
+		     dqm->dev->device_info->asic_family);
+		return -EINVAL;
+	}
+
 	pm->dqm = dqm;
 	mutex_init(&pm->lock);
 	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
@@ -415,38 +250,25 @@ void pm_uninit(struct packet_manager *pm)
 int pm_send_set_resources(struct packet_manager *pm,
 				struct scheduling_resources *res)
 {
-	struct pm4_mes_set_resources *packet;
+	uint32_t *buffer, size;
 	int retval = 0;
 
+	size = pm->pmf->set_resources_size;
 	mutex_lock(&pm->lock);
 	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
-					sizeof(*packet) / sizeof(uint32_t),
-					(unsigned int **)&packet);
-	if (!packet) {
+					size / sizeof(uint32_t),
+					(unsigned int **)&buffer);
+	if (!buffer) {
 		pr_err("Failed to allocate buffer on kernel queue\n");
 		retval = -ENOMEM;
 		goto out;
 	}
 
-	memset(packet, 0, sizeof(struct pm4_mes_set_resources));
-	packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
-					sizeof(struct pm4_mes_set_resources));
-
-	packet->bitfields2.queue_type =
-			queue_type__mes_set_resources__hsa_interface_queue_hiq;
-	packet->bitfields2.vmid_mask = res->vmid_mask;
-	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
-	packet->bitfields7.oac_mask = res->oac_mask;
-	packet->bitfields8.gds_heap_base = res->gds_heap_base;
-	packet->bitfields8.gds_heap_size = res->gds_heap_size;
-
-	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
-	packet->gws_mask_hi = upper_32_bits(res->gws_mask);
-
-	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
-	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
-
-	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	retval = pm->pmf->set_resources(pm, buffer, res);
+	if (!retval)
+		pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	else
+		pm->priv_queue->ops.rollback_packet(pm->priv_queue);
 
 out:
 	mutex_unlock(&pm->lock);
@@ -468,7 +290,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 
 	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
 
-	packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
+	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
 	mutex_lock(&pm->lock);
 
 	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
@@ -476,7 +298,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	if (retval)
 		goto fail_acquire_packet_buffer;
 
-	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
+	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
 					rl_ib_size / sizeof(uint32_t), false);
 	if (retval)
 		goto fail_create_runlist;
@@ -499,37 +321,29 @@ fail_create_runlist_ib:
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 			uint32_t fence_value)
 {
-	int retval;
-	struct pm4_mes_query_status *packet;
+	uint32_t *buffer, size;
+	int retval = 0;
 
 	if (WARN_ON(!fence_address))
 		return -EFAULT;
 
+	size = pm->pmf->query_status_size;
 	mutex_lock(&pm->lock);
-	retval = pm->priv_queue->ops.acquire_packet_buffer(
-			pm->priv_queue,
-			sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
-			(unsigned int **)&packet);
-	if (retval)
-		goto fail_acquire_packet_buffer;
+	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+			size / sizeof(uint32_t), (unsigned int **)&buffer);
+	if (!buffer) {
+		pr_err("Failed to allocate buffer on kernel queue\n");
+		retval = -ENOMEM;
+		goto out;
+	}
 
-	packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
-					sizeof(struct pm4_mes_query_status));
+	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
+	if (!retval)
+		pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	else
+		pm->priv_queue->ops.rollback_packet(pm->priv_queue);
 
-	packet->bitfields2.context_id = 0;
-	packet->bitfields2.interrupt_sel =
-			interrupt_sel__mes_query_status__completion_status;
-	packet->bitfields2.command =
-			command__mes_query_status__fence_only_after_write_ack;
-
-	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
-	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
-	packet->data_hi = upper_32_bits((uint64_t)fence_value);
-	packet->data_lo = lower_32_bits((uint64_t)fence_value);
-
-	pm->priv_queue->ops.submit_packet(pm->priv_queue);
-
-fail_acquire_packet_buffer:
+out:
 	mutex_unlock(&pm->lock);
 	return retval;
 }
@@ -539,82 +353,27 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 			uint32_t filter_param, bool reset,
 			unsigned int sdma_engine)
 {
-	int retval;
-	uint32_t *buffer;
-	struct pm4_mes_unmap_queues *packet;
+	uint32_t *buffer, size;
+	int retval = 0;
 
+	size = pm->pmf->unmap_queues_size;
 	mutex_lock(&pm->lock);
-	retval = pm->priv_queue->ops.acquire_packet_buffer(
-			pm->priv_queue,
-			sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
-			&buffer);
-	if (retval)
-		goto err_acquire_packet_buffer;
-
-	packet = (struct pm4_mes_unmap_queues *)buffer;
-	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
-	pr_debug("static_queue: unmapping queues: filter is %d , reset is %d , type is %d\n",
-		filter, reset, type);
-	packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
-					sizeof(struct pm4_mes_unmap_queues));
-	switch (type) {
-	case KFD_QUEUE_TYPE_COMPUTE:
-	case KFD_QUEUE_TYPE_DIQ:
-		packet->bitfields2.engine_sel =
-			engine_sel__mes_unmap_queues__compute;
-		break;
-	case KFD_QUEUE_TYPE_SDMA:
-		packet->bitfields2.engine_sel =
-			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
-		break;
-	default:
-		WARN(1, "queue type %d", type);
-		retval = -EINVAL;
-		goto err_invalid;
+	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+			size / sizeof(uint32_t), (unsigned int **)&buffer);
+	if (!buffer) {
+		pr_err("Failed to allocate buffer on kernel queue\n");
+		retval = -ENOMEM;
+		goto out;
 	}
 
-	if (reset)
-		packet->bitfields2.action =
-				action__mes_unmap_queues__reset_queues;
+	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
+				       reset, sdma_engine);
+	if (!retval)
+		pm->priv_queue->ops.submit_packet(pm->priv_queue);
 	else
-		packet->bitfields2.action =
-				action__mes_unmap_queues__preempt_queues;
+		pm->priv_queue->ops.rollback_packet(pm->priv_queue);
 
-	switch (filter) {
-	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
-		packet->bitfields2.queue_sel =
-				queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
-		packet->bitfields2.num_queues = 1;
-		packet->bitfields3b.doorbell_offset0 = filter_param;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
-		packet->bitfields2.queue_sel =
-				queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
-		packet->bitfields3a.pasid = filter_param;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
-		packet->bitfields2.queue_sel =
-				queue_sel__mes_unmap_queues__unmap_all_queues;
-		break;
-	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
-		/* in this case, we do not preempt static queues */
-		packet->bitfields2.queue_sel =
-				queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
-		break;
-	default:
-		WARN(1, "filter %d", filter);
-		retval = -EINVAL;
-		goto err_invalid;
-	}
-
-	pm->priv_queue->ops.submit_packet(pm->priv_queue);
-
-	mutex_unlock(&pm->lock);
-	return 0;
-
-err_invalid:
-	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
-err_acquire_packet_buffer:
+out:
 	mutex_unlock(&pm->lock);
 	return retval;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index ddb3c8cdfb7b..873a8fbc14ce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -866,8 +866,41 @@ struct packet_manager {
 	bool allocated;
 	struct kfd_mem_obj *ib_buffer_obj;
 	unsigned int ib_size_bytes;
+
+	const struct packet_manager_funcs *pmf;
 };
 
+struct packet_manager_funcs {
+	/* Support ASIC-specific packet formats for PM4 packets */
+	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
+			struct qcm_process_device *qpd);
+	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t ib, size_t ib_size_in_dwords, bool chain);
+	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
+			struct scheduling_resources *res);
+	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
+			struct queue *q, bool is_static);
+	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
+			enum kfd_queue_type type,
+			enum kfd_unmap_queues_filter mode,
+			uint32_t filter_param, bool reset,
+			unsigned int sdma_engine);
+	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t fence_address,	uint32_t fence_value);
+	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+
+	/* Packet sizes */
+	int map_process_size;
+	int runlist_size;
+	int set_resources_size;
+	int map_queues_size;
+	int unmap_queues_size;
+	int query_status_size;
+	int release_mem_size;
+};
+
+extern const struct packet_manager_funcs kfd_vi_pm_funcs;
+
 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
 void pm_uninit(struct packet_manager *pm);
 int pm_send_set_resources(struct packet_manager *pm,
@@ -883,8 +916,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 
 void pm_release_ib(struct packet_manager *pm);
 
-uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer);
-
 uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
 
 /* Events */

From 454150b1f9a6be0a69138a698471bd13424204cc Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:07 -0400
Subject: [PATCH 0238/1286] drm/amdkfd: Add GFXv9 PM4 packet writer functions

Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/Makefile           |   7 +-
 .../gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c  | 331 ++++++++++
 .../gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c  |  18 +-
 .../gpu/drm/amd/amdkfd/kfd_packet_manager.c   |   4 +
 .../gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h   | 583 ++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |   6 +
 6 files changed, 937 insertions(+), 12 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
 create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h

diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 0d0242240c47..52b3c1b419f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -31,9 +31,10 @@ amdkfd-y	:= kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 		kfd_process.o kfd_queue.o kfd_mqd_manager.o \
 		kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \
 		kfd_kernel_queue.o kfd_kernel_queue_cik.o \
-		kfd_kernel_queue_vi.o kfd_packet_manager.o \
-		kfd_process_queue_manager.o kfd_device_queue_manager.o \
-		kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \
+		kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \
+		kfd_packet_manager.o kfd_process_queue_manager.o \
+		kfd_device_queue_manager.o kfd_device_queue_manager_cik.o \
+		kfd_device_queue_manager_vi.o \
 		kfd_interrupt.o kfd_events.o cik_event_interrupt.o \
 		kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
new file mode 100644
index 000000000000..ece7d59537b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "kfd_kernel_queue.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers_ai.h"
+#include "kfd_pm4_opcodes.h"
+
+static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
+			enum kfd_queue_type type, unsigned int queue_size);
+static void uninitialize_v9(struct kernel_queue *kq);
+
+void kernel_queue_init_v9(struct kernel_queue_ops *ops)
+{
+	ops->initialize = initialize_v9;
+	ops->uninitialize = uninitialize_v9;
+}
+
+static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
+			enum kfd_queue_type type, unsigned int queue_size)
+{
+	int retval;
+
+	retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+	if (retval)
+		return false;
+
+	kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+	kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
+
+	memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
+
+	return true;
+}
+
+static void uninitialize_v9(struct kernel_queue *kq)
+{
+	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
+}
+
+static int pm_map_process_v9(struct packet_manager *pm,
+		uint32_t *buffer, struct qcm_process_device *qpd)
+{
+	struct pm4_mes_map_process *packet;
+	uint64_t vm_page_table_base_addr =
+		(uint64_t)(qpd->page_table_base) << 12;
+
+	packet = (struct pm4_mes_map_process *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
+
+	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
+					sizeof(struct pm4_mes_map_process));
+	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
+	packet->bitfields2.process_quantum = 1;
+	packet->bitfields2.pasid = qpd->pqm->process->pasid;
+	packet->bitfields14.gds_size = qpd->gds_size;
+	packet->bitfields14.num_gws = qpd->num_gws;
+	packet->bitfields14.num_oac = qpd->num_oac;
+	packet->bitfields14.sdma_enable = 1;
+	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+
+	packet->sh_mem_config = qpd->sh_mem_config;
+	packet->sh_mem_bases = qpd->sh_mem_bases;
+	packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
+	packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
+	packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
+	packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
+
+	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
+	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
+
+	packet->vm_context_page_table_base_addr_lo32 =
+			lower_32_bits(vm_page_table_base_addr);
+	packet->vm_context_page_table_base_addr_hi32 =
+			upper_32_bits(vm_page_table_base_addr);
+
+	return 0;
+}
+
+static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t ib, size_t ib_size_in_dwords, bool chain)
+{
+	struct pm4_mes_runlist *packet;
+
+	int concurrent_proc_cnt = 0;
+	struct kfd_dev *kfd = pm->dqm->dev;
+
+	/* Determine the number of processes to map together to HW:
+	 * it can not exceed the number of VMIDs available to the
+	 * scheduler, and it is determined by the smaller of the number
+	 * of processes in the runlist and kfd module parameter
+	 * hws_max_conc_proc.
+	 * Note: the arbitration between the number of VMIDs and
+	 * hws_max_conc_proc has been done in
+	 * kgd2kfd_device_init().
+	 */
+	concurrent_proc_cnt = min(pm->dqm->processes_count,
+			kfd->max_proc_per_quantum);
+
+	packet = (struct pm4_mes_runlist *)buffer;
+
+	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
+						sizeof(struct pm4_mes_runlist));
+
+	packet->bitfields4.ib_size = ib_size_in_dwords;
+	packet->bitfields4.chain = chain ? 1 : 0;
+	packet->bitfields4.offload_polling = 0;
+	packet->bitfields4.valid = 1;
+	packet->bitfields4.process_cnt = concurrent_proc_cnt;
+	packet->ordinal2 = lower_32_bits(ib);
+	packet->ib_base_hi = upper_32_bits(ib);
+
+	return 0;
+}
+
+static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+		struct queue *q, bool is_static)
+{
+	struct pm4_mes_map_queues *packet;
+	bool use_static = is_static;
+
+	packet = (struct pm4_mes_map_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
+					sizeof(struct pm4_mes_map_queues));
+	packet->bitfields2.alloc_format =
+		alloc_format__mes_map_queues__one_per_pipe_vi;
+	packet->bitfields2.num_queues = 1;
+	packet->bitfields2.queue_sel =
+		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+	packet->bitfields2.engine_sel =
+		engine_sel__mes_map_queues__compute_vi;
+	packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_compute_vi;
+
+	switch (q->properties.type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+		if (use_static)
+			packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_latency_static_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.queue_type =
+			queue_type__mes_map_queues__debug_interface_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
+				engine_sel__mes_map_queues__sdma0_vi;
+		use_static = false; /* no static queues under SDMA */
+		break;
+	default:
+		WARN(1, "queue type %d", q->properties.type);
+		return -EINVAL;
+	}
+	packet->bitfields3.doorbell_offset =
+			q->properties.doorbell_off;
+
+	packet->mqd_addr_lo =
+			lower_32_bits(q->gart_mqd_addr);
+
+	packet->mqd_addr_hi =
+			upper_32_bits(q->gart_mqd_addr);
+
+	packet->wptr_addr_lo =
+			lower_32_bits((uint64_t)q->properties.write_ptr);
+
+	packet->wptr_addr_hi =
+			upper_32_bits((uint64_t)q->properties.write_ptr);
+
+	return 0;
+}
+
+static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+			enum kfd_queue_type type,
+			enum kfd_unmap_queues_filter filter,
+			uint32_t filter_param, bool reset,
+			unsigned int sdma_engine)
+{
+	struct pm4_mes_unmap_queues *packet;
+
+	packet = (struct pm4_mes_unmap_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
+
+	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
+					sizeof(struct pm4_mes_unmap_queues));
+	switch (type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.engine_sel =
+			engine_sel__mes_unmap_queues__compute;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+		packet->bitfields2.engine_sel =
+			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+		break;
+	default:
+		WARN(1, "queue type %d", type);
+		return -EINVAL;
+	}
+
+	if (reset)
+		packet->bitfields2.action =
+			action__mes_unmap_queues__reset_queues;
+	else
+		packet->bitfields2.action =
+			action__mes_unmap_queues__preempt_queues;
+
+	switch (filter) {
+	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
+		packet->bitfields2.num_queues = 1;
+		packet->bitfields3b.doorbell_offset0 = filter_param;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+		packet->bitfields3a.pasid = filter_param;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__unmap_all_queues;
+		break;
+	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
+		/* in this case, we do not preempt static queues */
+		packet->bitfields2.queue_sel =
+			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
+		break;
+	default:
+		WARN(1, "filter %d", filter);
+		return -EINVAL;
+	}
+
+	return 0;
+
+}
+
+static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
+			uint64_t fence_address,	uint32_t fence_value)
+{
+	struct pm4_mes_query_status *packet;
+
+	packet = (struct pm4_mes_query_status *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_query_status));
+
+
+	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
+					sizeof(struct pm4_mes_query_status));
+
+	packet->bitfields2.context_id = 0;
+	packet->bitfields2.interrupt_sel =
+			interrupt_sel__mes_query_status__completion_status;
+	packet->bitfields2.command =
+			command__mes_query_status__fence_only_after_write_ack;
+
+	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
+	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
+	packet->data_hi = upper_32_bits((uint64_t)fence_value);
+	packet->data_lo = lower_32_bits((uint64_t)fence_value);
+
+	return 0;
+}
+
+
+static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
+{
+	struct pm4_mec_release_mem *packet;
+
+	packet = (struct pm4_mec_release_mem *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
+
+	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
+					sizeof(struct pm4_mec_release_mem));
+
+	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
+	packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
+	packet->bitfields2.tcl1_action_ena = 1;
+	packet->bitfields2.tc_action_ena = 1;
+	packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
+
+	packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
+	packet->bitfields3.int_sel =
+		int_sel__mec_release_mem__send_interrupt_after_write_confirm;
+
+	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
+	packet->address_hi = upper_32_bits(gpu_addr);
+
+	packet->data_lo = 0;
+
+	return 0;
+}
+
+const struct packet_manager_funcs kfd_v9_pm_funcs = {
+	.map_process		= pm_map_process_v9,
+	.runlist		= pm_runlist_v9,
+	.set_resources		= pm_set_resources_vi,
+	.map_queues		= pm_map_queues_v9,
+	.unmap_queues		= pm_unmap_queues_v9,
+	.query_status		= pm_query_status_v9,
+	.release_mem		= pm_release_mem_v9,
+	.map_process_size	= sizeof(struct pm4_mes_map_process),
+	.runlist_size		= sizeof(struct pm4_mes_runlist),
+	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
+	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
+	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
+	.query_status_size	= sizeof(struct pm4_mes_query_status),
+	.release_mem_size	= sizeof(struct pm4_mec_release_mem)
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
index 7ee326fa486d..f9019efd31b9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
@@ -58,7 +58,7 @@ static void uninitialize_vi(struct kernel_queue *kq)
 	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
 }
 
-static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
+unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
 {
 	union PM4_MES_TYPE_3_HEADER header;
 
@@ -79,7 +79,7 @@ static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
 
 	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
 
-	packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
+	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
 					sizeof(struct pm4_mes_map_process));
 	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
 	packet->bitfields2.process_quantum = 1;
@@ -128,7 +128,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
 	packet = (struct pm4_mes_runlist *)buffer;
 
 	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
-	packet->header.u32All = build_pm4_header(IT_RUN_LIST,
+	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
 						sizeof(struct pm4_mes_runlist));
 
 	packet->bitfields4.ib_size = ib_size_in_dwords;
@@ -142,7 +142,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
 	return 0;
 }
 
-static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
 				struct scheduling_resources *res)
 {
 	struct pm4_mes_set_resources *packet;
@@ -150,7 +150,7 @@ static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
 	packet = (struct pm4_mes_set_resources *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
 
-	packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
+	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
 					sizeof(struct pm4_mes_set_resources));
 
 	packet->bitfields2.queue_type =
@@ -179,7 +179,7 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 	packet = (struct pm4_mes_map_queues *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
 
-	packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
 					sizeof(struct pm4_mes_map_queues));
 	packet->bitfields2.alloc_format =
 		alloc_format__mes_map_queues__one_per_pipe_vi;
@@ -240,7 +240,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 	packet = (struct pm4_mes_unmap_queues *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
 
-	packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
+	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
 					sizeof(struct pm4_mes_unmap_queues));
 	switch (type) {
 	case KFD_QUEUE_TYPE_COMPUTE:
@@ -302,7 +302,7 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
 	packet = (struct pm4_mes_query_status *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_mes_query_status));
 
-	packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
+	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
 					sizeof(struct pm4_mes_query_status));
 
 	packet->bitfields2.context_id = 0;
@@ -326,7 +326,7 @@ static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
 	packet = (struct pm4_mec_release_mem *)buffer;
 	memset(buffer, 0, sizeof(*packet));
 
-	packet->header.u32All = build_pm4_header(IT_RELEASE_MEM,
+	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
 						 sizeof(*packet));
 
 	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 860ff2481747..91f0350b6180 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -223,6 +223,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 	case CHIP_POLARIS11:
 		pm->pmf = &kfd_vi_pm_funcs;
 		break;
+	case CHIP_VEGA10:
+	case CHIP_RAVEN:
+		pm->pmf = &kfd_v9_pm_funcs;
+		break;
 	default:
 		WARN(1, "Unexpected ASIC family %u",
 		     dqm->dev->device_info->asic_family);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
new file mode 100644
index 000000000000..f2bcf5c092ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -0,0 +1,583 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef F32_MES_PM4_PACKETS_H
+#define F32_MES_PM4_PACKETS_H
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+	struct {
+		uint32_t reserved1 : 8; /* < reserved */
+		uint32_t opcode    : 8; /* < IT opcode */
+		uint32_t count     : 14;/* < number of DWORDs - 1 in the
+					 *   information body.
+					 */
+		uint32_t type      : 2; /* < packet identifier.
+					 *   It should be 3 for type 3 packets
+					 */
+	};
+	uint32_t u32All;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
+
+/*--------------------MES_SET_RESOURCES--------------------*/
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum mes_set_resources_queue_type_enum {
+	queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+	queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+	queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+
+struct pm4_mes_set_resources {
+	union {
+		union PM4_MES_TYPE_3_HEADER	header;		/* header */
+		uint32_t			ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t vmid_mask:16;
+			uint32_t unmap_latency:8;
+			uint32_t reserved1:5;
+			enum mes_set_resources_queue_type_enum queue_type:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	uint32_t queue_mask_lo;
+	uint32_t queue_mask_hi;
+	uint32_t gws_mask_lo;
+	uint32_t gws_mask_hi;
+
+	union {
+		struct {
+			uint32_t oac_mask:16;
+			uint32_t reserved2:16;
+		} bitfields7;
+		uint32_t ordinal7;
+	};
+
+	union {
+		struct {
+		uint32_t gds_heap_base:6;
+		uint32_t reserved3:5;
+		uint32_t gds_heap_size:6;
+		uint32_t reserved4:15;
+		} bitfields8;
+		uint32_t ordinal8;
+	};
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST--------------------*/
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_mes_runlist {
+	union {
+		union PM4_MES_TYPE_3_HEADER header; /* header */
+		uint32_t ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t reserved1:2;
+			uint32_t ib_base_lo:30;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	uint32_t ib_base_hi;
+
+	union {
+		struct {
+			uint32_t ib_size:20;
+			uint32_t chain:1;
+			uint32_t offload_polling:1;
+			uint32_t reserved2:1;
+			uint32_t valid:1;
+			uint32_t process_cnt:4;
+			uint32_t reserved3:4;
+		} bitfields4;
+		uint32_t ordinal4;
+	};
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS--------------------*/
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_mes_map_process {
+	union {
+		union PM4_MES_TYPE_3_HEADER header;	/* header */
+		uint32_t ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved1:8;
+			uint32_t diq_enable:1;
+			uint32_t process_quantum:7;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	uint32_t vm_context_page_table_base_addr_lo32;
+
+	uint32_t vm_context_page_table_base_addr_hi32;
+
+	uint32_t sh_mem_bases;
+
+	uint32_t sh_mem_config;
+
+	uint32_t sq_shader_tba_lo;
+
+	uint32_t sq_shader_tba_hi;
+
+	uint32_t sq_shader_tma_lo;
+
+	uint32_t sq_shader_tma_hi;
+
+	uint32_t reserved6;
+
+	uint32_t gds_addr_lo;
+
+	uint32_t gds_addr_hi;
+
+	union {
+		struct {
+			uint32_t num_gws:6;
+			uint32_t reserved7:1;
+			uint32_t sdma_enable:1;
+			uint32_t num_oac:4;
+			uint32_t reserved8:4;
+			uint32_t gds_size:6;
+			uint32_t num_queues:10;
+		} bitfields14;
+		uint32_t ordinal14;
+	};
+
+	uint32_t completion_signal_lo;
+
+	uint32_t completion_signal_hi;
+
+};
+
+#endif
+
+/*--------------------MES_MAP_PROCESS_VM--------------------*/
+
+#ifndef PM4_MES_MAP_PROCESS_VM_DEFINED
+#define PM4_MES_MAP_PROCESS_VM_DEFINED
+
+struct PM4_MES_MAP_PROCESS_VM {
+	union {
+		union PM4_MES_TYPE_3_HEADER header;	/* header */
+		uint32_t ordinal1;
+	};
+
+	uint32_t reserved1;
+
+	uint32_t vm_context_cntl;
+
+	uint32_t reserved2;
+
+	uint32_t vm_context_page_table_end_addr_lo32;
+
+	uint32_t vm_context_page_table_end_addr_hi32;
+
+	uint32_t vm_context_page_table_start_addr_lo32;
+
+	uint32_t vm_context_page_table_start_addr_hi32;
+
+	uint32_t reserved3;
+
+	uint32_t reserved4;
+
+	uint32_t reserved5;
+
+	uint32_t reserved6;
+
+	uint32_t reserved7;
+
+	uint32_t reserved8;
+
+	uint32_t completion_signal_lo32;
+
+	uint32_t completion_signal_hi32;
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED
+#define PM4_MES_MAP_QUEUES_VI_DEFINED
+enum mes_map_queues_queue_sel_enum {
+	queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0,
+queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1
+};
+
+enum mes_map_queues_queue_type_enum {
+	queue_type__mes_map_queues__normal_compute_vi = 0,
+	queue_type__mes_map_queues__debug_interface_queue_vi = 1,
+	queue_type__mes_map_queues__normal_latency_static_queue_vi = 2,
+queue_type__mes_map_queues__low_latency_static_queue_vi = 3
+};
+
+enum mes_map_queues_alloc_format_enum {
+	alloc_format__mes_map_queues__one_per_pipe_vi = 0,
+alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
+};
+
+enum mes_map_queues_engine_sel_enum {
+	engine_sel__mes_map_queues__compute_vi = 0,
+	engine_sel__mes_map_queues__sdma0_vi = 2,
+	engine_sel__mes_map_queues__sdma1_vi = 3
+};
+
+
+struct pm4_mes_map_queues {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t reserved1:4;
+			enum mes_map_queues_queue_sel_enum queue_sel:2;
+			uint32_t reserved2:15;
+			enum mes_map_queues_queue_type_enum queue_type:3;
+			enum mes_map_queues_alloc_format_enum alloc_format:2;
+			enum mes_map_queues_engine_sel_enum engine_sel:3;
+			uint32_t num_queues:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t reserved3:1;
+			uint32_t check_disable:1;
+			uint32_t doorbell_offset:26;
+			uint32_t reserved4:4;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	uint32_t mqd_addr_lo;
+	uint32_t mqd_addr_hi;
+	uint32_t wptr_addr_lo;
+	uint32_t wptr_addr_hi;
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum mes_query_status_interrupt_sel_enum {
+	interrupt_sel__mes_query_status__completion_status = 0,
+	interrupt_sel__mes_query_status__process_status = 1,
+	interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum mes_query_status_command_enum {
+	command__mes_query_status__interrupt_only = 0,
+	command__mes_query_status__fence_only_immediate = 1,
+	command__mes_query_status__fence_only_after_write_ack = 2,
+	command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum mes_query_status_engine_sel_enum {
+	engine_sel__mes_query_status__compute = 0,
+	engine_sel__mes_query_status__sdma0_queue = 2,
+	engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_mes_query_status {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t context_id:28;
+			enum mes_query_status_interrupt_sel_enum	interrupt_sel:2;
+			enum mes_query_status_command_enum command:2;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved1:16;
+		} bitfields3a;
+		struct {
+			uint32_t reserved2:2;
+			uint32_t doorbell_offset:26;
+			enum mes_query_status_engine_sel_enum engine_sel:3;
+			uint32_t reserved3:1;
+		} bitfields3b;
+		uint32_t ordinal3;
+	};
+
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+	uint32_t data_lo;
+	uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum mes_unmap_queues_action_enum {
+	action__mes_unmap_queues__preempt_queues = 0,
+	action__mes_unmap_queues__reset_queues = 1,
+	action__mes_unmap_queues__disable_process_queues = 2,
+	action__mes_unmap_queues__reserved = 3
+};
+
+enum mes_unmap_queues_queue_sel_enum {
+	queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+	queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+	queue_sel__mes_unmap_queues__unmap_all_queues = 2,
+	queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3
+};
+
+enum mes_unmap_queues_engine_sel_enum {
+	engine_sel__mes_unmap_queues__compute = 0,
+	engine_sel__mes_unmap_queues__sdma0 = 2,
+	engine_sel__mes_unmap_queues__sdmal = 3
+};
+
+struct pm4_mes_unmap_queues {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			enum mes_unmap_queues_action_enum action:2;
+			uint32_t reserved1:2;
+			enum mes_unmap_queues_queue_sel_enum queue_sel:2;
+			uint32_t reserved2:20;
+			enum mes_unmap_queues_engine_sel_enum engine_sel:3;
+			uint32_t num_queues:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved3:16;
+		} bitfields3a;
+		struct {
+			uint32_t reserved4:2;
+			uint32_t doorbell_offset0:26;
+			int32_t reserved5:4;
+		} bitfields3b;
+		uint32_t ordinal3;
+	};
+
+	union {
+	struct {
+			uint32_t reserved6:2;
+			uint32_t doorbell_offset1:26;
+			uint32_t reserved7:4;
+		} bitfields4;
+		uint32_t ordinal4;
+	};
+
+	union {
+		struct {
+			uint32_t reserved8:2;
+			uint32_t doorbell_offset2:26;
+			uint32_t reserved9:4;
+		} bitfields5;
+		uint32_t ordinal5;
+	};
+
+	union {
+		struct {
+			uint32_t reserved10:2;
+			uint32_t doorbell_offset3:26;
+			uint32_t reserved11:4;
+		} bitfields6;
+		uint32_t ordinal6;
+	};
+};
+#endif
+
+#ifndef PM4_MEC_RELEASE_MEM_DEFINED
+#define PM4_MEC_RELEASE_MEM_DEFINED
+
+enum mec_release_mem_event_index_enum {
+	event_index__mec_release_mem__end_of_pipe = 5,
+	event_index__mec_release_mem__shader_done = 6
+};
+
+enum mec_release_mem_cache_policy_enum {
+	cache_policy__mec_release_mem__lru = 0,
+	cache_policy__mec_release_mem__stream = 1
+};
+
+enum mec_release_mem_pq_exe_status_enum {
+	pq_exe_status__mec_release_mem__default = 0,
+	pq_exe_status__mec_release_mem__phase_update = 1
+};
+
+enum mec_release_mem_dst_sel_enum {
+	dst_sel__mec_release_mem__memory_controller = 0,
+	dst_sel__mec_release_mem__tc_l2 = 1,
+	dst_sel__mec_release_mem__queue_write_pointer_register = 2,
+	dst_sel__mec_release_mem__queue_write_pointer_poll_mask_bit = 3
+};
+
+enum mec_release_mem_int_sel_enum {
+	int_sel__mec_release_mem__none = 0,
+	int_sel__mec_release_mem__send_interrupt_only = 1,
+	int_sel__mec_release_mem__send_interrupt_after_write_confirm = 2,
+	int_sel__mec_release_mem__send_data_after_write_confirm = 3,
+	int_sel__mec_release_mem__unconditionally_send_int_ctxid = 4,
+	int_sel__mec_release_mem__conditionally_send_int_ctxid_based_on_32_bit_compare = 5,
+	int_sel__mec_release_mem__conditionally_send_int_ctxid_based_on_64_bit_compare = 6
+};
+
+enum mec_release_mem_data_sel_enum {
+	data_sel__mec_release_mem__none = 0,
+	data_sel__mec_release_mem__send_32_bit_low = 1,
+	data_sel__mec_release_mem__send_64_bit_data = 2,
+	data_sel__mec_release_mem__send_gpu_clock_counter = 3,
+	data_sel__mec_release_mem__send_cp_perfcounter_hi_lo = 4,
+	data_sel__mec_release_mem__store_gds_data_to_memory = 5
+};
+
+struct pm4_mec_release_mem {
+	union {
+		union PM4_MES_TYPE_3_HEADER header;     /*header */
+		unsigned int ordinal1;
+	};
+
+	union {
+		struct {
+			unsigned int event_type:6;
+			unsigned int reserved1:2;
+			enum mec_release_mem_event_index_enum event_index:4;
+			unsigned int tcl1_vol_action_ena:1;
+			unsigned int tc_vol_action_ena:1;
+			unsigned int reserved2:1;
+			unsigned int tc_wb_action_ena:1;
+			unsigned int tcl1_action_ena:1;
+			unsigned int tc_action_ena:1;
+			uint32_t reserved3:1;
+			uint32_t tc_nc_action_ena:1;
+			uint32_t tc_wc_action_ena:1;
+			uint32_t tc_md_action_ena:1;
+			uint32_t reserved4:3;
+			enum mec_release_mem_cache_policy_enum cache_policy:2;
+			uint32_t reserved5:2;
+			enum mec_release_mem_pq_exe_status_enum pq_exe_status:1;
+			uint32_t reserved6:2;
+		} bitfields2;
+		unsigned int ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t reserved7:16;
+			enum mec_release_mem_dst_sel_enum dst_sel:2;
+			uint32_t reserved8:6;
+			enum mec_release_mem_int_sel_enum int_sel:3;
+			uint32_t reserved9:2;
+			enum mec_release_mem_data_sel_enum data_sel:3;
+		} bitfields3;
+		unsigned int ordinal3;
+	};
+
+	union {
+		struct {
+			uint32_t reserved10:2;
+			unsigned int address_lo_32b:30;
+		} bitfields4;
+		struct {
+			uint32_t reserved11:3;
+			uint32_t address_lo_64b:29;
+		} bitfields4b;
+		uint32_t reserved12;
+		unsigned int ordinal4;
+	};
+
+	union {
+		uint32_t address_hi;
+		uint32_t reserved13;
+		uint32_t ordinal5;
+	};
+
+	union {
+		uint32_t data_lo;
+		uint32_t cmp_data_lo;
+		struct {
+			uint32_t dw_offset:16;
+			uint32_t num_dwords:16;
+		} bitfields6c;
+		uint32_t reserved14;
+		uint32_t ordinal6;
+	};
+
+	union {
+		uint32_t data_hi;
+		uint32_t cmp_data_hi;
+		uint32_t reserved15;
+		uint32_t reserved16;
+		uint32_t ordinal7;
+	};
+
+	uint32_t int_ctxid;
+
+};
+
+#endif
+
+enum {
+	CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
+};
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 873a8fbc14ce..b68299a3e18a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -900,6 +900,7 @@ struct packet_manager_funcs {
 };
 
 extern const struct packet_manager_funcs kfd_vi_pm_funcs;
+extern const struct packet_manager_funcs kfd_v9_pm_funcs;
 
 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
 void pm_uninit(struct packet_manager *pm);
@@ -916,6 +917,11 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 
 void pm_release_ib(struct packet_manager *pm);
 
+/* The following PM functions can be shared between VI and AI */
+unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
+int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
+				struct scheduling_resources *res);
+
 uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
 
 /* Events */

From b91d43dd01aadd43b1002160b78d77f8175876a4 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:08 -0400
Subject: [PATCH 0239/1286] drm/amdkfd: Add GFXv9 MQD manager

Signed-off-by: John Bridgman <john.bridgman@amd.com>
Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/Makefile           |   1 +
 drivers/gpu/drm/amd/amdkfd/kfd_device.c       |   2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c  |   3 +
 .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c   | 443 ++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |   3 +
 5 files changed, 451 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c

diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 52b3c1b419f1..094b591ed8c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -30,6 +30,7 @@ amdkfd-y	:= kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 		kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
 		kfd_process.o kfd_queue.o kfd_mqd_manager.o \
 		kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \
+		kfd_mqd_manager_v9.o \
 		kfd_kernel_queue.o kfd_kernel_queue_cik.o \
 		kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \
 		kfd_packet_manager.o kfd_process_queue_manager.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f563acbc1ad7..c368ce3e96ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -700,7 +700,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
 		return -ENOMEM;
 
-	*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
 	if ((*mem_obj) == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index ee7061e1c466..4b8eb506642b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -38,6 +38,9 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
 		return mqd_manager_init_vi_tonga(type, dev);
+	case CHIP_VEGA10:
+	case CHIP_RAVEN:
+		return mqd_manager_init_v9(type, dev);
 	default:
 		WARN(1, "Unexpected ASIC family %u",
 		     dev->device_info->asic_family);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
new file mode 100644
index 000000000000..684054ff02cd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+#include "v9_structs.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "sdma0/sdma0_4_0_sh_mask.h"
+
+static inline struct v9_mqd *get_mqd(void *mqd)
+{
+	return (struct v9_mqd *)mqd;
+}
+
+static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+	return (struct v9_sdma_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *q)
+{
+	int retval;
+	uint64_t addr;
+	struct v9_mqd *m;
+	struct kfd_dev *kfd = mm->dev;
+
+	/* From V9, for CWSR, the control stack is located on the next page
+	 * boundary after the MQD, so we use the GTT allocation function
+	 * instead of the sub-allocation function.
+	 */
+	if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
+		*mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+		if (!*mqd_mem_obj)
+			return -ENOMEM;
+		retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
+			ALIGN(q->ctl_stack_size, PAGE_SIZE) +
+				ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
+			&((*mqd_mem_obj)->gtt_mem),
+			&((*mqd_mem_obj)->gpu_addr),
+			(void *)&((*mqd_mem_obj)->cpu_ptr));
+	} else
+		retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
+				mqd_mem_obj);
+	if (retval != 0)
+		return -ENOMEM;
+
+	m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
+	addr = (*mqd_mem_obj)->gpu_addr;
+
+	memset(m, 0, sizeof(struct v9_mqd));
+
+	m->header = 0xC0310800;
+	m->compute_pipelinestat_enable = 1;
+	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+
+	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+
+	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+
+	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
+			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+			10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+	m->cp_hqd_pipe_priority = 1;
+	m->cp_hqd_queue_priority = 15;
+
+	if (q->format == KFD_QUEUE_FORMAT_AQL) {
+		m->cp_hqd_aql_control =
+			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
+	}
+
+	if (q->tba_addr) {
+		m->compute_pgm_rsrc2 |=
+			(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+	}
+
+	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+		m->cp_hqd_persistent_state |=
+			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+		m->cp_hqd_ctx_save_base_addr_lo =
+			lower_32_bits(q->ctx_save_restore_area_address);
+		m->cp_hqd_ctx_save_base_addr_hi =
+			upper_32_bits(q->ctx_save_restore_area_address);
+		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
+		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
+		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
+		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
+	}
+
+	*mqd = m;
+	if (gart_addr)
+		*gart_addr = addr;
+	retval = mm->update_mqd(mm, m, q);
+
+	return retval;
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd,
+			uint32_t pipe_id, uint32_t queue_id,
+			struct queue_properties *p, struct mm_struct *mms)
+{
+	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+
+	return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+					  (uint32_t __user *)p->write_ptr,
+					  wptr_shift, 0, mms);
+}
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+		      struct queue_properties *q)
+{
+	struct v9_mqd *m;
+
+	m = get_mqd(mqd);
+
+	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
+	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+
+	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
+	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
+
+	m->cp_hqd_pq_doorbell_control =
+		q->doorbell_off <<
+			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+			m->cp_hqd_pq_doorbell_control);
+
+	m->cp_hqd_ib_control =
+		3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+		1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
+
+	/*
+	 * HW does not clamp this field correctly. Maximum EOP queue size
+	 * is constrained by per-SE EOP done signal count, which is 8-bit.
+	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
+	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
+	 * is safe, giving a maximum field value of 0xA.
+	 */
+	m->cp_hqd_eop_control = min(0xA,
+		order_base_2(q->eop_ring_buffer_size / 4) - 1);
+	m->cp_hqd_eop_base_addr_lo =
+			lower_32_bits(q->eop_ring_buffer_address >> 8);
+	m->cp_hqd_eop_base_addr_hi =
+			upper_32_bits(q->eop_ring_buffer_address >> 8);
+
+	m->cp_hqd_iq_timer = 0;
+
+	m->cp_hqd_vmid = q->vmid;
+
+	if (q->format == KFD_QUEUE_FORMAT_AQL) {
+		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
+				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
+				1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
+		m->cp_hqd_pq_doorbell_control |= 1 <<
+			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
+	}
+	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+		m->cp_hqd_ctx_save_control = 0;
+
+	q->is_active = (q->queue_size > 0 &&
+			q->queue_address != 0 &&
+			q->queue_percent > 0 &&
+			!q->is_evicted);
+
+	return 0;
+}
+
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+			enum kfd_preempt_type type,
+			unsigned int timeout, uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_destroy
+		(mm->dev->kgd, mqd, type, timeout,
+		pipe_id, queue_id);
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+			struct kfd_mem_obj *mqd_mem_obj)
+{
+	struct kfd_dev *kfd = mm->dev;
+
+	if (mqd_mem_obj->gtt_mem) {
+		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
+		kfree(mqd_mem_obj);
+	} else {
+		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+	}
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+			uint64_t queue_address,	uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_is_occupied(
+		mm->dev->kgd, queue_address,
+		pipe_id, queue_id);
+}
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *q)
+{
+	struct v9_mqd *m;
+	int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+
+	if (retval != 0)
+		return retval;
+
+	m = get_mqd(*mqd);
+
+	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+
+	return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q)
+{
+	struct v9_mqd *m;
+	int retval = update_mqd(mm, mqd, q);
+
+	if (retval != 0)
+		return retval;
+
+	/* TODO: what's the point? update_mqd already does this. */
+	m = get_mqd(mqd);
+	m->cp_hqd_vmid = q->vmid;
+	return retval;
+}
+
+static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+		struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+		struct queue_properties *q)
+{
+	int retval;
+	struct v9_sdma_mqd *m;
+
+
+	retval = kfd_gtt_sa_allocate(mm->dev,
+			sizeof(struct v9_sdma_mqd),
+			mqd_mem_obj);
+
+	if (retval != 0)
+		return -ENOMEM;
+
+	m = (struct v9_sdma_mqd *) (*mqd_mem_obj)->cpu_ptr;
+
+	memset(m, 0, sizeof(struct v9_sdma_mqd));
+
+	*mqd = m;
+	if (gart_addr)
+		*gart_addr = (*mqd_mem_obj)->gpu_addr;
+
+	retval = mm->update_mqd(mm, m, q);
+
+	return retval;
+}
+
+static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
+		struct kfd_mem_obj *mqd_mem_obj)
+{
+	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
+		uint32_t pipe_id, uint32_t queue_id,
+		struct queue_properties *p, struct mm_struct *mms)
+{
+	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+					       (uint32_t __user *)p->write_ptr,
+					       mms);
+}
+
+#define SDMA_RLC_DUMMY_DEFAULT 0xf
+
+static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+		struct queue_properties *q)
+{
+	struct v9_sdma_mqd *m;
+
+	m = get_sdma_mqd(mqd);
+	m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
+		<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+		q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+		1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+		6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+
+	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
+	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
+	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+	m->sdmax_rlcx_doorbell_offset =
+		q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+	m->sdma_engine_id = q->sdma_engine_id;
+	m->sdma_queue_id = q->sdma_queue_id;
+	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+
+	q->is_active = (q->queue_size > 0 &&
+			q->queue_address != 0 &&
+			q->queue_percent > 0 &&
+			!q->is_evicted);
+
+	return 0;
+}
+
+/*
+ * The preempt type here is ignored because there is only one way
+ * to preempt an SDMA queue.
+ */
+static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
+		enum kfd_preempt_type type,
+		unsigned int timeout, uint32_t pipe_id,
+		uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+}
+
+static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
+		uint64_t queue_address, uint32_t pipe_id,
+		uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int debugfs_show_mqd(struct seq_file *m, void *data)
+{
+	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
+		     data, sizeof(struct v9_mqd), false);
+	return 0;
+}
+
+static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
+{
+	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
+		     data, sizeof(struct v9_sdma_mqd), false);
+	return 0;
+}
+
+#endif
+
+struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+		struct kfd_dev *dev)
+{
+	struct mqd_manager *mqd;
+
+	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+		return NULL;
+
+	mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+	if (!mqd)
+		return NULL;
+
+	mqd->dev = dev;
+
+	switch (type) {
+	case KFD_MQD_TYPE_CP:
+	case KFD_MQD_TYPE_COMPUTE:
+		mqd->init_mqd = init_mqd;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+		mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
+		break;
+	case KFD_MQD_TYPE_HIQ:
+		mqd->init_mqd = init_mqd_hiq;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd_hiq;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+		mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
+		break;
+	case KFD_MQD_TYPE_SDMA:
+		mqd->init_mqd = init_mqd_sdma;
+		mqd->uninit_mqd = uninit_mqd_sdma;
+		mqd->load_mqd = load_mqd_sdma;
+		mqd->update_mqd = update_mqd_sdma;
+		mqd->destroy_mqd = destroy_mqd_sdma;
+		mqd->is_occupied = is_occupied_sdma;
+#if defined(CONFIG_DEBUG_FS)
+		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+#endif
+		break;
+	default:
+		kfree(mqd);
+		return NULL;
+	}
+
+	return mqd;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b68299a3e18a..fac28827b000 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -197,6 +197,7 @@ struct kfd_mem_obj {
 	uint32_t range_end;
 	uint64_t gpu_addr;
 	uint32_t *cpu_ptr;
+	void *gtt_mem;
 };
 
 struct kfd_vmid_info {
@@ -822,6 +823,8 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
 		struct kfd_dev *dev);
 struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
 		struct kfd_dev *dev);
+struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
+		struct kfd_dev *dev);
 struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
 void device_queue_manager_uninit(struct device_queue_manager *dqm);
 struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,

From bed4f110251b4f9041e5e797e035bc40c34d60ea Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:09 -0400
Subject: [PATCH 0240/1286] drm/amdkfd: Add GFXv9 device queue manager

Signed-off-by: John Bridgman <john.bridgman@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/Makefile           |  2 +-
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 10 ++-
 .../drm/amd/amdkfd/kfd_device_queue_manager.h |  2 +
 .../amd/amdkfd/kfd_device_queue_manager_v9.c  | 84 +++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_module.c       |  5 ++
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  5 ++
 6 files changed, 106 insertions(+), 2 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c

diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 094b591ed8c2..ff8b5aa11f4e 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -35,7 +35,7 @@ amdkfd-y	:= kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 		kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \
 		kfd_packet_manager.o kfd_process_queue_manager.o \
 		kfd_device_queue_manager.o kfd_device_queue_manager_cik.o \
-		kfd_device_queue_manager_vi.o \
+		kfd_device_queue_manager_vi.o kfd_device_queue_manager_v9.o \
 		kfd_interrupt.o kfd_events.o cik_event_interrupt.o \
 		kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 500f022d089d..9af94b1f9074 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1386,7 +1386,10 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 				   void __user *alternate_aperture_base,
 				   uint64_t alternate_aperture_size)
 {
-	bool retval;
+	bool retval = true;
+
+	if (!dqm->asic_ops.set_cache_memory_policy)
+		return retval;
 
 	mutex_lock(&dqm->lock);
 
@@ -1655,6 +1658,11 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 	case CHIP_POLARIS11:
 		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
 		break;
+
+	case CHIP_VEGA10:
+	case CHIP_RAVEN:
+		device_queue_manager_init_v9(&dqm->asic_ops);
+		break;
 	default:
 		WARN(1, "Unexpected ASIC family %u",
 		     dev->device_info->asic_family);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 412beff3281d..59a6b1956932 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -200,6 +200,8 @@ void device_queue_manager_init_vi(
 		struct device_queue_manager_asic_ops *asic_ops);
 void device_queue_manager_init_vi_tonga(
 		struct device_queue_manager_asic_ops *asic_ops);
+void device_queue_manager_init_v9(
+		struct device_queue_manager_asic_ops *asic_ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
 unsigned int get_queues_num(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
new file mode 100644
index 000000000000..79e5bcf6367c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "kfd_device_queue_manager.h"
+#include "vega10_enum.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "sdma0/sdma0_4_0_sh_mask.h"
+
+static int update_qpd_v9(struct device_queue_manager *dqm,
+			 struct qcm_process_device *qpd);
+static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
+			    struct qcm_process_device *qpd);
+
+void device_queue_manager_init_v9(
+	struct device_queue_manager_asic_ops *asic_ops)
+{
+	asic_ops->update_qpd = update_qpd_v9;
+	asic_ops->init_sdma_vm = init_sdma_vm_v9;
+}
+
+static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
+{
+	uint32_t shared_base = pdd->lds_base >> 48;
+	uint32_t private_base = pdd->scratch_base >> 48;
+
+	return (shared_base << SH_MEM_BASES__SHARED_BASE__SHIFT) |
+		private_base;
+}
+
+static int update_qpd_v9(struct device_queue_manager *dqm,
+			 struct qcm_process_device *qpd)
+{
+	struct kfd_process_device *pdd;
+
+	pdd = qpd_to_pdd(qpd);
+
+	/* check if the sh_mem_config register is already configured */
+	if (qpd->sh_mem_config == 0) {
+		qpd->sh_mem_config =
+				SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+					SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+		if (vega10_noretry &&
+		    !dqm->dev->device_info->needs_iommu_device)
+			qpd->sh_mem_config |=
+				1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+
+		qpd->sh_mem_ape1_limit = 0;
+		qpd->sh_mem_ape1_base = 0;
+	}
+
+	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+
+	pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+
+	return 0;
+}
+
+static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
+			    struct qcm_process_device *qpd)
+{
+	/* Not needed on SDMAv4 any more */
+	q->properties.sdma_vm_addr = 0;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 45bc458f7348..76bf2dc8aec4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -83,6 +83,11 @@ module_param(ignore_crat, int, 0444);
 MODULE_PARM_DESC(ignore_crat,
 	"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
 
+int vega10_noretry;
+module_param_named(noretry, vega10_noretry, int, 0644);
+MODULE_PARM_DESC(noretry,
+	"Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
+
 static int amdkfd_init_completed;
 
 int kgd2kfd_init(unsigned int interface_version,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index fac28827b000..d5cdb5db4983 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -137,6 +137,11 @@ extern int debug_largebar;
  */
 extern int ignore_crat;
 
+/*
+ * Set sh_mem_config.retry_disable on Vega10
+ */
+extern int vega10_noretry;
+
 /**
  * enum kfd_sched_policy
  *

From ca750681bc4a897ffa7eed71a1e05762fb1f0a34 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:10 -0400
Subject: [PATCH 0241/1286] drm/amdkfd: Add SOC15 interrupt processing support

Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/Makefile           |  2 +-
 .../gpu/drm/amd/amdkfd/kfd_int_process_v9.c   | 84 +++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  2 +
 drivers/gpu/drm/amd/amdkfd/soc15_int.h        | 47 +++++++++++
 4 files changed, 134 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
 create mode 100644 drivers/gpu/drm/amd/amdkfd/soc15_int.h

diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index ff8b5aa11f4e..ffd096fffc1c 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -37,7 +37,7 @@ amdkfd-y	:= kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 		kfd_device_queue_manager.o kfd_device_queue_manager_cik.o \
 		kfd_device_queue_manager_vi.o kfd_device_queue_manager_v9.o \
 		kfd_interrupt.o kfd_events.o cik_event_interrupt.o \
-		kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o
+		kfd_int_process_v9.o kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o
 
 ifneq ($(CONFIG_AMD_IOMMU_V2),)
 amdkfd-y += kfd_iommu.o
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
new file mode 100644
index 000000000000..39d41155581f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kfd_priv.h"
+#include "kfd_events.h"
+#include "soc15_int.h"
+
+
+static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+					const uint32_t *ih_ring_entry)
+{
+	uint16_t source_id, client_id, pasid, vmid;
+
+	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+
+	if (pasid) {
+		const uint32_t *data = ih_ring_entry;
+
+		pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
+			 client_id, source_id, pasid);
+		pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+			 data[0], data[1], data[2], data[3],
+			 data[4], data[5], data[6], data[7]);
+	}
+
+	return (pasid != 0) &&
+		(source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+		 source_id == SOC15_INTSRC_SDMA_TRAP ||
+		 source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+		 source_id == SOC15_INTSRC_CP_BAD_OPCODE);
+}
+
+static void event_interrupt_wq_v9(struct kfd_dev *dev,
+					const uint32_t *ih_ring_entry)
+{
+	uint16_t source_id, client_id, pasid, vmid;
+	uint32_t context_id;
+
+	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+	context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+
+	if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+		kfd_signal_event_interrupt(pasid, context_id, 32);
+	else if (source_id == SOC15_INTSRC_SDMA_TRAP)
+		kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
+	else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
+		kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
+	else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+		kfd_signal_hw_exception_event(pasid);
+	else if (client_id == SOC15_IH_CLIENTID_VMC ||
+		 client_id == SOC15_IH_CLIENTID_UTCL2) {
+		/* TODO */
+	}
+}
+
+const struct kfd_event_interrupt_class event_interrupt_class_v9 = {
+	.interrupt_isr = event_interrupt_isr_v9,
+	.interrupt_wq = event_interrupt_wq_v9,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d5cdb5db4983..06b210b33dda 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -934,6 +934,8 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
 
 /* Events */
 extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
+extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
+
 extern const struct kfd_device_global_init_class device_global_init_class_cik;
 
 void kfd_event_init_process(struct kfd_process *p);
diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
new file mode 100644
index 000000000000..0bc0b25cb410
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef HSA_SOC15_INT_H_INCLUDED
+#define HSA_SOC15_INT_H_INCLUDED
+
+#include "soc15_ih_clientid.h"
+
+#define SOC15_INTSRC_CP_END_OF_PIPE	181
+#define SOC15_INTSRC_CP_BAD_OPCODE	183
+#define SOC15_INTSRC_SQ_INTERRUPT_MSG	239
+#define SOC15_INTSRC_VMC_FAULT		0
+#define SOC15_INTSRC_SDMA_TRAP		224
+
+
+#define SOC15_CLIENT_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) & 0xff)
+#define SOC15_SOURCE_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 8 & 0xff)
+#define SOC15_RING_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 16 & 0xff)
+#define SOC15_VMID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 24 & 0xf)
+#define SOC15_VMID_TYPE_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 31 & 0x1)
+#define SOC15_PASID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) & 0xffff)
+#define SOC15_CONTEXT_ID0_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[4]))
+#define SOC15_CONTEXT_ID1_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[5]))
+#define SOC15_CONTEXT_ID2_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[6]))
+#define SOC15_CONTEXT_ID3_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[7]))
+
+#endif
+

From 2a26fbfe80015faef830bc47c5223b4b31d41791 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:11 -0400
Subject: [PATCH 0242/1286] drm/amdkfd: Fix goto usage

Missed a spot in the previous cleanup commit:
Remove gotos that do not feature any common cleanup, and use gotos
instead of repeating cleanup commands.

According to kernel.org: "The goto statement comes in handy when a
function exits from multiple locations and some common work such as
cleanup has to be done. If there is no cleanup needed then just return
directly."

Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 69f496485331..23e586b0507c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -232,18 +232,16 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 		 * make sure calling functions know
 		 * acquire_packet_buffer() failed
 		 */
-		*buffer_ptr = NULL;
-		return -ENOMEM;
+		goto err_no_space;
 	}
 
 	if (wptr + packet_size_in_dwords >= queue_size_dwords) {
 		/* make sure after rolling back to position 0, there is
 		 * still enough space.
 		 */
-		if (packet_size_in_dwords >= rptr) {
-			*buffer_ptr = NULL;
-			return -ENOMEM;
-		}
+		if (packet_size_in_dwords >= rptr)
+			goto err_no_space;
+
 		/* fill nops, roll back and start at position 0 */
 		while (wptr > 0) {
 			queue_address[wptr] = kq->nop_packet;
@@ -255,6 +253,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 	kq->pending_wptr = wptr + packet_size_in_dwords;
 
 	return 0;
+
+err_no_space:
+	*buffer_ptr = NULL;
+	return -ENOMEM;
 }
 
 static void submit_packet(struct kernel_queue *kq)

From bebfd2f4126a115420a2b04f44a05552c12e5b46 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:12 -0400
Subject: [PATCH 0243/1286] drm/amdkfd: Fix kernel queue rollback_packet

kq->queue->properties.write_ptr is a GPU address which can't be
dereferenced in the kernel. Use kq->wptr_kernel instead, which is the
kernel CPU address of the same buffer.
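
For context, a rough userspace sketch of the underlying distinction (the
struct and names here are hypothetical, not the actual KFD types): the same
buffer is described both by a device address that only the hardware may use
and by a CPU mapping that is the only pointer host code may dereference.

	#include <stdint.h>
	#include <stdio.h>

	struct wptr_buf {
		uint64_t gpu_addr;	/* device virtual address, handed to hardware only */
		uint32_t *cpu_ptr;	/* CPU mapping of the same memory */
	};

	static uint32_t read_wptr(const struct wptr_buf *w)
	{
		/* Correct: dereference the CPU mapping. Dereferencing
		 * gpu_addr instead is the class of bug fixed here.
		 */
		return *w->cpu_ptr;
	}

	int main(void)
	{
		uint32_t backing = 42;
		struct wptr_buf w = { .gpu_addr = 0x100000, .cpu_ptr = &backing };

		printf("%u\n", read_wptr(&w));
		return 0;
	}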

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 23e586b0507c..9f381612afd7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -279,7 +279,7 @@ static void submit_packet(struct kernel_queue *kq)
 
 static void rollback_packet(struct kernel_queue *kq)
 {
-	kq->pending_wptr = *kq->queue->properties.write_ptr;
+	kq->pending_wptr = *kq->wptr_kernel;
 }
 
 struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,

From 9d7d024816686f922735f7adccd00e3fc44e2e03 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Sun, 8 Apr 2018 22:03:51 -0400
Subject: [PATCH 0244/1286] drm/amdkfd: Add 64-bit doorbell and wptr support to
 kernel queue

v2: Removed redundant 0x before %p.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c     | 10 ++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 25 ++++++++++++++-----
 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h |  7 +++++-
 .../gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c |  9 +++++++
 .../gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c  |  9 +++++++
 .../gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c  |  9 +++++++
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  1 +
 7 files changed, 63 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 36c9269ea7c0..c3744d89352c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -214,6 +214,16 @@ void write_kernel_doorbell(void __iomem *db, u32 value)
 	}
 }
 
+void write_kernel_doorbell64(void __iomem *db, u64 value)
+{
+	if (db) {
+		WARN(((unsigned long)db & 7) != 0,
+		     "Unaligned 64-bit doorbell");
+		writeq(value, (u64 __iomem *)db);
+		pr_debug("writing %llu to doorbell address %p\n", value, db);
+	}
+}
+
 unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
 					struct kfd_process *process,
 					unsigned int doorbell_id)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 9f381612afd7..476951d8c91c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -99,7 +99,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
 	kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
 
-	retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel),
+	retval = kfd_gtt_sa_allocate(dev, dev->device_info->doorbell_size,
 					&kq->wptr_mem);
 
 	if (retval != 0)
@@ -208,6 +208,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 	size_t available_size;
 	size_t queue_size_dwords;
 	uint32_t wptr, rptr;
+	uint64_t wptr64;
 	unsigned int *queue_address;
 
 	/* When rptr == wptr, the buffer is empty.
@@ -216,7 +217,8 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 	 * the opposite. So we can only use up to queue_size_dwords - 1 dwords.
 	 */
 	rptr = *kq->rptr_kernel;
-	wptr = *kq->wptr_kernel;
+	wptr = kq->pending_wptr;
+	wptr64 = kq->pending_wptr64;
 	queue_address = (unsigned int *)kq->pq_kernel_addr;
 	queue_size_dwords = kq->queue->properties.queue_size / 4;
 
@@ -246,11 +248,13 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
 		while (wptr > 0) {
 			queue_address[wptr] = kq->nop_packet;
 			wptr = (wptr + 1) % queue_size_dwords;
+			wptr64++;
 		}
 	}
 
 	*buffer_ptr = &queue_address[wptr];
 	kq->pending_wptr = wptr + packet_size_in_dwords;
+	kq->pending_wptr64 = wptr64 + packet_size_in_dwords;
 
 	return 0;
 
@@ -272,14 +276,18 @@ static void submit_packet(struct kernel_queue *kq)
 	pr_debug("\n");
 #endif
 
-	*kq->wptr_kernel = kq->pending_wptr;
-	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
-				kq->pending_wptr);
+	kq->ops_asic_specific.submit_packet(kq);
 }
 
 static void rollback_packet(struct kernel_queue *kq)
 {
-	kq->pending_wptr = *kq->wptr_kernel;
+	if (kq->dev->device_info->doorbell_size == 8) {
+		kq->pending_wptr64 = *kq->wptr64_kernel;
+		kq->pending_wptr = *kq->wptr_kernel %
+			(kq->queue->properties.queue_size / 4);
+	} else {
+		kq->pending_wptr = *kq->wptr_kernel;
+	}
 }
 
 struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
@@ -310,6 +318,11 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	case CHIP_HAWAII:
 		kernel_queue_init_cik(&kq->ops_asic_specific);
 		break;
+
+	case CHIP_VEGA10:
+	case CHIP_RAVEN:
+		kernel_queue_init_v9(&kq->ops_asic_specific);
+		break;
 	default:
 		WARN(1, "Unexpected ASIC family %u",
 		     dev->device_info->asic_family);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 594053136ee4..97aff2041a5d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -72,6 +72,7 @@ struct kernel_queue {
 	struct kfd_dev		*dev;
 	struct mqd_manager	*mqd;
 	struct queue		*queue;
+	uint64_t		pending_wptr64;
 	uint32_t		pending_wptr;
 	unsigned int		nop_packet;
 
@@ -79,7 +80,10 @@ struct kernel_queue {
 	uint32_t		*rptr_kernel;
 	uint64_t		rptr_gpu_addr;
 	struct kfd_mem_obj	*wptr_mem;
-	uint32_t		*wptr_kernel;
+	union {
+		uint64_t	*wptr64_kernel;
+		uint32_t	*wptr_kernel;
+	};
 	uint64_t		wptr_gpu_addr;
 	struct kfd_mem_obj	*pq;
 	uint64_t		pq_gpu_addr;
@@ -97,5 +101,6 @@ struct kernel_queue {
 
 void kernel_queue_init_cik(struct kernel_queue_ops *ops);
 void kernel_queue_init_vi(struct kernel_queue_ops *ops);
+void kernel_queue_init_v9(struct kernel_queue_ops *ops);
 
 #endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
index a90eb440b1fb..19e54acb4125 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
@@ -26,11 +26,13 @@
 static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
 			enum kfd_queue_type type, unsigned int queue_size);
 static void uninitialize_cik(struct kernel_queue *kq);
+static void submit_packet_cik(struct kernel_queue *kq);
 
 void kernel_queue_init_cik(struct kernel_queue_ops *ops)
 {
 	ops->initialize = initialize_cik;
 	ops->uninitialize = uninitialize_cik;
+	ops->submit_packet = submit_packet_cik;
 }
 
 static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
@@ -42,3 +44,10 @@ static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
 static void uninitialize_cik(struct kernel_queue *kq)
 {
 }
+
+static void submit_packet_cik(struct kernel_queue *kq)
+{
+	*kq->wptr_kernel = kq->pending_wptr;
+	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+				kq->pending_wptr);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
index ece7d59537b7..684a3bf07efd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
@@ -29,11 +29,13 @@
 static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
 			enum kfd_queue_type type, unsigned int queue_size);
 static void uninitialize_v9(struct kernel_queue *kq);
+static void submit_packet_v9(struct kernel_queue *kq);
 
 void kernel_queue_init_v9(struct kernel_queue_ops *ops)
 {
 	ops->initialize = initialize_v9;
 	ops->uninitialize = uninitialize_v9;
+	ops->submit_packet = submit_packet_v9;
 }
 
 static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
@@ -58,6 +60,13 @@ static void uninitialize_v9(struct kernel_queue *kq)
 	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
 }
 
+static void submit_packet_v9(struct kernel_queue *kq)
+{
+	*kq->wptr64_kernel = kq->pending_wptr64;
+	write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
+				kq->pending_wptr64);
+}
+
 static int pm_map_process_v9(struct packet_manager *pm,
 		uint32_t *buffer, struct qcm_process_device *qpd)
 {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
index f9019efd31b9..bf20c6d32ef3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
@@ -29,11 +29,13 @@
 static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
 			enum kfd_queue_type type, unsigned int queue_size);
 static void uninitialize_vi(struct kernel_queue *kq);
+static void submit_packet_vi(struct kernel_queue *kq);
 
 void kernel_queue_init_vi(struct kernel_queue_ops *ops)
 {
 	ops->initialize = initialize_vi;
 	ops->uninitialize = uninitialize_vi;
+	ops->submit_packet = submit_packet_vi;
 }
 
 static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
@@ -58,6 +60,13 @@ static void uninitialize_vi(struct kernel_queue *kq)
 	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
 }
 
+static void submit_packet_vi(struct kernel_queue *kq)
+{
+	*kq->wptr_kernel = kq->pending_wptr;
+	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+				kq->pending_wptr);
+}
+
 unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
 {
 	union PM4_MES_TYPE_3_HEADER header;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 06b210b33dda..10d5b5445195 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -769,6 +769,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
 u32 read_kernel_doorbell(u32 __iomem *db);
 void write_kernel_doorbell(void __iomem *db, u32 value);
+void write_kernel_doorbell64(void __iomem *db, u64 value);
 unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
 					struct kfd_process *process,
 					unsigned int doorbell_id);
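
For readers following the hunks above: a minimal userspace-style model of
the dual write-pointer bookkeeping (illustrative only; names and the wrap
handling are simplified from acquire_packet_buffer() and rollback_packet()):

  #include <stdint.h>

  struct kq_model {
          uint32_t queue_size_dwords;     /* ring size in dwords */
          uint32_t pending_wptr;          /* 32-bit index into the ring */
          uint64_t pending_wptr64;        /* monotonic 64-bit dword count */
  };

  /* Advance both pointers by one packet; only the ring index wraps. */
  static void advance(struct kq_model *kq, uint32_t packet_dwords)
  {
          kq->pending_wptr = (kq->pending_wptr + packet_dwords) %
                             kq->queue_size_dwords;
          kq->pending_wptr64 += packet_dwords;
  }

  /* On 64-bit doorbell hardware, rollback recovers the ring index from
   * the last committed 64-bit write pointer via a modulo, as the new
   * rollback_packet() path does. */
  static void rollback(struct kq_model *kq, uint64_t committed_wptr64)
  {
          kq->pending_wptr64 = committed_wptr64;
          kq->pending_wptr = committed_wptr64 % kq->queue_size_dwords;
  }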

From 60f8e873307fd15bfb45f1895958cb04a2434e03 Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:00 +0530
Subject: [PATCH 0245/1286] drm/i915/skl+: rename skl_wm_values struct to
 skl_ddb_values

The skl_wm_values struct contains only pipe/plane DDB values, so rename
it for better code readability. Similarly, skl_copy_wm_for_pipe copies
DDB values, so rename it as well.

s/skl_wm_values/skl_ddb_values
s/skl_copy_wm_for_pipe/skl_copy_ddb_for_pipe

Changes since V1:
 - also change name of skl_copy_wm_for_pipe

v2: Added reviewed by from Juha-Pekka Heikkila

v3: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-2-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h  |  4 ++--
 drivers/gpu/drm/i915/intel_drv.h |  2 +-
 drivers/gpu/drm/i915/intel_pm.c  | 16 ++++++++--------
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9bca104c409e..c1b89e9899b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1186,7 +1186,7 @@ struct skl_ddb_allocation {
 	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
 };
 
-struct skl_wm_values {
+struct skl_ddb_values {
 	unsigned dirty_pipes;
 	struct skl_ddb_allocation ddb;
 };
@@ -1885,7 +1885,7 @@ struct drm_i915_private {
 		/* current hardware state */
 		union {
 			struct ilk_wm_values hw;
-			struct skl_wm_values skl_hw;
+			struct skl_ddb_values skl_hw;
 			struct vlv_wm_values vlv;
 			struct g4x_wm_values g4x;
 		};
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 85e483e9a45b..d9a44ccdc837 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -482,7 +482,7 @@ struct intel_atomic_state {
 	bool skip_intermediate_wm;
 
 	/* Gen9+ only */
-	struct skl_wm_values wm_results;
+	struct skl_ddb_values wm_results;
 
 	struct i915_sw_fence commit_ready;
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0d25e413ec0b..b7f6d8b8ff60 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5042,9 +5042,9 @@ skl_compute_ddb(struct drm_atomic_state *state)
 }
 
 static void
-skl_copy_wm_for_pipe(struct skl_wm_values *dst,
-		     struct skl_wm_values *src,
-		     enum pipe pipe)
+skl_copy_ddb_for_pipe(struct skl_ddb_values *dst,
+		      struct skl_ddb_values *src,
+		      enum pipe pipe)
 {
 	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
 	       sizeof(dst->ddb.y_plane[pipe]));
@@ -5095,7 +5095,7 @@ skl_compute_wm(struct drm_atomic_state *state)
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *cstate;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct skl_wm_values *results = &intel_state->wm_results;
+	struct skl_ddb_values *results = &intel_state->wm_results;
 	struct drm_device *dev = state->dev;
 	struct skl_pipe_wm *pipe_wm;
 	bool changed = false;
@@ -5197,8 +5197,8 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct skl_wm_values *results = &state->wm_results;
-	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
+	struct skl_ddb_values *results = &state->wm_results;
+	struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
 	enum pipe pipe = intel_crtc->pipe;
 
 	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
@@ -5209,7 +5209,7 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 	if (cstate->base.active_changed)
 		skl_atomic_update_crtc_wm(state, cstate);
 
-	skl_copy_wm_for_pipe(hw_vals, results, pipe);
+	skl_copy_ddb_for_pipe(hw_vals, results, pipe);
 
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -5341,7 +5341,7 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
 void skl_wm_get_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
+	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;

From b879d58ff31baf28c7d7d690c8da7978299fbe02 Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:01 +0530
Subject: [PATCH 0246/1286] drm/i915/skl+: refactor WM calculation for NV12

The current code calculates DDB for planar formats in such a way that
the DDB of plane 0 is stored in plane 1 and vice versa. To clean this
up, this patch refactors the WM/DDB calculation for NV12 planar
formats.
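
The net effect can be sketched as follows (illustrative userspace code,
not the driver function; cpp[] mirrors drm_format_info::cpp, plane 0 is
the packed/Y surface and plane 1 the NV12 UV surface):

  static unsigned int
  relative_data_rate(unsigned int width, unsigned int height,
                     const unsigned char cpp[2], int plane, int is_nv12)
  {
          /* Only NV12 has a second (UV) plane; it is 2x2 subsampled. */
          if (plane == 1) {
                  if (!is_nv12)
                          return 0;
                  width /= 2;
                  height /= 2;
          }
          return width * height * cpp[plane];
  }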

v2: Addressed review comments by Maarten

v3: Rebased and addressed review comments by Maarten

v4: Fixed a compilation issue caused by the string replacement of
is_nv12 with is_planar

v5: Added reviewed by from Juha-Pekka Heikkila

v6: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-3-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h  |   5 +-
 drivers/gpu/drm/i915/intel_drv.h |   1 +
 drivers/gpu/drm/i915/intel_pm.c  | 121 ++++++++++++++++---------------
 3 files changed, 66 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c1b89e9899b3..28fd200eb1fc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1182,8 +1182,9 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 }
 
 struct skl_ddb_allocation {
-	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
-	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
+	/* packed/y */
+	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+	struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
 };
 
 struct skl_ddb_values {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d9a44ccdc837..626a46c11f50 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -603,6 +603,7 @@ struct intel_pipe_wm {
 struct skl_plane_wm {
 	struct skl_wm_level wm[8];
 	struct skl_wm_level trans_wm;
+	bool is_planar;
 };
 
 struct skl_pipe_wm {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b7f6d8b8ff60..fda22b1ae753 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4009,9 +4009,9 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 static unsigned int
 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 			     const struct drm_plane_state *pstate,
-			     int y)
+			     const int plane)
 {
-	struct intel_plane *plane = to_intel_plane(pstate->plane);
+	struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
 	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
 	uint32_t data_rate;
 	uint32_t width = 0, height = 0;
@@ -4025,9 +4025,9 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	fb = pstate->fb;
 	format = fb->format->format;
 
-	if (plane->id == PLANE_CURSOR)
+	if (intel_plane->id == PLANE_CURSOR)
 		return 0;
-	if (y && format != DRM_FORMAT_NV12)
+	if (plane == 1 && format != DRM_FORMAT_NV12)
 		return 0;
 
 	/*
@@ -4038,19 +4038,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-	/* for planar format */
-	if (format == DRM_FORMAT_NV12) {
-		if (y)  /* y-plane data rate */
-			data_rate = width * height *
-				fb->format->cpp[0];
-		else    /* uv-plane data rate */
-			data_rate = (width / 2) * (height / 2) *
-				fb->format->cpp[1];
-	} else {
-		/* for packed formats */
-		data_rate = width * height * fb->format->cpp[0];
+	/* UV plane does 1/2 pixel sub-sampling */
+	if (plane == 1 && format == DRM_FORMAT_NV12) {
+		width /= 2;
+		height /= 2;
 	}
 
+	data_rate = width * height * fb->format->cpp[plane];
+
 	down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
 
 	return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
@@ -4063,8 +4058,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
  */
 static unsigned int
 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
-				 unsigned *plane_data_rate,
-				 unsigned *plane_y_data_rate)
+				 unsigned int *plane_data_rate,
+				 unsigned int *uv_plane_data_rate)
 {
 	struct drm_crtc_state *cstate = &intel_cstate->base;
 	struct drm_atomic_state *state = cstate->state;
@@ -4080,17 +4075,17 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
 		enum plane_id plane_id = to_intel_plane(plane)->id;
 		unsigned int rate;
 
-		/* packed/uv */
+		/* packed/y */
 		rate = skl_plane_relative_data_rate(intel_cstate,
 						    pstate, 0);
 		plane_data_rate[plane_id] = rate;
 
 		total_data_rate += rate;
 
-		/* y-plane */
+		/* uv-plane */
 		rate = skl_plane_relative_data_rate(intel_cstate,
 						    pstate, 1);
-		plane_y_data_rate[plane_id] = rate;
+		uv_plane_data_rate[plane_id] = rate;
 
 		total_data_rate += rate;
 	}
@@ -4099,8 +4094,7 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
 }
 
 static uint16_t
-skl_ddb_min_alloc(const struct drm_plane_state *pstate,
-		  const int y)
+skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
 {
 	struct drm_framebuffer *fb = pstate->fb;
 	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
@@ -4111,8 +4105,8 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 	if (WARN_ON(!fb))
 		return 0;
 
-	/* For packed formats, no y-plane, return 0 */
-	if (y && fb->format->format != DRM_FORMAT_NV12)
+	/* For packed formats, and uv-plane, return 0 */
+	if (plane == 1 && fb->format->format != DRM_FORMAT_NV12)
 		return 0;
 
 	/* For Non Y-tile return 8-blocks */
@@ -4131,15 +4125,12 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
 
 	/* Halve UV plane width and height for NV12 */
-	if (fb->format->format == DRM_FORMAT_NV12 && !y) {
+	if (plane == 1) {
 		src_w /= 2;
 		src_h /= 2;
 	}
 
-	if (fb->format->format == DRM_FORMAT_NV12 && !y)
-		plane_bpp = fb->format->cpp[1];
-	else
-		plane_bpp = fb->format->cpp[0];
+	plane_bpp = fb->format->cpp[plane];
 
 	if (drm_rotation_90_or_270(pstate->rotation)) {
 		switch (plane_bpp) {
@@ -4167,7 +4158,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 
 static void
 skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
-		 uint16_t *minimum, uint16_t *y_minimum)
+		 uint16_t *minimum, uint16_t *uv_minimum)
 {
 	const struct drm_plane_state *pstate;
 	struct drm_plane *plane;
@@ -4182,7 +4173,7 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
 			continue;
 
 		minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
-		y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+		uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
 	}
 
 	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4200,17 +4191,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
 	uint16_t alloc_size, start;
 	uint16_t minimum[I915_MAX_PLANES] = {};
-	uint16_t y_minimum[I915_MAX_PLANES] = {};
+	uint16_t uv_minimum[I915_MAX_PLANES] = {};
 	unsigned int total_data_rate;
 	enum plane_id plane_id;
 	int num_active;
-	unsigned plane_data_rate[I915_MAX_PLANES] = {};
-	unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
+	unsigned int plane_data_rate[I915_MAX_PLANES] = {};
+	unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
 	uint16_t total_min_blocks = 0;
 
 	/* Clear the partitioning for disabled planes. */
 	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-	memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+	memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
 
 	if (WARN_ON(!state))
 		return 0;
@@ -4225,7 +4216,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	if (alloc_size == 0)
 		return 0;
 
-	skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
+	skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum);
 
 	/*
 	 * 1. Allocate the mininum required blocks for each active plane
@@ -4235,7 +4226,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 		total_min_blocks += minimum[plane_id];
-		total_min_blocks += y_minimum[plane_id];
+		total_min_blocks += uv_minimum[plane_id];
 	}
 
 	if (total_min_blocks > alloc_size) {
@@ -4257,14 +4248,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 */
 	total_data_rate = skl_get_total_relative_data_rate(cstate,
 							   plane_data_rate,
-							   plane_y_data_rate);
+							   uv_plane_data_rate);
 	if (total_data_rate == 0)
 		return 0;
 
 	start = alloc->start;
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-		unsigned int data_rate, y_data_rate;
-		uint16_t plane_blocks, y_plane_blocks = 0;
+		unsigned int data_rate, uv_data_rate;
+		uint16_t plane_blocks, uv_plane_blocks;
 
 		if (plane_id == PLANE_CURSOR)
 			continue;
@@ -4288,21 +4279,20 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
 		start += plane_blocks;
 
-		/*
-		 * allocation for y_plane part of planar format:
-		 */
-		y_data_rate = plane_y_data_rate[plane_id];
+		/* Allocate DDB for UV plane for planar format/NV12 */
+		uv_data_rate = uv_plane_data_rate[plane_id];
 
-		y_plane_blocks = y_minimum[plane_id];
-		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
-					total_data_rate);
+		uv_plane_blocks = uv_minimum[plane_id];
+		uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
+					   total_data_rate);
 
-		if (y_data_rate) {
-			ddb->y_plane[pipe][plane_id].start = start;
-			ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
+		if (uv_data_rate) {
+			ddb->uv_plane[pipe][plane_id].start = start;
+			ddb->uv_plane[pipe][plane_id].end =
+				start + uv_plane_blocks;
 		}
 
-		start += y_plane_blocks;
+		start += uv_plane_blocks;
 	}
 
 	return 0;
@@ -4430,8 +4420,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 		wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	}
 
-	wp->cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
-							    fb->format->cpp[0];
+	wp->cpp = fb->format->cpp[0];
 	wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
 							     intel_pstate);
 
@@ -4660,6 +4649,9 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 			return ret;
 	}
 
+	if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
+		wm->is_planar = true;
+
 	return 0;
 }
 
@@ -4833,10 +4825,21 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 
 	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
 			    &ddb->plane[pipe][plane_id]);
-	if (INTEL_GEN(dev_priv) < 11)
+	if (INTEL_GEN(dev_priv) >= 11)
+		return skl_ddb_entry_write(dev_priv,
+					   PLANE_BUF_CFG(pipe, plane_id),
+					   &ddb->plane[pipe][plane_id]);
+	if (wm->is_planar) {
+		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
+				    &ddb->uv_plane[pipe][plane_id]);
 		skl_ddb_entry_write(dev_priv,
 				    PLANE_NV12_BUF_CFG(pipe, plane_id),
-				    &ddb->y_plane[pipe][plane_id]);
+				    &ddb->plane[pipe][plane_id]);
+	} else {
+		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
+				    &ddb->plane[pipe][plane_id]);
+		I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+	}
 }
 
 static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
@@ -4951,8 +4954,8 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 
 		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
 					&new_ddb->plane[pipe][plane_id]) &&
-		    skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
-					&new_ddb->y_plane[pipe][plane_id]))
+		    skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
+					&new_ddb->uv_plane[pipe][plane_id]))
 			continue;
 
 		plane_state = drm_atomic_get_plane_state(state, plane);
@@ -5046,8 +5049,8 @@ skl_copy_ddb_for_pipe(struct skl_ddb_values *dst,
 		      struct skl_ddb_values *src,
 		      enum pipe pipe)
 {
-	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
-	       sizeof(dst->ddb.y_plane[pipe]));
+	memcpy(dst->ddb.uv_plane[pipe], src->ddb.uv_plane[pipe],
+	       sizeof(dst->ddb.uv_plane[pipe]));
 	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
 	       sizeof(dst->ddb.plane[pipe]));
 }

From f34a291c0a9f141728b2ad852066322ca38d3cdb Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:02 +0530
Subject: [PATCH 0247/1286] drm/i915/skl+: add NV12 in skl_format_to_fourcc

Add support for recognizing DRM_FORMAT_NV12 from the plane format
register value.

v2: Added reviewed by tag from Mika Kahola

v3: Added reviewed by from Juha-Pekka Heikkila

v4: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Mika Kahola <mika.kahola@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-4-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6de2e1b1a4a7..fa78f2590eea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2662,6 +2662,8 @@ static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
 	switch (format) {
 	case PLANE_CTL_FORMAT_RGB_565:
 		return DRM_FORMAT_RGB565;
+	case PLANE_CTL_FORMAT_NV12:
+		return DRM_FORMAT_NV12;
 	default:
 	case PLANE_CTL_FORMAT_XRGB_8888:
 		if (rgb_order) {

From ddf343191420e88479027fec9dc8efc0cafb63ef Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:03 +0530
Subject: [PATCH 0248/1286] drm/i915/skl+: support verification of DDB HW state
 for NV12

For YUV 420 Planar formats like NV12,
buffer allocation is done for Y and UV surfaces separately.
For NV12 plane formats, the UV buffer
allocation must be programmed in the Plane Buffer Config register
and the Y buffer allocation must be programmed in the
Plane NV12 Buffer Config register. Both register values
should be verified during verify_wm_state.
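
The readback mapping being verified can be sketched roughly as follows
(illustrative only; decode_entry() stands in for
skl_ddb_entry_init_from_hw() and simply records the raw value here):

  struct ddb_entry_model { unsigned int raw; };

  static void decode_entry(struct ddb_entry_model *e, unsigned int reg)
  {
          e->raw = reg;   /* real decode: skl_ddb_entry_init_from_hw() */
  }

  static void read_plane_ddb(int is_nv12, unsigned int buf_cfg,
                             unsigned int nv12_buf_cfg,
                             struct ddb_entry_model *y_or_packed,
                             struct ddb_entry_model *uv)
  {
          if (is_nv12) {
                  decode_entry(y_or_packed, nv12_buf_cfg);  /* Y allocation */
                  decode_entry(uv, buf_cfg);                /* UV allocation */
          } else {
                  decode_entry(y_or_packed, buf_cfg);
          }
  }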

v2: Addressed review comments by Maarten.

v3: Addressed review comments by Shashank Sharma.

v4: Adding reviewed by tag from Shashank Sharma

v5: Added reviewed by from Juha-Pekka Heikkila

v6: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-5-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_display.c |  2 +-
 drivers/gpu/drm/i915/intel_drv.h     |  1 +
 drivers/gpu/drm/i915/intel_pm.c      | 51 ++++++++++++++++++++++------
 3 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fa78f2590eea..057f3cf95e86 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2657,7 +2657,7 @@ static int i9xx_format_to_fourcc(int format)
 	}
 }
 
-static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
 {
 	switch (format) {
 	case PLANE_CTL_FORMAT_RGB_565:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 626a46c11f50..4eeaca350411 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1613,6 +1613,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
 int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
 			    struct intel_plane_state *plane_state);
 int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
 
 /* intel_csr.c */
 void intel_csr_ucode_init(struct drm_i915_private *);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index fda22b1ae753..d22d9b2cbc62 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3825,6 +3825,44 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
 		entry->end += 1;
 }
 
+static void
+skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
+			   const enum pipe pipe,
+			   const enum plane_id plane_id,
+			   struct skl_ddb_allocation *ddb /* out */)
+{
+	u32 val, val2 = 0;
+	int fourcc, pixel_format;
+
+	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
+	if (plane_id == PLANE_CURSOR) {
+		val = I915_READ(CUR_BUF_CFG(pipe));
+		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
+		return;
+	}
+
+	val = I915_READ(PLANE_CTL(pipe, plane_id));
+
+	/* No DDB allocated for disabled planes */
+	if (!(val & PLANE_CTL_ENABLE))
+		return;
+
+	pixel_format = val & PLANE_CTL_FORMAT_MASK;
+	fourcc = skl_format_to_fourcc(pixel_format,
+				      val & PLANE_CTL_ORDER_RGBX,
+				      val & PLANE_CTL_ALPHA_MASK);
+
+	val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+	val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+
+	if (fourcc == DRM_FORMAT_NV12) {
+		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val2);
+		skl_ddb_entry_init_from_hw(&ddb->uv_plane[pipe][plane_id], val);
+	} else {
+		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
+	}
+}
+
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */)
 {
@@ -3841,16 +3879,9 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 			continue;
 
-		for_each_plane_id_on_crtc(crtc, plane_id) {
-			u32 val;
-
-			if (plane_id != PLANE_CURSOR)
-				val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-			else
-				val = I915_READ(CUR_BUF_CFG(pipe));
-
-			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
-		}
+		for_each_plane_id_on_crtc(crtc, plane_id)
+			skl_ddb_get_hw_plane_state(dev_priv, pipe,
+						   plane_id, ddb);
 
 		intel_display_power_put(dev_priv, power_domain);
 	}

From 942aa2d0503d483562795d8f4c0957e13bd4b59d Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:04 +0530
Subject: [PATCH 0249/1286] drm/i915/skl+: NV12 related changes for WM

NV12 requires WM calculation for the UV plane as well. The UV plane WM
must also satisfy all WM-related restrictions.
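
The per-plane parameter selection this implies can be sketched like so
(illustrative only, not the driver code; plane 0 is the packed/Y surface
and plane 1 the NV12 UV surface):

  struct wm_params_model {
          unsigned int width;
          unsigned int cpp;
          int is_planar;
  };

  static int fill_wm_params(struct wm_params_model *wp, unsigned int src_width,
                            const unsigned char cpp[2], int is_nv12, int plane)
  {
          if (plane == 1 && !is_nv12)
                  return -1;              /* only NV12 has a UV plane */

          wp->is_planar = is_nv12;
          wp->width = src_width;
          if (plane == 1)
                  wp->width /= 2;         /* UV is horizontally subsampled */
          wp->cpp = cpp[plane];
          return 0;
  }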

v2: Addressed review comments from Shashank Sharma.

v3: Addressed review comments from Shashank Sharma
Changed plane_num to plane_id in skl_compute_plane_wm_params
and skl_compute_plane_wm.
Adding reviewed by tag from Shashank Sharma

v4: Added reviewed by from Juha-Pekka Heikkila

v5: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-6-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h  |  1 +
 drivers/gpu/drm/i915/intel_drv.h |  1 +
 drivers/gpu/drm/i915/intel_pm.c  | 50 +++++++++++++++++++++++++++-----
 3 files changed, 44 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 28fd200eb1fc..ba4a82193246 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1202,6 +1202,7 @@ struct skl_wm_level {
 struct skl_wm_params {
 	bool x_tiled, y_tiled;
 	bool rc_surface;
+	bool is_planar;
 	uint32_t width;
 	uint8_t cpp;
 	uint32_t plane_pixel_rate;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4eeaca350411..2c3c40d12136 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -602,6 +602,7 @@ struct intel_pipe_wm {
 
 struct skl_plane_wm {
 	struct skl_wm_level wm[8];
+	struct skl_wm_level uv_wm[8];
 	struct skl_wm_level trans_wm;
 	bool is_planar;
 };
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d22d9b2cbc62..fb30efa22c24 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4419,7 +4419,7 @@ static int
 skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 			    struct intel_crtc_state *cstate,
 			    const struct intel_plane_state *intel_pstate,
-			    struct skl_wm_params *wp)
+			    struct skl_wm_params *wp, int plane_id)
 {
 	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
 	const struct drm_plane_state *pstate = &intel_pstate->base;
@@ -4432,6 +4432,12 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 	if (!intel_wm_plane_visible(cstate, intel_pstate))
 		return 0;
 
+	/* only NV12 format has two planes */
+	if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+		DRM_DEBUG_KMS("Non-NV12 formats have a single plane\n");
+		return -EINVAL;
+	}
+
 	wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
 		      fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
 		      fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
@@ -4439,6 +4445,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 	wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
 	wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 			 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+	wp->is_planar = fb->format->format == DRM_FORMAT_NV12;
 
 	if (plane->id == PLANE_CURSOR) {
 		wp->width = intel_pstate->base.crtc_w;
@@ -4451,7 +4458,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 		wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	}
 
-	wp->cpp = fb->format->cpp[0];
+	if (plane_id == 1 && wp->is_planar)
+		wp->width /= 2;
+
+	wp->cpp = fb->format->cpp[plane_id];
 	wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
 							     intel_pstate);
 
@@ -4649,7 +4659,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 		      struct intel_crtc_state *cstate,
 		      const struct intel_plane_state *intel_pstate,
 		      const struct skl_wm_params *wm_params,
-		      struct skl_plane_wm *wm)
+		      struct skl_plane_wm *wm,
+		      int plane_id)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_plane *plane = intel_pstate->base.plane;
@@ -4657,15 +4668,19 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 	uint16_t ddb_blocks;
 	enum pipe pipe = intel_crtc->pipe;
 	int level, max_level = ilk_wm_max_level(dev_priv);
+	enum plane_id intel_plane_id = intel_plane->id;
 	int ret;
 
 	if (WARN_ON(!intel_pstate->base.fb))
 		return -EINVAL;
 
-	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
+	ddb_blocks = plane_id ?
+		     skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
+		     skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
 
 	for (level = 0; level <= max_level; level++) {
-		struct skl_wm_level *result = &wm->wm[level];
+		struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
+							  &wm->wm[level];
 
 		ret = skl_compute_plane_wm(dev_priv,
 					   cstate,
@@ -4792,20 +4807,39 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 
 		wm = &pipe_wm->planes[plane_id];
 		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
-		memset(&wm_params, 0, sizeof(struct skl_wm_params));
 
 		ret = skl_compute_plane_wm_params(dev_priv, cstate,
-						  intel_pstate, &wm_params);
+						  intel_pstate, &wm_params, 0);
 		if (ret)
 			return ret;
 
 		ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
-					    intel_pstate, &wm_params, wm);
+					    intel_pstate, &wm_params, wm, 0);
 		if (ret)
 			return ret;
+
 		skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
 					  ddb_blocks, &wm->trans_wm);
+
+		/* uv plane watermarks must also be validated for NV12/Planar */
+		if (wm_params.is_planar) {
+			memset(&wm_params, 0, sizeof(struct skl_wm_params));
+			wm->is_planar = true;
+
+			ret = skl_compute_plane_wm_params(dev_priv, cstate,
+							  intel_pstate,
+							  &wm_params, 1);
+			if (ret)
+				return ret;
+
+			ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
+						    intel_pstate, &wm_params,
+						    wm, 1);
+			if (ret)
+				return ret;
+		}
 	}
+
 	pipe_wm->linetime = skl_compute_linetime_wm(cstate);
 
 	return 0;

From 62027b7736d038309e93e6d5d25a9a72390821cb Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:05 +0530
Subject: [PATCH 0250/1286] drm/i915/skl+: pass skl_wm_level struct to wm
 compute func

This patch passes the skl_wm_level structure itself to the watermark
computation function skl_compute_plane_wm (instead of its individual
members). This reduces the number of arguments that need to be passed.
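
The shape of the refactor, reduced to a generic example (not the driver
prototypes themselves):

  struct wm_level_result {
          unsigned short blocks;
          unsigned char lines;
          int enabled;
  };

  /* Before: results returned through three out-parameters. */
  int compute_level_old(int level, unsigned short *out_blocks,
                        unsigned char *out_lines, int *out_enabled);

  /* After: results returned through one structure, mirroring skl_wm_level. */
  int compute_level_new(int level, struct wm_level_result *result);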

v2: Addressed review comments by Shashank Sharma

v3: Adding reviewed by tag from Shashank Sharma

v4: Added reviewed by from Juha-Pekka Heikkila

v5: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-7-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_pm.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index fb30efa22c24..06352c9e9ef6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4529,9 +4529,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				uint16_t ddb_allocation,
 				int level,
 				const struct skl_wm_params *wp,
-				uint16_t *out_blocks, /* out */
-				uint8_t *out_lines, /* out */
-				bool *enabled /* out */)
+				struct skl_wm_level *result /* out */)
 {
 	const struct drm_plane_state *pstate = &intel_pstate->base;
 	uint32_t latency = dev_priv->wm.skl_latency[level];
@@ -4545,7 +4543,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 
 	if (latency == 0 ||
 	    !intel_wm_plane_visible(cstate, intel_pstate)) {
-		*enabled = false;
+		result->plane_en = false;
 		return 0;
 	}
 
@@ -4626,7 +4624,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	if ((level > 0 && res_lines > 31) ||
 	    res_blocks >= ddb_allocation ||
 	    min_disp_buf_needed >= ddb_allocation) {
-		*enabled = false;
+		result->plane_en = false;
 
 		/*
 		 * If there are no valid level 0 watermarks, then we can't
@@ -4646,9 +4644,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	}
 
 	/* The number of lines are ignored for the level 0 watermark. */
-	*out_lines = level ? res_lines : 0;
-	*out_blocks = res_blocks;
-	*enabled = true;
+	result->plane_res_b = res_blocks;
+	result->plane_res_l = res_lines;
+	result->plane_en = true;
 
 	return 0;
 }
@@ -4688,9 +4686,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 					   ddb_blocks,
 					   level,
 					   wm_params,
-					   &result->plane_res_b,
-					   &result->plane_res_l,
-					   &result->plane_en);
+					   result);
 		if (ret)
 			return ret;
 	}

From 8b2b53ce94e808ef9340add94c4c50b9e5267413 Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:06 +0530
Subject: [PATCH 0251/1286] drm/i915/skl+: make sure higher latency level has
 higher wm value

The DDB allocation optimization algorithm requires/assumes that the DDB
value for any memory C-state level is at least as high as that of the
level below it. Render decompression requires each WM level to be at
least as high as WM level 0. This patch fulfils both requirements.
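
The clamp can be illustrated with a small self-contained sketch (the
level count and array layout are assumptions for illustration):

  #define NUM_WM_LEVELS 8

  /* Raise each level's block count to at least that of the level below,
   * which also keeps every level at or above level 0. */
  static void enforce_monotonic_levels(unsigned short blocks[NUM_WM_LEVELS])
  {
          int level;

          for (level = 1; level < NUM_WM_LEVELS; level++) {
                  if (blocks[level] < blocks[level - 1])
                          blocks[level] = blocks[level - 1];
          }
  }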

v2: Changed plane_num to plane_id in skl_compute_wm_levels

v3: Addressed review comments from Shashank Sharma; reworded the commit
message, which was unclear about what "as high as the level below"
referred to.

v4: Added reviewed by tag from Shashank Sharma

v5: Added reviewed by from Juha-Pekka Heikkila

v6: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-8-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_pm.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 06352c9e9ef6..707843012dff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4529,6 +4529,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				uint16_t ddb_allocation,
 				int level,
 				const struct skl_wm_params *wp,
+				const struct skl_wm_level *result_prev,
 				struct skl_wm_level *result /* out */)
 {
 	const struct drm_plane_state *pstate = &intel_pstate->base;
@@ -4596,6 +4597,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		} else {
 			res_blocks++;
 		}
+
+		/*
+		 * Make sure result blocks for higher latency levels are at least
+		 * as high as level below the current level.
+		 * Assumption in DDB algorithm optimization for special cases.
+		 * Also covers Display WA #1125 for RC.
+		 */
+		if (result_prev->plane_res_b > res_blocks)
+			res_blocks = result_prev->plane_res_b;
 	}
 
 	if (INTEL_GEN(dev_priv) >= 11) {
@@ -4679,6 +4689,13 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 	for (level = 0; level <= max_level; level++) {
 		struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
 							  &wm->wm[level];
+		struct skl_wm_level *result_prev;
+
+		if (level)
+			result_prev = plane_id ? &wm->uv_wm[level - 1] :
+						  &wm->wm[level - 1];
+		else
+			result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
 
 		ret = skl_compute_plane_wm(dev_priv,
 					   cstate,
@@ -4686,6 +4703,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 					   ddb_blocks,
 					   level,
 					   wm_params,
+					   result_prev,
 					   result);
 		if (ret)
 			return ret;

From 08d0e875aefe72c63076a768a368126ea74a1e3e Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:07 +0530
Subject: [PATCH 0252/1286] drm/i915/skl+: nv12 workaround disable WM level 1-7

Display Workaround #0826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A):
hardware sometimes fails to wake memory from package C-states when
fetching the last few lines of planar YUV 4:2:0 (NV12) planes. This
causes intermittent underflow and corruption.
WA: Disable package C-states, or do not enable latency levels 1
through 7 (WM1-WM7) on NV12 planes.
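
Expressed as a predicate, the gating condition is roughly (illustrative
only; the platform checks follow the workaround text above):

  /* Return non-zero if this watermark level must stay disabled for NV12. */
  static int nv12_wm_level_disabled(int is_planar, int level,
                                    int is_skl, int is_bxt, int is_cnl_a0)
  {
          return is_planar && level >= 1 && (is_skl || is_bxt || is_cnl_a0);
  }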

v2: Addressed review comments by Maarten.

v3: Adding reviewed by tag from Shashank Sharma

v4: Added reviewed by from Juha-Pekka Heikkila

v5: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-9-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_pm.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 707843012dff..9d5a7b3e9716 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4653,6 +4653,17 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		}
 	}
 
+	/*
+	 * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
+	 * disable wm level 1-7 on NV12 planes
+	 */
+	if (wp->is_planar && level >= 1 &&
+	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+	     IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
+		result->plane_en = false;
+		return 0;
+	}
+
 	/* The number of lines are ignored for the level 0 watermark. */
 	result->plane_res_b = res_blocks;
 	result->plane_res_l = res_lines;

From e1f96a66e72569f6277262eef23614236cc6dc15 Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Mon, 9 Apr 2018 09:11:08 +0530
Subject: [PATCH 0253/1286] drm/i915/skl: split skl_compute_ddb function

This patch splits the skl_compute_wm/ddb logic into two parts: one adds
all pipes affected by the commit to the atomic_state structure, and the
other computes the DDB.

v2: Added reviewed by tag from Shashank Sharma

v3: Added reviewed by from Juha-Pekka Heikkila

v4: Rebased the series

v5: Fixed checkpatch error. Changed *changed = true
to (*changed) = true;

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-10-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_pm.c | 157 ++++++++++++++++++--------------
 1 file changed, 88 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9d5a7b3e9716..007a12ebe725 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5059,69 +5059,16 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 static int
 skl_compute_ddb(struct drm_atomic_state *state)
 {
-	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct drm_i915_private *dev_priv = to_i915(state->dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct intel_crtc *intel_crtc;
 	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
-	uint32_t realloc_pipes = pipes_modified(state);
-	int ret;
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *cstate;
+	int ret, i;
 
-	/*
-	 * If this is our first atomic update following hardware readout,
-	 * we can't trust the DDB that the BIOS programmed for us.  Let's
-	 * pretend that all pipes switched active status so that we'll
-	 * ensure a full DDB recompute.
-	 */
-	if (dev_priv->wm.distrust_bios_wm) {
-		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
-				       state->acquire_ctx);
-		if (ret)
-			return ret;
-
-		intel_state->active_pipe_changes = ~0;
-
-		/*
-		 * We usually only initialize intel_state->active_crtcs if we
-		 * we're doing a modeset; make sure this field is always
-		 * initialized during the sanitization process that happens
-		 * on the first commit too.
-		 */
-		if (!intel_state->modeset)
-			intel_state->active_crtcs = dev_priv->active_crtcs;
-	}
-
-	/*
-	 * If the modeset changes which CRTC's are active, we need to
-	 * recompute the DDB allocation for *all* active pipes, even
-	 * those that weren't otherwise being modified in any way by this
-	 * atomic commit.  Due to the shrinking of the per-pipe allocations
-	 * when new active CRTC's are added, it's possible for a pipe that
-	 * we were already using and aren't changing at all here to suddenly
-	 * become invalid if its DDB needs exceeds its new allocation.
-	 *
-	 * Note that if we wind up doing a full DDB recompute, we can't let
-	 * any other display updates race with this transaction, so we need
-	 * to grab the lock on *all* CRTC's.
-	 */
-	if (intel_state->active_pipe_changes) {
-		realloc_pipes = ~0;
-		intel_state->wm_results.dirty_pipes = ~0;
-	}
-
-	/*
-	 * We're not recomputing for the pipes not included in the commit, so
-	 * make sure we start with the current state.
-	 */
 	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
 
-	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
-		struct intel_crtc_state *cstate;
-
-		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
-		if (IS_ERR(cstate))
-			return PTR_ERR(cstate);
-
+	for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
 		ret = skl_allocate_pipe_ddb(cstate, ddb);
 		if (ret)
 			return ret;
@@ -5183,23 +5130,23 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
 }
 
 static int
-skl_compute_wm(struct drm_atomic_state *state)
+skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
 {
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *cstate;
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct skl_ddb_values *results = &intel_state->wm_results;
 	struct drm_device *dev = state->dev;
-	struct skl_pipe_wm *pipe_wm;
-	bool changed = false;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
+	const struct drm_crtc *crtc;
+	const struct drm_crtc_state *cstate;
+	struct intel_crtc *intel_crtc;
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	uint32_t realloc_pipes = pipes_modified(state);
 	int ret, i;
 
 	/*
 	 * When we distrust bios wm we always need to recompute to set the
 	 * expected DDB allocations for each CRTC.
 	 */
-	if (to_i915(dev)->wm.distrust_bios_wm)
-		changed = true;
+	if (dev_priv->wm.distrust_bios_wm)
+		(*changed) = true;
 
 	/*
 	 * If this transaction isn't actually touching any CRTC's, don't
@@ -5210,14 +5157,86 @@ skl_compute_wm(struct drm_atomic_state *state)
 	 * hold _all_ CRTC state mutexes.
 	 */
 	for_each_new_crtc_in_state(state, crtc, cstate, i)
-		changed = true;
+		(*changed) = true;
 
-	if (!changed)
+	if (!*changed)
 		return 0;
 
+	/*
+	 * If this is our first atomic update following hardware readout,
+	 * we can't trust the DDB that the BIOS programmed for us.  Let's
+	 * pretend that all pipes switched active status so that we'll
+	 * ensure a full DDB recompute.
+	 */
+	if (dev_priv->wm.distrust_bios_wm) {
+		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+				       state->acquire_ctx);
+		if (ret)
+			return ret;
+
+		intel_state->active_pipe_changes = ~0;
+
+		/*
+		 * We usually only initialize intel_state->active_crtcs if we
+		 * we're doing a modeset; make sure this field is always
+		 * initialized during the sanitization process that happens
+		 * on the first commit too.
+		 */
+		if (!intel_state->modeset)
+			intel_state->active_crtcs = dev_priv->active_crtcs;
+	}
+
+	/*
+	 * If the modeset changes which CRTC's are active, we need to
+	 * recompute the DDB allocation for *all* active pipes, even
+	 * those that weren't otherwise being modified in any way by this
+	 * atomic commit.  Due to the shrinking of the per-pipe allocations
+	 * when new active CRTC's are added, it's possible for a pipe that
+	 * we were already using and aren't changing at all here to suddenly
+	 * become invalid if its DDB needs exceeds its new allocation.
+	 *
+	 * Note that if we wind up doing a full DDB recompute, we can't let
+	 * any other display updates race with this transaction, so we need
+	 * to grab the lock on *all* CRTC's.
+	 */
+	if (intel_state->active_pipe_changes) {
+		realloc_pipes = ~0;
+		intel_state->wm_results.dirty_pipes = ~0;
+	}
+
+	/*
+	 * We're not recomputing for the pipes not included in the commit, so
+	 * make sure we start with the current state.
+	 */
+	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
+		struct intel_crtc_state *cstate;
+
+		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(cstate))
+			return PTR_ERR(cstate);
+	}
+
+	return 0;
+}
+
+static int
+skl_compute_wm(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *cstate;
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct skl_ddb_values *results = &intel_state->wm_results;
+	struct skl_pipe_wm *pipe_wm;
+	bool changed = false;
+	int ret, i;
+
 	/* Clear all dirty flags */
 	results->dirty_pipes = 0;
 
+	ret = skl_ddb_add_affected_pipes(state, &changed);
+	if (ret || !changed)
+		return ret;
+
 	ret = skl_compute_ddb(state);
 	if (ret)
 		return ret;

From c4a4efa91737e61b3334642a61659fd64d7e31d6 Mon Sep 17 00:00:00 2001
From: Vidya Srinivas <vidya.srinivas@intel.com>
Date: Mon, 9 Apr 2018 09:11:09 +0530
Subject: [PATCH 0254/1286] drm/i915: Display WA 827

Display WA 827 applies to GEN9 (excluding GLK) and CNL.
Switching the plane format from NV12 to RGB and leaving the system idle
results in display underrun and corruption.
WA: Set bits 15 and 19 to 1b in the CLKGATE_DIS_PSL register for the
pipe on which the NV12 plane is enabled.
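
As an illustration of the register update (not the driver helper; a
read-modify-write of both bits is assumed here for both directions,
with the bit positions taken from the i915_reg.h hunk below):

  #define DUPS1_GATING_DIS        (1u << 15)
  #define DUPS2_GATING_DIS        (1u << 19)

  static unsigned int wa_827_apply(unsigned int clkgate_dis_psl, int enable)
  {
          const unsigned int bits = DUPS1_GATING_DIS | DUPS2_GATING_DIS;

          return enable ? (clkgate_dis_psl | bits)
                        : (clkgate_dis_psl & ~bits);
  }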

v2: Addressed review comments from Maarten and
Juha-Pekka Heikkila. Added reviewed by from
Juha-Pekka Heikkila.

v3: Rebased the series

Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Signed-off-by: Chandra Konduru <chandra.konduru@intel.com>
Signed-off-by: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-11-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h      |  3 +++
 drivers/gpu/drm/i915/intel_display.c | 34 ++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b3a6428aa71d..1f858e2c85f0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3824,6 +3824,9 @@ enum {
 #define _CLKGATE_DIS_PSL_A		0x46520
 #define _CLKGATE_DIS_PSL_B		0x46524
 #define _CLKGATE_DIS_PSL_C		0x46528
+#define   DUPS1_GATING_DIS		(1 << 15)
+#define   DUPS2_GATING_DIS		(1 << 19)
+#define   DUPS3_GATING_DIS		(1 << 23)
 #define   DPF_GATING_DIS		(1 << 10)
 #define   DPF_RAM_GATING_DIS		(1 << 9)
 #define   DPFR_GATING_DIS		(1 << 8)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 057f3cf95e86..466d2479a8fb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -488,6 +488,21 @@ static const struct intel_limit intel_limits_bxt = {
 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
 };
 
+static void
+skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
+{
+	if (IS_SKYLAKE(dev_priv))
+		return;
+
+	if (enable)
+		I915_WRITE(CLKGATE_DIS_PSL(pipe),
+			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+	else
+		I915_WRITE(CLKGATE_DIS_PSL(pipe),
+			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
+			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+}
+
 static bool
 needs_modeset(const struct drm_crtc_state *state)
 {
@@ -5103,6 +5118,8 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
 	struct intel_crtc_state *pipe_config =
 		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
@@ -5125,6 +5142,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 							 to_intel_plane(primary));
 		struct intel_plane_state *old_primary_state =
 			to_intel_plane_state(old_pri_state);
+		struct drm_framebuffer *fb = primary_state->base.fb;
 
 		intel_fbc_post_update(crtc);
 
@@ -5132,6 +5150,14 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 		    (needs_modeset(&pipe_config->base) ||
 		     !old_primary_state->base.visible))
 			intel_post_enable_primary(&crtc->base, pipe_config);
+
+		/* Display WA 827 */
+		if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
+		    IS_CANNONLAKE(dev_priv)) {
+			if (fb && fb->format->format == DRM_FORMAT_NV12)
+				skl_wa_clkgate(dev_priv, crtc->pipe, false);
+		}
+
 	}
 }
 
@@ -5158,6 +5184,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 							 to_intel_plane(primary));
 		struct intel_plane_state *old_primary_state =
 			to_intel_plane_state(old_pri_state);
+		struct drm_framebuffer *fb = primary_state->base.fb;
+
+		/* Display WA 827 */
+		if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
+		    IS_CANNONLAKE(dev_priv)) {
+			if (fb && fb->format->format == DRM_FORMAT_NV12)
+				skl_wa_clkgate(dev_priv, crtc->pipe, true);
+		}
 
 		intel_fbc_pre_update(crtc, pipe_config, primary_state);
 		/*

From 8ed30ab6acede16996ac140c45271f6b61a753a8 Mon Sep 17 00:00:00 2001
From: Vidya Srinivas <vidya.srinivas@intel.com>
Date: Mon, 9 Apr 2018 09:11:10 +0530
Subject: [PATCH 0255/1286] drm/i915: Enable YUV to RGB for Gen10 in Plane Ctrl
 Reg

If the fb format is YUV, enable the plane CSC mode bits
for the conversion.
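
The selection can be sketched as follows (illustrative only, not
glk_plane_color_ctl(); the enum stands in for the
PLANE_COLOR_CSC_MODE_* fields and the function is assumed to be called
only for YUV formats):

  enum csc_mode_model { CSC_YUV601_TO_RGB709, CSC_YUV709_TO_RGB709 };

  static enum csc_mode_model pick_csc_mode(int is_nv12, int encoding_is_bt709)
  {
          /* color_encoding may not be set for NV12, so force BT.709 for now. */
          if (is_nv12)
                  return CSC_YUV709_TO_RGB709;
          return encoding_is_bt709 ? CSC_YUV709_TO_RGB709
                                   : CSC_YUV601_TO_RGB709;
  }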

v2: Addressed review comments from Shashank Sharma
Alignment issue fixed in i915_reg.h

v3: Added Reviewed-by from Shashank Sharma

v4: Rebased the patch. As part of the rebase, re-used the
color series defines which are already merged.
plane_state->base.color_encoding might not be set for
NV12, so for now just use PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709
in glk_plane_color_ctl when the format is NV12.

v5: Added reviewed by from Juha-Pekka Heikkila

v6: Rebased the series

Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-12-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 466d2479a8fb..eb9d4e7f9160 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3630,6 +3630,11 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 	plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
 
 	if (intel_format_is_yuv(fb->format->format)) {
+		if (fb->format->format == DRM_FORMAT_NV12) {
+			plane_color_ctl |=
+				PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
+			goto out;
+		}
 		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
 		else
@@ -3638,7 +3643,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
 			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
 	}
-
+out:
 	return plane_color_ctl;
 }
 

From e6e1948c9fabe1a83c0c033ea6e5d83dfce8f38b Mon Sep 17 00:00:00 2001
From: Chandra Konduru <chandra.konduru@intel.com>
Date: Mon, 9 Apr 2018 09:11:11 +0530
Subject: [PATCH 0256/1286] drm/i915: Set scaler mode for NV12

This patch sets the appropriate scaler mode for the NV12 format.
In this mode, the Skylake scaler does either chroma upsampling only,
or chroma upsampling together with resolution scaling.
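
A minimal sketch of the resulting mode selection (hypothetical helper
name; the platform checks and define values are taken from the hunks
this patch adds):

  static u32 example_nv12_scaler_mode(struct drm_i915_private *dev_priv)
  {
  	/* Pre-GLK Gen9 parts use the two-bit mode field at 29:28. */
  	if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) &&
  	    !IS_SKYLAKE(dev_priv))
  		return SKL_PS_SCALER_MODE_NV12;	/* 2 << 28 */

  	/* GLK and Gen10+ instead set the single planar bit. */
  	return PS_SCALER_MODE_PLANAR;		/* 1 << 29 */
  }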

v2: Addressed review comments from Ville:
the NV12 case is to be checked first when
setting the scaler.

v3: Rebased (me)

v4: Rebased (me)

v5: Missed the Tested-by/Reviewed-by in the previous series.
Added them to the commit message in this version.

v6: Rebased (me)

v7: Rebased (me)

v8: Rebased (me)
Restricting the NV12 change for scaler to BXT and KBL
in this series.

v9: Rebased (me)

v10: As of now, NV12 has been tested on Gen9 and Gen10. However,
the code is applicable to all GEN >= 9, so the change is made
generic. The comments under v8 are no longer valid.

v11: Addressed review comments by Shashank Sharma.
For Gen10+, the scaler mode to be set is planar or normal
(a single bit). Changed the code to be applicable to all
gens.

v12: Addressed review comments from Shashank Sharma.
For Gen9 (apart from GLK), bits 28:29 are to be programmed
in PS_CTRL for NV12. For GLK and Gen10+, bit 29 is to be
set for all planar formats.

v13: Addressed review comments from Juha-Pekka Heikkila:
"NV12 not to be supported by SKL"
Added Reviewed-by tag from Shashank Sharma.

v14: Added reviewed by from Juha-Pekka Heikkila

v15: Rebased the series

Tested-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Reviewed-by: Clinton Taylor <clinton.a.taylor@intel.com>
Signed-off-by: Chandra Konduru <chandra.konduru@intel.com>
Signed-off-by: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-13-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h     |  2 ++
 drivers/gpu/drm/i915/intel_atomic.c | 14 ++++++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1f858e2c85f0..fb106026a1f4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6664,6 +6664,8 @@ enum {
 #define PS_SCALER_MODE_MASK (3 << 28)
 #define PS_SCALER_MODE_DYN  (0 << 28)
 #define PS_SCALER_MODE_HQ  (1 << 28)
+#define SKL_PS_SCALER_MODE_NV12 (2 << 28)
+#define PS_SCALER_MODE_PLANAR (1 << 29)
 #define PS_PLANE_SEL_MASK  (7 << 25)
 #define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
 #define PS_FILTER_MASK         (3 << 23)
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index e9fb692076d7..bb8c1687823e 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -328,8 +328,18 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 		}
 
 		/* set scaler mode */
-		if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
-			scaler_state->scalers[*scaler_id].mode = 0;
+		if ((INTEL_GEN(dev_priv) >= 9) &&
+		    plane_state && plane_state->base.fb &&
+		    plane_state->base.fb->format->format ==
+		    DRM_FORMAT_NV12) {
+			if (INTEL_GEN(dev_priv) == 9 &&
+			    !IS_GEMINILAKE(dev_priv) &&
+			    !IS_SKYLAKE(dev_priv))
+				scaler_state->scalers[*scaler_id].mode =
+					SKL_PS_SCALER_MODE_NV12;
+			else
+				scaler_state->scalers[*scaler_id].mode =
+					PS_SCALER_MODE_PLANAR;
 		} else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
 			/*
 			 * when only 1 scaler is in use on either pipe A or B,

From a589b1384593c6a9df8b808e5a0aa4280de5edc2 Mon Sep 17 00:00:00 2001
From: Chandra Konduru <chandra.konduru@intel.com>
Date: Mon, 9 Apr 2018 09:11:12 +0530
Subject: [PATCH 0257/1286] drm/i915: Update format_is_yuv() to include NV12

This patch adds NV12 to the format_is_yuv() function
for sprite planes.

v2:
-Use intel_ prefix for format_is_yuv (Ville)

v3: Rebased (me)

v4: Rebased and addressed review comments from Clinton A Taylor.
"static function in intel_sprite.c is not available
to the primary plane functions".
Changed commit message - function modified for
sprite planes.

v5: Missed the Tested-by/Reviewed-by in the previous series.
Added them to the commit message in this version.

v6: Rebased (me)

v7: Rebased (me)

v8: Rebased (me)

v9: Rebased (me)

v10: Changed the intel_format_is_yuv function from
static to non-static, since it is needed later from
other files for format checks.

v11: Rebased the patch. format_is_yuv has already
been renamed to intel_format_is_yuv in the color
patch series, which is already merged, and the
previously static function has already been made
non-static. After the rebase this patch therefore
only adds NV12 to intel_format_is_yuv.

v12: Added reviewed by from Juha-Pekka Heikkila

v13/v14/v15: Rebased the series

Tested-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Chandra Konduru <chandra.konduru@intel.com>
Signed-off-by: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-14-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_drv.h    | 1 +
 drivers/gpu/drm/i915/intel_sprite.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2c3c40d12136..a6d7d856ea62 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2055,6 +2055,7 @@ void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
 bool skl_plane_get_hw_state(struct intel_plane *plane);
 bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
 		       enum pipe pipe, enum plane_id plane_id);
+bool intel_format_is_yuv(uint32_t format);
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index dbdcf85032df..0652e583b03d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,6 +48,7 @@ bool intel_format_is_yuv(u32 format)
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_VYUY:
 	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_NV12:
 		return true;
 	default:
 		return false;

From 77224cd59eae67acfc1259f0756957b10ec7c3b5 Mon Sep 17 00:00:00 2001
From: Chandra Konduru <chandra.konduru@intel.com>
Date: Mon, 9 Apr 2018 09:11:13 +0530
Subject: [PATCH 0258/1286] drm/i915: Upscale scaler max scale for NV12

This patch updates the scaler max scale limit for NV12.
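
For reference, a sketch of the resulting 16.16 fixed-point limit
calculation (hypothetical helper name, usual kernel headers assumed;
it mirrors the skl_max_scale hunk below):

  static int example_max_scale(u32 pixel_format, int max_dotclk, int crtc_clock)
  {
  	/* NV12 is capped at just under 2.0x, other formats just under 3.0x. */
  	int mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
  	int fmt_limit = (1 << 16) * mult - 1;
  	/* Limit imposed by the max dotclock to crtc clock ratio, also 16.16. */
  	int clk_limit = (1 << 8) * ((max_dotclk << 8) / crtc_clock);

  	return min(fmt_limit, clk_limit);
  }

For example, with max_dotclk = 648000 and crtc_clock = 148500 the clock
term is roughly 4.36 in 16.16 fixed point, so the NV12 path stays capped
at just under 2.0 while other formats are capped at just under 3.0.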

v2: Rebased (me)

v3: Rebased (me)

v4: Missed the Tested-by/Reviewed-by in the previous series.
Added them to the commit message in this version.

v5: Addressed review comments from Ville and rebased
- calculation of max_scale to be made
less convoluted by splitting it up a bit
- Indentation errors to be fixed in the series

v6: Rebased (me)
Addressed review comments from Bob J. Paauwe.
The calculation split done in the previous version
was wrong; fixed that issue here.

v7: Rebased (me)

v8: Rebased (me)

v9: Rebased (me)

v10: Rebased (me)

v11: Addressed review comments from Shashank Sharma.
Fixed alignment issues.
When the call to skl_update_scaler was made, 0 was
being sent instead of pixel_format.
When the crtc scaler update is called, we don't have
the fb to derive the pixel format from, so added the
function parameter bool plane_scaler_check to account
for this.

v12: Fixed a failure in the IGT debugfs_test:
fb is NULL in skl_update_scaler_plane, so accessing
fb->format caused the failure. The patch now checks
fb before using it.

v13: In the previous version there was a flaw.
In skl_update_scaler, during the plane_scaler_check,
a non-NV12 format would set need_scaling to false,
which could reset a need_scaling value set by a
previous condition check. The patch fixes this.
It also sets the minimum src height for YUV 420
formats to 16 (as defined in BSpec) and adds a check
for this range.

v14: Addressed review comments from Maarten:
just add a check for the NV12 minimum src height in
skl_update_scaler and retain the remaining checks
as they are. Added Reviewed-by from Juha-Pekka Heikkila.

v15: Rebased the series.

v16: Changed fb height restriction to be >= 16 as per
Bspec. Earlier it was > 16.

v17: Added a restriction that src width and height be a multiple
of 4, to avoid pipe FIFO underruns for NV12.

Credits-to: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tested-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Signed-off-by: Chandra Konduru <chandra.konduru@intel.com>
Signed-off-by: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523245273-30264-15-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 48 +++++++++++++++++++++-------
 drivers/gpu/drm/i915/intel_drv.h     |  5 ++-
 drivers/gpu/drm/i915/intel_sprite.c  |  6 +++-
 3 files changed, 46 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index eb9d4e7f9160..fec3d6dd5c60 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3483,6 +3483,8 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
 	case DRM_FORMAT_VYUY:
 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+	case DRM_FORMAT_NV12:
+		return PLANE_CTL_FORMAT_NV12;
 	default:
 		MISSING_CASE(pixel_format);
 	}
@@ -4724,7 +4726,9 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		  unsigned int scaler_user, int *scaler_id,
-		  int src_w, int src_h, int dst_w, int dst_h)
+		  int src_w, int src_h, int dst_w, int dst_h,
+		  bool plane_scaler_check,
+		  uint32_t pixel_format)
 {
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc_state->scaler_state;
@@ -4742,6 +4746,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	 */
 	need_scaling = src_w != dst_w || src_h != dst_h;
 
+	if (plane_scaler_check)
+		if (pixel_format == DRM_FORMAT_NV12)
+			need_scaling = true;
+
 	if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
 		need_scaling = true;
 
@@ -4781,6 +4789,13 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		return 0;
 	}
 
+	if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
+	    (src_h < SKL_MIN_YUV_420_SRC_H || (src_w % 4) != 0 ||
+	     (src_h % 4) != 0)) {
+		DRM_DEBUG_KMS("NV12: src dimensions not met\n");
+		return -EINVAL;
+	}
+
 	/* range checks */
 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
 	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
@@ -4820,9 +4835,10 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 
 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-		&state->scaler_state.scaler_id,
-		state->pipe_src_w, state->pipe_src_h,
-		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
+				 &state->scaler_state.scaler_id,
+				 state->pipe_src_w, state->pipe_src_h,
+				 adjusted_mode->crtc_hdisplay,
+				 adjusted_mode->crtc_vdisplay, false, 0);
 }
 
 /**
@@ -4851,7 +4867,8 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 				drm_rect_width(&plane_state->base.src) >> 16,
 				drm_rect_height(&plane_state->base.src) >> 16,
 				drm_rect_width(&plane_state->base.dst),
-				drm_rect_height(&plane_state->base.dst));
+				drm_rect_height(&plane_state->base.dst),
+				fb ? true : false, fb ? fb->format->format : 0);
 
 	if (ret || plane_state->scaler_id < 0)
 		return ret;
@@ -4877,6 +4894,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_NV12:
 		break;
 	default:
 		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
@@ -12877,11 +12895,13 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 }
 
 int
-skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
+skl_max_scale(struct intel_crtc *intel_crtc,
+	      struct intel_crtc_state *crtc_state,
+	      uint32_t pixel_format)
 {
 	struct drm_i915_private *dev_priv;
-	int max_scale;
-	int crtc_clock, max_dotclk;
+	int max_scale, mult;
+	int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
 
 	if (!intel_crtc || !crtc_state->base.enable)
 		return DRM_PLANE_HELPER_NO_SCALING;
@@ -12903,8 +12923,10 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
 	 *            or
 	 *    cdclk/crtc_clock
 	 */
-	max_scale = min((1 << 16) * 3 - 1,
-			(1 << 8) * ((max_dotclk << 8) / crtc_clock));
+	mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
+	tmpclk1 = (1 << 16) * mult - 1;
+	tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
+	max_scale = min(tmpclk1, tmpclk2);
 
 	return max_scale;
 }
@@ -12920,12 +12942,16 @@ intel_check_primary_plane(struct intel_plane *plane,
 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	bool can_position = false;
 	int ret;
+	uint32_t pixel_format = 0;
 
 	if (INTEL_GEN(dev_priv) >= 9) {
 		/* use scaler when colorkey is not required */
 		if (!state->ckey.flags) {
 			min_scale = 1;
-			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+			if (state->base.fb)
+				pixel_format = state->base.fb->format->format;
+			max_scale = skl_max_scale(to_intel_crtc(crtc),
+						  crtc_state, pixel_format);
 		}
 		can_position = true;
 	}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a6d7d856ea62..b2e0fa04ef5b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -552,6 +552,8 @@ struct intel_initial_plane_config {
 #define ICL_MAX_SRC_H 4096
 #define ICL_MAX_DST_W 5120
 #define ICL_MAX_DST_H 4096
+#define SKL_MIN_YUV_420_SRC_W 16
+#define SKL_MIN_YUV_420_SRC_H 16
 
 struct intel_scaler {
 	int in_use;
@@ -1597,7 +1599,8 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
 
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
+int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+		  uint32_t pixel_format);
 
 static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
 {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0652e583b03d..aa1dfaa692b9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -947,6 +947,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
 	int max_scale, min_scale;
 	bool can_scale;
 	int ret;
+	uint32_t pixel_format = 0;
 
 	*src = drm_plane_state_src(&state->base);
 	*dst = drm_plane_state_dest(&state->base);
@@ -970,11 +971,14 @@ intel_check_sprite_plane(struct intel_plane *plane,
 
 	/* setup can_scale, min_scale, max_scale */
 	if (INTEL_GEN(dev_priv) >= 9) {
+		if (state->base.fb)
+			pixel_format = state->base.fb->format->format;
 		/* use scaler when colorkey is not required */
 		if (!state->ckey.flags) {
 			can_scale = 1;
 			min_scale = 1;
-			max_scale = skl_max_scale(crtc, crtc_state);
+			max_scale =
+				skl_max_scale(crtc, crtc_state, pixel_format);
 		} else {
 			can_scale = 0;
 			min_scale = DRM_PLANE_HELPER_NO_SCALING;

From 19d3cf00cda6a3543c6fe2c75b674efab4a20eb6 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Fri, 6 Apr 2018 12:44:07 +0100
Subject: [PATCH 0259/1286] drm/i915: Enclose for_each_engine_masked macro
 arguments in parentheses

Enclose for_each_engine_masked macro arguments in parentheses.
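
A minimal illustration of the hazard this guards against (hypothetical
macro, not the real iterator):

  #define ACTIVE_BAD(dev, mask)  (mask & (dev)->ring_mask)
  #define ACTIVE_OK(dev, mask)   ((mask) & (dev)->ring_mask)

  /*
   * ACTIVE_BAD(dev, a | b) expands to (a | b & dev->ring_mask), which is
   * a | (b & dev->ring_mask) because '&' binds tighter than '|'.
   * ACTIVE_OK(dev, a | b) expands to ((a | b) & dev->ring_mask) as intended.
   */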

v2:
 * Fixup whitespace to satisfy checkpatch.
 * Likewise reformat to 80 chars.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180406114407.25360-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/i915_drv.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ba4a82193246..649c0f2f3bae 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2151,8 +2151,10 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
 
 /* Iterator over subset of engines selected by mask */
 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
-	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
-	     tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+	for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \
+	     (tmp__) ? \
+	     ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
+	     0;)
 
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */

From 0c5c7df360dbcfefac61ebd118c8551acf714d79 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Fri, 6 Apr 2018 13:35:14 +0100
Subject: [PATCH 0260/1286] drm/i915/execlists: Log fence context & seqno
 throughout GEM_TRACE

Include the fence context and seqno in low-level tracing so it is easier
to follow the flow of individual requests when things go bad.

Also added tracing on the reset side of things.

v2:
 Chris Wilson:
 * Standardize global_seqno and seqno as global.
 * Include current hws seqno in execlists_cancel_port_requests.

v3:
 * Fix port printk format for all builds.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> # v2
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180406123514.5809-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/i915_request.c |  6 +++---
 drivers/gpu/drm/i915/intel_lrc.c    | 22 +++++++++++++++++-----
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 629f3e860592..9ca9c24b4421 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -343,7 +343,7 @@ static void i915_request_retire(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_active *active, *next;
 
-	GEM_TRACE("%s fence %llx:%d, global_seqno %d, current %d\n",
+	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  request->global_seqno,
@@ -466,7 +466,7 @@ void __i915_request_submit(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	u32 seqno;
 
-	GEM_TRACE("%s fence %llx:%d -> global_seqno %d, current %d\n",
+	GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  engine->timeline->seqno + 1,
@@ -516,7 +516,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
-	GEM_TRACE("%s fence %llx:%d <- global_seqno %d, current %d\n",
+	GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  request->global_seqno,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3592288e4696..02b25bf2378a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -468,10 +468,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-			GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%d (current %d), prio=%d\n",
+			GEM_TRACE("%s in[%d]:  ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
 				  engine->name, n,
 				  port[n].context_id, count,
 				  rq->global_seqno,
+				  rq->fence.context, rq->fence.seqno,
 				  intel_engine_get_seqno(engine),
 				  rq_prio(rq));
 		} else {
@@ -742,6 +743,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	while (num_ports-- && port_isset(port)) {
 		struct i915_request *rq = port_request(port);
 
+		GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
+			  rq->engine->name,
+			  (unsigned int)(port - execlists->port),
+			  rq->global_seqno,
+			  rq->fence.context, rq->fence.seqno,
+			  intel_engine_get_seqno(rq->engine));
+
 		GEM_BUG_ON(!execlists->active);
 		intel_engine_context_out(rq->engine);
 
@@ -817,7 +825,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
-	GEM_TRACE("%s\n", engine->name);
+	GEM_TRACE("%s current %d\n",
+		  engine->name, intel_engine_get_seqno(engine));
 
 	/*
 	 * Before we call engine->cancel_requests(), we should have exclusive
@@ -1014,10 +1023,12 @@ static void execlists_submission_tasklet(unsigned long data)
 							EXECLISTS_ACTIVE_USER));
 
 			rq = port_unpack(port, &count);
-			GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%d (current %d), prio=%d\n",
+			GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
 				  engine->name,
 				  port->context_id, count,
 				  rq ? rq->global_seqno : 0,
+				  rq ? rq->fence.context : 0,
+				  rq ? rq->fence.seqno : 0,
 				  intel_engine_get_seqno(engine),
 				  rq ? rq_prio(rq) : 0);
 
@@ -1744,8 +1755,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	struct intel_context *ce;
 	unsigned long flags;
 
-	GEM_TRACE("%s seqno=%x\n",
-		  engine->name, request ? request->global_seqno : 0);
+	GEM_TRACE("%s request global=%x, current=%d\n",
+		  engine->name, request ? request->global_seqno : 0,
+		  intel_engine_get_seqno(engine));
 
 	/* See execlists_cancel_requests() for the irq/spinlock split. */
 	local_irq_save(flags);

From cd27d88fba98660da870c00eda5ac0ea7969fc8e Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Mon, 9 Apr 2018 14:46:53 +0200
Subject: [PATCH 0261/1286] drm/i915: Change use get_new_plane_state instead of
 existing plane state
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The get_existing macros are deprecated and should be replaced by
get_old/new_state for clarity.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180409124656.39886-1-maarten.lankhorst@linux.intel.com
[mlankhorst: Remove useless warn. (Ville)]
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_atomic.c |  5 +++--
 drivers/gpu/drm/i915/intel_drv.h    | 11 -----------
 drivers/gpu/drm/i915/intel_pm.c     |  2 --
 3 files changed, 3 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index bb8c1687823e..40285d1b91b7 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -227,6 +227,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc_state->scaler_state;
 	struct drm_atomic_state *drm_state = crtc_state->base.state;
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
 	int num_scalers_need;
 	int i, j;
 
@@ -304,8 +305,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 				continue;
 			}
 
-			plane_state = intel_atomic_get_existing_plane_state(drm_state,
-									    intel_plane);
+			plane_state = intel_atomic_get_new_plane_state(intel_state,
+								       intel_plane);
 			scaler_id = &plane_state->scaler_id;
 		}
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b2e0fa04ef5b..e545aa673bd9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2109,17 +2109,6 @@ intel_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
 		return NULL;
 }
 
-static inline struct intel_plane_state *
-intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
-				      struct intel_plane *plane)
-{
-	struct drm_plane_state *plane_state;
-
-	plane_state = drm_atomic_get_existing_plane_state(state, &plane->base);
-
-	return to_intel_plane_state(plane_state);
-}
-
 int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 			       struct intel_crtc *intel_crtc,
 			       struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 007a12ebe725..4baab858e442 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5037,8 +5037,6 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
 	struct drm_plane *plane;
 	enum pipe pipe = intel_crtc->pipe;
 
-	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
-
 	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
 		enum plane_id plane_id = to_intel_plane(plane)->id;
 

From 70c7183fbe96a42292df598288d32c2a172ff12f Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Mon, 9 Apr 2018 14:46:54 +0200
Subject: [PATCH 0262/1286] drm/i915: Remove get_existing_crtc_state
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

get_existing_crtc_state is currently unused; get rid of it.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180409124656.39886-2-maarten.lankhorst@linux.intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_drv.h | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e545aa673bd9..9969309132d0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2095,20 +2095,6 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
 	return to_intel_crtc_state(crtc_state);
 }
 
-static inline struct intel_crtc_state *
-intel_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
-				     struct intel_crtc *crtc)
-{
-	struct drm_crtc_state *crtc_state;
-
-	crtc_state = drm_atomic_get_existing_crtc_state(state, &crtc->base);
-
-	if (crtc_state)
-		return to_intel_crtc_state(crtc_state);
-	else
-		return NULL;
-}
-
 int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 			       struct intel_crtc *intel_crtc,
 			       struct intel_crtc_state *crtc_state);

From 8b69449d26637551c4145731e684cf1bb2478393 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Mon, 9 Apr 2018 14:46:55 +0200
Subject: [PATCH 0263/1286] drm/i915: Remove last references to
 drm_atomic_get_existing* macros
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

All the references to get_existing_state can be converted to
get_new_state or get_old_state, which means that i915 is now
get_existing_state free.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180409124656.39886-3-maarten.lankhorst@linux.intel.com
[mlankhorst: Fix alignment in prepare_plane_fb. (Ville)]
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_display.c | 53 +++++++++++++---------------
 1 file changed, 24 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fec3d6dd5c60..8fe805983be8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5148,8 +5148,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
 						crtc);
 	struct drm_plane *primary = crtc->base.primary;
-	struct drm_plane_state *old_pri_state =
-		drm_atomic_get_existing_plane_state(old_state, primary);
+	struct drm_plane_state *old_primary_state =
+		drm_atomic_get_old_plane_state(old_state, primary);
 
 	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
 
@@ -5159,19 +5159,16 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
 		hsw_enable_ips(pipe_config);
 
-	if (old_pri_state) {
-		struct intel_plane_state *primary_state =
-			intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state),
-							 to_intel_plane(primary));
-		struct intel_plane_state *old_primary_state =
-			to_intel_plane_state(old_pri_state);
-		struct drm_framebuffer *fb = primary_state->base.fb;
+	if (old_primary_state) {
+		struct drm_plane_state *new_primary_state =
+			drm_atomic_get_new_plane_state(old_state, primary);
+		struct drm_framebuffer *fb = new_primary_state->fb;
 
 		intel_fbc_post_update(crtc);
 
-		if (primary_state->base.visible &&
+		if (new_primary_state->visible &&
 		    (needs_modeset(&pipe_config->base) ||
-		     !old_primary_state->base.visible))
+		     !old_primary_state->visible))
 			intel_post_enable_primary(&crtc->base, pipe_config);
 
 		/* Display WA 827 */
@@ -5192,8 +5189,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
 	struct drm_plane *primary = crtc->base.primary;
-	struct drm_plane_state *old_pri_state =
-		drm_atomic_get_existing_plane_state(old_state, primary);
+	struct drm_plane_state *old_primary_state =
+		drm_atomic_get_old_plane_state(old_state, primary);
 	bool modeset = needs_modeset(&pipe_config->base);
 	struct intel_atomic_state *old_intel_state =
 		to_intel_atomic_state(old_state);
@@ -5201,13 +5198,11 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
 		hsw_disable_ips(old_crtc_state);
 
-	if (old_pri_state) {
-		struct intel_plane_state *primary_state =
+	if (old_primary_state) {
+		struct intel_plane_state *new_primary_state =
 			intel_atomic_get_new_plane_state(old_intel_state,
 							 to_intel_plane(primary));
-		struct intel_plane_state *old_primary_state =
-			to_intel_plane_state(old_pri_state);
-		struct drm_framebuffer *fb = primary_state->base.fb;
+		struct drm_framebuffer *fb = new_primary_state->base.fb;
 
 		/* Display WA 827 */
 		if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
@@ -5216,13 +5211,13 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 				skl_wa_clkgate(dev_priv, crtc->pipe, true);
 		}
 
-		intel_fbc_pre_update(crtc, pipe_config, primary_state);
+		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
 		/*
 		 * Gen2 reports pipe underruns whenever all planes are disabled.
 		 * So disable underrun reporting before all the planes get disabled.
 		 */
-		if (IS_GEN2(dev_priv) && old_primary_state->base.visible &&
-		    (modeset || !primary_state->base.visible))
+		if (IS_GEN2(dev_priv) && old_primary_state->visible &&
+		    (modeset || !new_primary_state->base.visible))
 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
 	}
 
@@ -10834,7 +10829,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
 		struct drm_connector_state *connector_state;
 		struct intel_encoder *encoder;
 
-		connector_state = drm_atomic_get_existing_connector_state(state, connector);
+		connector_state = drm_atomic_get_new_connector_state(state, connector);
 		if (!connector_state)
 			connector_state = connector->state;
 
@@ -12197,6 +12192,9 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
 	bool modeset = needs_modeset(new_crtc_state);
+	struct intel_plane_state *new_plane_state =
+		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
+						 to_intel_plane(crtc->primary));
 
 	if (modeset) {
 		update_scanline_offset(intel_crtc);
@@ -12209,11 +12207,8 @@ static void intel_update_crtc(struct drm_crtc *crtc,
 				       pipe_config);
 	}
 
-	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
-		intel_fbc_enable(
-		    intel_crtc, pipe_config,
-		    to_intel_plane_state(crtc->primary->state));
-	}
+	if (new_plane_state)
+		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
 
 	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 }
@@ -12794,8 +12789,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	if (old_obj) {
 		struct drm_crtc_state *crtc_state =
-			drm_atomic_get_existing_crtc_state(new_state->state,
-							   plane->state->crtc);
+			drm_atomic_get_new_crtc_state(new_state->state,
+						      plane->state->crtc);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the

From 1b85147b4b8fb90da51b6e94a3e6c30469bf1de1 Mon Sep 17 00:00:00 2001
From: Imre Deak <imre.deak@intel.com>
Date: Mon, 9 Apr 2018 15:27:16 +0300
Subject: [PATCH 0264/1286] drm/i915/gen9_lp: Increase DDI PHY0 power well
 enabling timeout
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On GLK sporadic timeouts occur during PHY0 enabling. Based on logs it looks
like they happen sometime after a system suspend/resume cycle, with the
same power well enabling succeeding both before and after the failed
one and no other problems observed. The current timeout in the code is
not actually specified by BSpec, so let's try to increase that until a
BSpec update.
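
For context, a generic sketch of the mask/value poll the helper performs
(not the actual intel_wait_for_register_fw implementation; usual kernel
headers assumed):

  static int example_poll(void __iomem *reg, u32 mask, u32 done,
  			unsigned int timeout_us)
  {
  	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

  	for (;;) {
  		if ((readl(reg) & mask) == done)
  			return 0;
  		if (ktime_after(ktime_get(), deadline))
  			return -ETIMEDOUT;
  		usleep_range(10, 50);
  	}
  }

Here mask = PHY_RESERVED | PHY_POWER_GOOD and done = PHY_POWER_GOOD, i.e.
we are done once the reserved bit reads 0 and power-good reads 1, and the
timeout becomes 1 ms rather than 100 us.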

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105771
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180409122716.4055-1-imre.deak@intel.com
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_dpio_phy.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index c8e9e44e5981..00b3ab656b06 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -380,13 +380,14 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 	 * all 1s.  Eventually they become accessible as they power up, then
 	 * the reserved bit will give the default 0.  Poll on the reserved bit
 	 * becoming 0 to find when the PHY is accessible.
-	 * HW team confirmed that the time to reach phypowergood status is
-	 * anywhere between 50 us and 100us.
+	 * The flag should get set in 100us according to the HW team, but
+	 * use 1ms due to occasional timeouts observed with that.
 	 */
-	if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
-		(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
+	if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+				       PHY_RESERVED | PHY_POWER_GOOD,
+				       PHY_POWER_GOOD,
+				       1))
 		DRM_ERROR("timeout during PHY%d power on\n", phy);
-	}
 
 	/* Program PLL Rcomp code offset */
 	val = I915_READ(BXT_PORT_CL1CM_DW9(phy));

From daeb725e919c0d2d4b628aeaa1fa053125f888b2 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 5 Apr 2018 12:49:15 +0100
Subject: [PATCH 0265/1286] drm/i915/psr: Chase psr.enabled only under the
 psr.lock

Inside the psr work function, we want to wait for PSR to idle first and
wish to do so without blocking the normal modeset path, so we do so
without holding the PSR lock. However, we first have to find which pipe
PSR was enabled on, which requires chasing into the PSR struct and
requires locking to prevent intel_psr_disable() from concurrently
setting our pointer to NULL.
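
A sketch of the resulting locking pattern (pick_status_reg and
wait_for_idle are hypothetical names, showing only the structure):

  mutex_lock(&psr->lock);

  intel_dp = psr->enabled;	/* chase the pointer under the lock */
  if (intel_dp) {
  	pick_status_reg(intel_dp, &reg, &mask);	/* derive reg/mask while locked */

  	mutex_unlock(&psr->lock);
  	err = wait_for_idle(reg, mask);		/* slow wait, lock dropped */
  	mutex_lock(&psr->lock);

  	/* Re-validate: intel_psr_disable() may have run meanwhile. */
  	if (!err && psr->enabled)
  		intel_psr_activate(psr->enabled);
  }

  mutex_unlock(&psr->lock);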

Fixes: 995d30477496 ("drm/i915: VLV/CHV PSR Software timer mode")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Durgadoss R <durgadoss.r@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: <stable@vger.kernel.org> # v4.0+
Reviewed-by: Jose Roberto de Souza <jose.souza@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405114915.29609-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_psr.c | 84 +++++++++++++++++---------------
 1 file changed, 45 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2d53f7398a6d..69a5b276f4d8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -775,53 +775,59 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 	cancel_delayed_work_sync(&dev_priv->psr.work);
 }
 
+static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
+{
+	struct intel_dp *intel_dp;
+	i915_reg_t reg;
+	u32 mask;
+	int err;
+
+	intel_dp = dev_priv->psr.enabled;
+	if (!intel_dp)
+		return false;
+
+	if (HAS_DDI(dev_priv)) {
+		if (dev_priv->psr.psr2_enabled) {
+			reg = EDP_PSR2_STATUS;
+			mask = EDP_PSR2_STATUS_STATE_MASK;
+		} else {
+			reg = EDP_PSR_STATUS;
+			mask = EDP_PSR_STATUS_STATE_MASK;
+		}
+	} else {
+		struct drm_crtc *crtc =
+			dp_to_dig_port(intel_dp)->base.base.crtc;
+		enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+		reg = VLV_PSRSTAT(pipe);
+		mask = VLV_EDP_PSR_IN_TRANS;
+	}
+
+	mutex_unlock(&dev_priv->psr.lock);
+
+	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+	if (err)
+		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+
+	/* After the unlocked wait, verify that PSR is still wanted! */
+	mutex_lock(&dev_priv->psr.lock);
+	return err == 0 && dev_priv->psr.enabled;
+}
+
 static void intel_psr_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), psr.work.work);
-	struct intel_dp *intel_dp = dev_priv->psr.enabled;
-	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
-	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
-	/* We have to make sure PSR is ready for re-enable
+	mutex_lock(&dev_priv->psr.lock);
+
+	/*
+	 * We have to make sure PSR is ready for re-enable
 	 * otherwise it keeps disabled until next full enable/disable cycle.
 	 * PSR might take some time to get fully disabled
 	 * and be ready for re-enable.
 	 */
-	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.psr2_enabled) {
-			if (intel_wait_for_register(dev_priv,
-						    EDP_PSR2_STATUS,
-						    EDP_PSR2_STATUS_STATE_MASK,
-						    0,
-						    50)) {
-				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
-				return;
-			}
-		} else {
-			if (intel_wait_for_register(dev_priv,
-						    EDP_PSR_STATUS,
-						    EDP_PSR_STATUS_STATE_MASK,
-						    0,
-						    50)) {
-				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
-				return;
-			}
-		}
-	} else {
-		if (intel_wait_for_register(dev_priv,
-					    VLV_PSRSTAT(pipe),
-					    VLV_EDP_PSR_IN_TRANS,
-					    0,
-					    1)) {
-			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
-			return;
-		}
-	}
-	mutex_lock(&dev_priv->psr.lock);
-	intel_dp = dev_priv->psr.enabled;
-
-	if (!intel_dp)
+	if (!psr_wait_for_idle(dev_priv))
 		goto unlock;
 
 	/*
@@ -832,7 +838,7 @@ static void intel_psr_work(struct work_struct *work)
 	if (dev_priv->psr.busy_frontbuffer_bits)
 		goto unlock;
 
-	intel_psr_activate(intel_dp);
+	intel_psr_activate(dev_priv->psr.enabled);
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
 }

From d52ad9cb9d6d3b696d6b7ad20a381a8f5520ea03 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Wed, 28 Mar 2018 12:05:26 +0200
Subject: [PATCH 0266/1286] drm/i915: Add debugfs file to clear FIFO underruns.

Adding an i915_fifo_underrun_reset debugfs file will make it possible
for IGT tests to clear FIFO underrun fallout at the start of each
subtest, and to re-enable FBC, so tests always have maximum exposure
to the features used by IGT. FIFO underruns and FBC bugs will no longer
hide when an earlier subtest disables both.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
References: https://bugs.freedesktop.org/show_bug.cgi?id=105685
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105681
Link: https://patchwork.freedesktop.org/patch/msgid/20180328100526.36467-1-maarten.lankhorst@linux.intel.com
Acked-by: Jani Nikula <jani.nikula@linux.intel.com>
[mlankhorst: Reset FBC reason if underrun had occurred. (vivijim)]
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c  | 62 ++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_display.c | 30 ++++++++------
 drivers/gpu/drm/i915/intel_drv.h     |  3 ++
 drivers/gpu/drm/i915/intel_fbc.c     | 28 +++++++++++++
 4 files changed, 111 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 785b710e4ee4..2e6652a9bb9e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4732,6 +4732,67 @@ static int i915_drrs_ctl_set(void *data, u64 val)
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
 
+static ssize_t
+i915_fifo_underrun_reset_write(struct file *filp,
+			       const char __user *ubuf,
+			       size_t cnt, loff_t *ppos)
+{
+	struct drm_i915_private *dev_priv = filp->private_data;
+	struct intel_crtc *intel_crtc;
+	struct drm_device *dev = &dev_priv->drm;
+	int ret;
+	bool reset;
+
+	ret = kstrtobool_from_user(ubuf, cnt, &reset);
+	if (ret)
+		return ret;
+
+	if (!reset)
+		return cnt;
+
+	for_each_intel_crtc(dev, intel_crtc) {
+		struct drm_crtc_commit *commit;
+		struct intel_crtc_state *crtc_state;
+
+		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
+		if (ret)
+			return ret;
+
+		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
+		commit = crtc_state->base.commit;
+		if (commit) {
+			ret = wait_for_completion_interruptible(&commit->hw_done);
+			if (!ret)
+				ret = wait_for_completion_interruptible(&commit->flip_done);
+		}
+
+		if (!ret && crtc_state->base.active) {
+			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
+				      pipe_name(intel_crtc->pipe));
+
+			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
+		}
+
+		drm_modeset_unlock(&intel_crtc->base.mutex);
+
+		if (ret)
+			return ret;
+	}
+
+	ret = intel_fbc_reset_underrun(dev_priv);
+	if (ret)
+		return ret;
+
+	return cnt;
+}
+
+static const struct file_operations i915_fifo_underrun_reset_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = i915_fifo_underrun_reset_write,
+	.llseek = default_llseek,
+};
+
 static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4799,6 +4860,7 @@ static const struct i915_debugfs_files {
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_gpu_info", &i915_gpu_info_fops},
 #endif
+	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
 	{"i915_next_seqno", &i915_next_seqno_fops},
 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8fe805983be8..e04050ea3e28 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13019,10 +13019,25 @@ out:
 							   intel_cstate);
 }
 
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+	if (!IS_GEN2(dev_priv))
+		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+	if (crtc_state->has_pch_encoder) {
+		enum pipe pch_transcoder =
+			intel_crtc_pch_transcoder(crtc);
+
+		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+	}
+}
+
 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
 				     struct drm_crtc_state *old_crtc_state)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_atomic_state *old_intel_state =
 		to_intel_atomic_state(old_crtc_state->state);
@@ -13033,17 +13048,8 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
 
 	if (new_crtc_state->update_pipe &&
 	    !needs_modeset(&new_crtc_state->base) &&
-	    old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED) {
-		if (!IS_GEN2(dev_priv))
-			intel_set_cpu_fifo_underrun_reporting(dev_priv, intel_crtc->pipe, true);
-
-		if (new_crtc_state->has_pch_encoder) {
-			enum pipe pch_transcoder =
-				intel_crtc_pch_transcoder(intel_crtc);
-
-			intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
-		}
-	}
+	    old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
+		intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9969309132d0..5bd2263407b2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1597,6 +1597,8 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
 enum intel_display_power_domain intel_port_to_power_domain(enum port port);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+				  struct intel_crtc_state *crtc_state);
 
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
@@ -1784,6 +1786,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
 		     unsigned int frontbuffer_bits, enum fb_op_origin origin);
 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
 
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 707d49c12638..b431b6733cc1 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1272,6 +1272,34 @@ out:
 	mutex_unlock(&fbc->lock);
 }
 
+/*
+ * intel_fbc_reset_underrun - reset FBC fifo underrun status.
+ * @dev_priv: i915 device instance
+ *
+ * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
+ * want to re-enable FBC after an underrun to increase test coverage.
+ */
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	cancel_work_sync(&dev_priv->fbc.underrun_work);
+
+	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
+	if (ret)
+		return ret;
+
+	if (dev_priv->fbc.underrun_detected) {
+		DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
+		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
+	}
+
+	dev_priv->fbc.underrun_detected = false;
+	mutex_unlock(&dev_priv->fbc.lock);
+
+	return 0;
+}
+
 /**
  * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
  * @dev_priv: i915 device instance

From 3834dc1f0e57891a398b2cabd40c60fa9595cd7c Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 10 Apr 2018 14:33:54 +0100
Subject: [PATCH 0267/1286] drm/i915: Don't fiddle with rps/rc6 across GPU
 reset
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Resetting the GPU doesn't affect the RPS/RC6 state, so we can stop
forcibly reloading the registers.

Ville suggested this many moons ago; I said at the time that sanitizing
did no harm and kept our bookkeeping consistent with the HW. However, in
a forthcoming series we want to split rps/rc6 GT power management, and
one of the key simplifications is controlling when we enable it.
Performing a crude sanitize in the middle of i915_gem_reset() is then a
huge wart.

Suggested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180410133354.13425-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28ab0beff86c..60cf7cfc24ee 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3254,13 +3254,6 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
 	}
 
 	i915_gem_restore_fences(dev_priv);
-
-	if (dev_priv->gt.awake) {
-		intel_sanitize_gt_powersave(dev_priv);
-		intel_enable_gt_powersave(dev_priv);
-		if (INTEL_GEN(dev_priv) >= 6)
-			gen6_rps_busy(dev_priv);
-	}
 }
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)

From 2184b3d69b961b5beb2781acf200b5b93643e32a Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 10 Apr 2018 12:14:17 +0100
Subject: [PATCH 0268/1286] drm/i915/guc: Replace %phn with %ph

%phn is not a valid specifier, and the trailing 'n' is being eaten by
the format specifier, leaving the default ' ' separator. Avoid angering
smatch over the unknown specifier and use the default separator we expect.
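
For reference, a small usage sketch of the %*ph printk extension
(hypothetical buffer; the field width gives the byte count):

  u8 payload[4] = { 0xde, 0xad, 0xbe, 0xef };

  /* Hex-dumps the buffer with space-separated bytes. */
  pr_debug("payload %*ph\n", (int)sizeof(payload), payload);
  /* -> "payload de ad be ef" */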

drivers/gpu/drm/i915/intel_guc_ct.c:616 ctb_read() warn: '%ph' cannot be followed by 'n'
drivers/gpu/drm/i915/intel_guc_ct.c:616 ctb_read() warn: '%ph' cannot be followed by 'n'
drivers/gpu/drm/i915/intel_guc_ct.c:616 ctb_read() warn: '%ph' cannot be followed by 'n'
drivers/gpu/drm/i915/intel_guc_ct.c:669 ct_handle_response() warn: '%ph' cannot be followed by 'n'
drivers/gpu/drm/i915/intel_guc_ct.c:679 ct_handle_response() warn: '%ph' cannot be followed by 'n'
drivers/gpu/drm/i915/intel_guc_ct.c:693 ct_handle_response() warn: '%ph' cannot be followed by 'n'
drivers/gpu/drm/i915/intel_guc_ct.c:707 ct_handle_response() warn: '%ph' cannot be followed by 'n'
drivers/gpu/drm/i915/intel_guc_ct.c:727 ct_process_request() warn: '%ph' cannot be followed by 'n'
drivers/gpu/drm/i915/intel_guc_ct.c:803 ct_handle_request() warn: '%ph' cannot be followed by 'n'

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180410111417.27563-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_guc_ct.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 990141d5f195..371b6005954a 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -361,7 +361,7 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
 		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
 		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);
 
-	CT_DEBUG_DRIVER("CT: writing %*phn %*phn %*phn\n",
+	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
 			4, &header, 4, &fence,
 			4 * (len - 1), &action[1]);
 
@@ -613,7 +613,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
 	/* message len with header */
 	len = ct_header_get_len(data[0]) + 1;
 	if (unlikely(len > (u32)available)) {
-		DRM_ERROR("CT: incomplete message %*phn %*phn %*phn\n",
+		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
 			  4, data,
 			  4 * (head + available - 1 > size ?
 			       size - head : available - 1), &cmds[head],
@@ -626,7 +626,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
 		data[i] = cmds[head];
 		head = (head + 1) % size;
 	}
-	CT_DEBUG_DRIVER("CT: received %*phn\n", 4 * len, data);
+	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);
 
 	desc->head = head * 4;
 	return 0;
@@ -666,7 +666,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 
 	/* Response payload shall at least include fence and status */
 	if (unlikely(len < 2)) {
-		DRM_ERROR("CT: corrupted response %*phn\n", 4 * msglen, msg);
+		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
 		return -EPROTO;
 	}
 
@@ -676,7 +676,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 
 	/* Format of the status follows RESPONSE message */
 	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
-		DRM_ERROR("CT: corrupted response %*phn\n", 4 * msglen, msg);
+		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
 		return -EPROTO;
 	}
 
@@ -690,7 +690,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 			continue;
 		}
 		if (unlikely(datalen > req->response_len)) {
-			DRM_ERROR("CT: response %u too long %*phn\n",
+			DRM_ERROR("CT: response %u too long %*ph\n",
 				  req->fence, 4 * msglen, msg);
 			datalen = 0;
 		}
@@ -704,7 +704,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 	spin_unlock(&ct->lock);
 
 	if (!found)
-		DRM_ERROR("CT: unsolicited response %*phn\n", 4 * msglen, msg);
+		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
 	return 0;
 }
 
@@ -713,7 +713,7 @@ static void ct_process_request(struct intel_guc_ct *ct,
 {
 	struct intel_guc *guc = ct_to_guc(ct);
 
-	CT_DEBUG_DRIVER("CT: request %x %*phn\n", action, 4 * len, payload);
+	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
 
 	switch (action) {
 	case INTEL_GUC_ACTION_DEFAULT:
@@ -724,7 +724,7 @@ static void ct_process_request(struct intel_guc_ct *ct,
 
 	default:
 fail_unexpected:
-		DRM_ERROR("CT: unexpected request %x %*phn\n",
+		DRM_ERROR("CT: unexpected request %x %*ph\n",
 			  action, 4 * len, payload);
 		break;
 	}
@@ -800,7 +800,7 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 
 	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
 	if (unlikely(!request)) {
-		DRM_ERROR("CT: dropping request %*phn\n", 4 * msglen, msg);
+		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
 		return 0; /* XXX: -ENOMEM ? */
 	}
 	memcpy(request->msg, msg, 4 * msglen);

From 6aac0a48b02f5d7ed64e4fdc2aa48843d425905b Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:14 -0400
Subject: [PATCH 0269/1286] drm/amdkfd: Remove limit on number of GPUs
 (follow-up)

This condition was missed in a previous commit with the same title.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 66852de410c8..f16ac2b2f060 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -307,9 +307,7 @@ int kfd_init_apertures(struct kfd_process *process)
 	struct kfd_process_device *pdd;
 
 	/*Iterating over all devices*/
-	while (kfd_topology_enum_kfd_devices(id, &dev) == 0 &&
-		id < NUM_OF_SUPPORTED_GPUS) {
-
+	while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
 		if (!dev) {
 			id++; /* Skip non GPU devices */
 			continue;

From 70a31d16ccac518c701b9fbfacce5460a226bfd9 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:15 -0400
Subject: [PATCH 0270/1286] drm/amdkfd: Support flat memory apertures for GFXv9

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 115 ++++++++++++++-----
 1 file changed, 87 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index f16ac2b2f060..97d5423c5673 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -275,23 +275,35 @@
  * for FLAT_* / S_LOAD operations.
  */
 
-#define MAKE_GPUVM_APP_BASE(gpu_num) \
+#define MAKE_GPUVM_APP_BASE_VI(gpu_num) \
 	(((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
 
 #define MAKE_GPUVM_APP_LIMIT(base, size) \
 	(((uint64_t)(base) & 0xFFFFFF0000000000UL) + (size) - 1)
 
-#define MAKE_SCRATCH_APP_BASE() \
+#define MAKE_SCRATCH_APP_BASE_VI() \
 	(((uint64_t)(0x1UL) << 61) + 0x100000000L)
 
 #define MAKE_SCRATCH_APP_LIMIT(base) \
 	(((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
 
-#define MAKE_LDS_APP_BASE() \
+#define MAKE_LDS_APP_BASE_VI() \
 	(((uint64_t)(0x1UL) << 61) + 0x0)
 #define MAKE_LDS_APP_LIMIT(base) \
 	(((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
 
+/* On GFXv9 the LDS and scratch apertures are programmed independently
+ * using the high 16 bits of the 64-bit virtual address. They must be
+ * in the hole, which will be the case as long as the high 16 bits are
+ * not 0.
+ *
+ * The aperture sizes are still 4GB implicitly.
+ *
+ * A GPUVM aperture is not applicable on GFXv9.
+ */
+#define MAKE_LDS_APP_BASE_V9() ((uint64_t)(0x1UL) << 48)
+#define MAKE_SCRATCH_APP_BASE_V9() ((uint64_t)(0x2UL) << 48)
+
 /* User mode manages most of the SVM aperture address space. The low
  * 16MB are reserved for kernel use (CWSR trap handler and kernel IB
  * for now).
@@ -300,6 +312,55 @@
 #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
 #define SVM_IB_BASE   (SVM_CWSR_BASE - PAGE_SIZE)
 
+static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+{
+	/*
+	 * node id couldn't be 0 - the three MSB bits of
+	 * aperture shouldn't be 0
+	 */
+	pdd->lds_base = MAKE_LDS_APP_BASE_VI();
+	pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+	if (!pdd->dev->device_info->needs_iommu_device) {
+		/* dGPUs: SVM aperture starting at 0
+		 * with small reserved space for kernel.
+		 * Set them to CANONICAL addresses.
+		 */
+		pdd->gpuvm_base = SVM_USER_BASE;
+		pdd->gpuvm_limit =
+			pdd->dev->shared_resources.gpuvm_size - 1;
+	} else {
+		/* set them to non CANONICAL addresses, and no SVM is
+		 * allocated.
+		 */
+		pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
+		pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base,
+				pdd->dev->shared_resources.gpuvm_size);
+	}
+
+	pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
+	pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+}
+
+static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
+{
+	pdd->lds_base = MAKE_LDS_APP_BASE_V9();
+	pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+	/* Raven needs SVM to support graphics handles, etc. Leave the small
+	 * reserved space before SVM on Raven as well, even though we don't
+	 * have to.
+	 * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
+	 * are used in Thunk to reserve SVM.
+	 */
+	pdd->gpuvm_base = SVM_USER_BASE;
+	pdd->gpuvm_limit =
+		pdd->dev->shared_resources.gpuvm_size - 1;
+
+	pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
+	pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+}
+
 int kfd_init_apertures(struct kfd_process *process)
 {
 	uint8_t id  = 0;
@@ -316,7 +377,7 @@ int kfd_init_apertures(struct kfd_process *process)
 		pdd = kfd_create_process_device_data(dev, process);
 		if (!pdd) {
 			pr_err("Failed to create process device data\n");
-			return -1;
+			return -ENOMEM;
 		}
 		/*
 		 * For 64 bit process apertures will be statically reserved in
@@ -328,32 +389,30 @@ int kfd_init_apertures(struct kfd_process *process)
 			pdd->gpuvm_base = pdd->gpuvm_limit = 0;
 			pdd->scratch_base = pdd->scratch_limit = 0;
 		} else {
-			/* Same LDS and scratch apertures can be used
-			 * on all GPUs. This allows using more dGPUs
-			 * than placement options for apertures.
-			 */
-			pdd->lds_base = MAKE_LDS_APP_BASE();
-			pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+			switch (dev->device_info->asic_family) {
+			case CHIP_KAVERI:
+			case CHIP_HAWAII:
+			case CHIP_CARRIZO:
+			case CHIP_TONGA:
+			case CHIP_FIJI:
+			case CHIP_POLARIS10:
+			case CHIP_POLARIS11:
+				kfd_init_apertures_vi(pdd, id);
+				break;
+			case CHIP_VEGA10:
+			case CHIP_RAVEN:
+				kfd_init_apertures_v9(pdd, id);
+				break;
+			default:
+				WARN(1, "Unexpected ASIC family %u",
+				     dev->device_info->asic_family);
+				return -EINVAL;
+			}
 
-			pdd->scratch_base = MAKE_SCRATCH_APP_BASE();
-			pdd->scratch_limit =
-				MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
-
-			if (dev->device_info->needs_iommu_device) {
-				/* APUs: GPUVM aperture in
-				 * non-canonical address space
+			if (!dev->device_info->needs_iommu_device) {
+				/* dGPUs: the reserved space for kernel
+				 * before SVM
 				 */
-				pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
-				pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(
-					pdd->gpuvm_base,
-					dev->shared_resources.gpuvm_size);
-			} else {
-				/* dGPUs: SVM aperture starting at 0
-				 * with small reserved space for kernel
-				 */
-				pdd->gpuvm_base = SVM_USER_BASE;
-				pdd->gpuvm_limit =
-					dev->shared_resources.gpuvm_size - 1;
 				pdd->qpd.cwsr_base = SVM_CWSR_BASE;
 				pdd->qpd.ib_base = SVM_IB_BASE;
 			}

From 3e76c2399b55483b1a28499b090f9d6600ab9eff Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:16 -0400
Subject: [PATCH 0271/1286] drm/amdkfd: Add GFXv9 CWSR trap handler

Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 .../drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 1495 +++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_device.c       |   13 +-
 2 files changed, 1505 insertions(+), 3 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm

diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
new file mode 100644
index 000000000000..033580c997ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -0,0 +1,1495 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if 0
+HW (GFX9) source code for CWSR trap handler
+#Version 18 + multiple trap handler
+
+// this performance-optimal version was originally from Seven Xu at SRDC
+
+// Revision #18	 --...
+/* Rev History
+** #1. Branch from gc dv.   //gfxip/gfx9/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(merged, skipped-already fixed by PV)
+** #4. SR Memory Layout:
+**			 1. VGPR-SGPR-HWREG-{LDS}
+**			 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
+** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
+** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
+** #7. Update: 1. don't barrier if noLDS
+** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
+**	       2. Fix SQ issue by s_sleep 2
+** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
+**	       2. optimize s_buffer save by burst 16sgprs...
+** #10. Update 1. Optimize restore sgpr by burst 16 sgprs.
+** #11. Update 1. Add 2 more timestamp for debug version
+** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
+** #13. Integ  1. Always use MUBUF for PV trap shader...
+** #14. Update 1. s_buffer_store soft clause...
+** #15. Update 1. PERF - scalar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot.
+** #16. Update 1. PERF - UNROLL LDS_DMA got 2500cycle save in IP tree
+** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
+**	       2. PERF - Save LDS before save VGPR to cover LDS save long latency...
+** #18. Update 1. FUNC - Implicitly restore STATUS.VCCZ, which is not writable by s_setreg_b32
+**	       2. FUNC - Handle non-CWSR traps
+*/
+
+var G8SR_WDMEM_HWREG_OFFSET = 0
+var G8SR_WDMEM_SGPR_OFFSET  = 128  // in bytes
+
+// Keep definitions the same as the app shader. These 2 timestamps are part of the app shader... Should be before any Save and after Restore.
+
+var G8SR_DEBUG_TIMESTAMP = 0
+var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4	// ts_save_d timestamp offset relative to SGPR_SR_memory_offset
+var s_g8sr_ts_save_s	= s[34:35]   // save start
+var s_g8sr_ts_sq_save_msg  = s[36:37]	// The save shader send SAVEWAVE msg to spi
+var s_g8sr_ts_spi_wrexec   = s[38:39]	// the SPI write the sr address to SQ
+var s_g8sr_ts_save_d	= s[40:41]   // save end
+var s_g8sr_ts_restore_s = s[42:43]   // restore start
+var s_g8sr_ts_restore_d = s[44:45]   // restore end
+
+var G8SR_VGPR_SR_IN_DWX4 = 0
+var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000	 // DWx4 stride is 4*4Bytes
+var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4  = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
+
+
+/*************************************************************************/
+/*		    control on how to run the shader			 */
+/*************************************************************************/
+//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+var EMU_RUN_HACK		    =	0
+var EMU_RUN_HACK_RESTORE_NORMAL	    =	0
+var EMU_RUN_HACK_SAVE_NORMAL_EXIT   =	0
+var EMU_RUN_HACK_SAVE_SINGLE_WAVE   =	0
+var EMU_RUN_HACK_SAVE_FIRST_TIME    =	0		    //for interrupted restore in which the first save is through EMU_RUN_HACK
+var SAVE_LDS			    =	1
+var WG_BASE_ADDR_LO		    =	0x9000a000
+var WG_BASE_ADDR_HI		    =	0x0
+var WAVE_SPACE			    =	0x5000		    //memory size that each wave occupies in workgroup state mem
+var CTX_SAVE_CONTROL		    =	0x0
+var CTX_RESTORE_CONTROL		    =	CTX_SAVE_CONTROL
+var SIM_RUN_HACK		    =	0		    //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+var SGPR_SAVE_USE_SQC		    =	1		    //use SQC D$ to do the write
+var USE_MTBUF_INSTEAD_OF_MUBUF	    =	0		    //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+var SWIZZLE_EN			    =	0		    //whether we use swizzled buffer addressing
+var ACK_SQC_STORE		    =	1		    //workaround for suspected SQC store bug causing incorrect stores under concurrency
+
+/**************************************************************************/
+/*			variables					  */
+/**************************************************************************/
+var SQ_WAVE_STATUS_INST_ATC_SHIFT  = 23
+var SQ_WAVE_STATUS_INST_ATC_MASK   = 0x00800000
+var SQ_WAVE_STATUS_SPI_PRIO_MASK   = 0x00000006
+var SQ_WAVE_STATUS_HALT_MASK       = 0x2000
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT	= 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE	= 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT	= 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE	= 6
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT	= 24
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE	= 3			//FIXME	 sq.blk still has 4 bits at this time while SQ programming guide has 3 bits
+
+var SQ_WAVE_TRAPSTS_SAVECTX_MASK    =	0x400
+var SQ_WAVE_TRAPSTS_EXCE_MASK	    =	0x1FF			// Exception mask
+var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT   =	10
+var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK   =	0x100
+var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT  =	8
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK	=   0x3FF
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT	=   0x0
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE	=   10
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK	=   0xFFFFF800
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT	=   11
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE	=   21
+var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK	=   0x800
+
+var SQ_WAVE_IB_STS_RCNT_SHIFT		=   16			//FIXME
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT	=   15			//FIXME
+var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK	= 0x1F8000
+var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG	= 0x00007FFF	//FIXME
+
+var SQ_BUF_RSRC_WORD1_ATC_SHIFT	    =	24
+var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT   =	27
+
+var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT	=   26			// bits [31:26] unused by SPI debug data
+var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK	=   0xFC000000
+
+/*	Save	    */
+var S_SAVE_BUF_RSRC_WORD1_STRIDE	=   0x00040000		//stride is 4 bytes
+var S_SAVE_BUF_RSRC_WORD3_MISC		=   0x00807FAC		//SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+
+var S_SAVE_SPI_INIT_ATC_MASK		=   0x08000000		//bit[27]: ATC bit
+var S_SAVE_SPI_INIT_ATC_SHIFT		=   27
+var S_SAVE_SPI_INIT_MTYPE_MASK		=   0x70000000		//bit[30:28]: Mtype
+var S_SAVE_SPI_INIT_MTYPE_SHIFT		=   28
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK	=   0x04000000		//bit[26]: FirstWaveInTG
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT	=   26
+
+var S_SAVE_PC_HI_RCNT_SHIFT		=   28			//FIXME	 check with Brian to ensure all fields other than PC[47:0] can be used
+var S_SAVE_PC_HI_RCNT_MASK		=   0xF0000000		//FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT	=   27			//FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_MASK	=   0x08000000		//FIXME
+
+var s_save_spi_init_lo		    =	exec_lo
+var s_save_spi_init_hi		    =	exec_hi
+
+var s_save_pc_lo	    =	ttmp0		//{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+var s_save_pc_hi	    =	ttmp1
+var s_save_exec_lo	    =	ttmp2
+var s_save_exec_hi	    =	ttmp3
+var s_save_tmp		    =	ttmp4
+var s_save_trapsts	    =	ttmp5		//not really used until the end of the SAVE routine
+var s_save_xnack_mask_lo    =	ttmp6
+var s_save_xnack_mask_hi    =	ttmp7
+var s_save_buf_rsrc0	    =	ttmp8
+var s_save_buf_rsrc1	    =	ttmp9
+var s_save_buf_rsrc2	    =	ttmp10
+var s_save_buf_rsrc3	    =	ttmp11
+var s_save_status	    =	ttmp12
+var s_save_mem_offset	    =	ttmp14
+var s_save_alloc_size	    =	s_save_trapsts		//conflict
+var s_save_m0		    =	ttmp15
+var s_save_ttmps_lo	    =	s_save_tmp		//no conflict
+var s_save_ttmps_hi	    =	s_save_trapsts		//no conflict
+
+/*	Restore	    */
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE	    =	S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC	    =	S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_ATC_MASK		    =	0x08000000	    //bit[27]: ATC bit
+var S_RESTORE_SPI_INIT_ATC_SHIFT	    =	27
+var S_RESTORE_SPI_INIT_MTYPE_MASK	    =	0x70000000	    //bit[30:28]: Mtype
+var S_RESTORE_SPI_INIT_MTYPE_SHIFT	    =	28
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK	    =	0x04000000	    //bit[26]: FirstWaveInTG
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT	    =	26
+
+var S_RESTORE_PC_HI_RCNT_SHIFT		    =	S_SAVE_PC_HI_RCNT_SHIFT
+var S_RESTORE_PC_HI_RCNT_MASK		    =	S_SAVE_PC_HI_RCNT_MASK
+var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT	    =	S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+var S_RESTORE_PC_HI_FIRST_REPLAY_MASK	    =	S_SAVE_PC_HI_FIRST_REPLAY_MASK
+
+var s_restore_spi_init_lo		    =	exec_lo
+var s_restore_spi_init_hi		    =	exec_hi
+
+var s_restore_mem_offset	=   ttmp12
+var s_restore_alloc_size	=   ttmp3
+var s_restore_tmp		=   ttmp2
+var s_restore_mem_offset_save	=   s_restore_tmp	//no conflict
+
+var s_restore_m0	    =	s_restore_alloc_size	//no conflict
+
+var s_restore_mode	    =	ttmp7
+
+var s_restore_pc_lo	    =	ttmp0
+var s_restore_pc_hi	    =	ttmp1
+var s_restore_exec_lo	    =	ttmp14
+var s_restore_exec_hi	    = 	ttmp15
+var s_restore_status	    =	ttmp4
+var s_restore_trapsts	    =	ttmp5
+var s_restore_xnack_mask_lo =	xnack_mask_lo
+var s_restore_xnack_mask_hi =	xnack_mask_hi
+var s_restore_buf_rsrc0	    =	ttmp8
+var s_restore_buf_rsrc1	    =	ttmp9
+var s_restore_buf_rsrc2	    =	ttmp10
+var s_restore_buf_rsrc3	    =	ttmp11
+var s_restore_ttmps_lo	    =	s_restore_tmp		//no conflict
+var s_restore_ttmps_hi	    =	s_restore_alloc_size	//no conflict
+
+/**************************************************************************/
+/*			trap handler entry points			  */
+/**************************************************************************/
+/* Shader Main*/
+
+shader main
+  asic(GFX9)
+  type(CS)
+
+
+    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))		    //hack to use trap_id for determining save/restore
+	//FIXME VCCZ un-init assertion s_getreg_b32	s_save_status, hwreg(HW_REG_STATUS)	    //save STATUS since we will change SCC
+	s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000		    //change SCC
+	s_cmp_eq_u32 s_save_tmp, 0x007e0000			    //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
+	s_cbranch_scc0 L_JUMP_TO_RESTORE			    //do not need to recover STATUS here  since we are going to RESTORE
+	//FIXME	 s_setreg_b32	hwreg(HW_REG_STATUS),	s_save_status	    //need to recover STATUS since we are going to SAVE
+	s_branch L_SKIP_RESTORE					    //NOT restore, SAVE actually
+    else
+	s_branch L_SKIP_RESTORE					    //NOT restore. might be a regular trap or save
+    end
+
+L_JUMP_TO_RESTORE:
+    s_branch L_RESTORE						    //restore
+
+L_SKIP_RESTORE:
+
+    s_getreg_b32    s_save_status, hwreg(HW_REG_STATUS)				    //save STATUS since we will change SCC
+    s_andn2_b32	    s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK	    //check whether this is for save
+    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+    s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK    //check whether this is for save
+    s_cbranch_scc1  L_SAVE					//this is the operation for save
+
+    // *********    Handle non-CWSR traps	*******************
+if (!EMU_RUN_HACK)
+    // Illegal instruction is a non-maskable exception which blocks context save.
+    // Halt the wavefront and return from the trap.
+    s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
+    s_cbranch_scc1  L_HALT_WAVE
+
+    // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA.
+    // Instead, halt the wavefront and return from the trap.
+    s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+    s_cbranch_scc0  L_FETCH_2ND_TRAP
+
+L_HALT_WAVE:
+    // If STATUS.HALT is set then this fault must come from SQC instruction fetch.
+    // We cannot prevent further faults so just terminate the wavefront.
+    s_and_b32       ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+    s_cbranch_scc0  L_NOT_ALREADY_HALTED
+    s_endpgm
+L_NOT_ALREADY_HALTED:
+    s_or_b32        s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+    // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
+    // Rewind the PC to prevent this from occurring. The debugger compensates for this.
+    s_sub_u32       ttmp0, ttmp0, 0x8
+    s_subb_u32      ttmp1, ttmp1, 0x0
+
+L_FETCH_2ND_TRAP:
+    // Preserve and clear scalar XNACK state before issuing scalar reads.
+    // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26].
+    s_getreg_b32    ttmp2, hwreg(HW_REG_IB_STS)
+    s_and_b32       ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+    s_lshl_b32      ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+    s_andn2_b32     ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK
+    s_or_b32        ttmp11, ttmp11, ttmp3
+
+    s_andn2_b32     ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+    s_setreg_b32    hwreg(HW_REG_IB_STS), ttmp2
+
+    // Read second-level TBA/TMA from first-level TMA and jump if available.
+    // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
+    // ttmp12 holds SQ_WAVE_STATUS
+    s_getreg_b32    ttmp4, hwreg(HW_REG_SQ_SHADER_TMA_LO)
+    s_getreg_b32    ttmp5, hwreg(HW_REG_SQ_SHADER_TMA_HI)
+    s_lshl_b64      [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8
+    s_load_dwordx2  [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA
+    s_waitcnt       lgkmcnt(0)
+    s_load_dwordx2  [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA
+    s_waitcnt       lgkmcnt(0)
+    s_and_b64       [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
+    s_cbranch_scc0  L_NO_NEXT_TRAP // second-level trap handler has not been set
+    s_setpc_b64     [ttmp2, ttmp3] // jump to second-level trap handler
+
+L_NO_NEXT_TRAP:
+    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+    s_and_b32	    s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
+    s_cbranch_scc1  L_EXCP_CASE	  // Exception, jump back to the shader program directly.
+    s_add_u32	    ttmp0, ttmp0, 4   // S_TRAP case, add 4 to ttmp0
+    s_addc_u32	ttmp1, ttmp1, 0
+L_EXCP_CASE:
+    s_and_b32	ttmp1, ttmp1, 0xFFFF
+
+    // Restore SQ_WAVE_IB_STS.
+    s_lshr_b32      ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+    s_and_b32       ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+    s_setreg_b32    hwreg(HW_REG_IB_STS), ttmp2
+
+    // Restore SQ_WAVE_STATUS.
+    s_and_b64       exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+    s_and_b64       vcc, vcc, vcc    // Restore STATUS.VCCZ, not writable by s_setreg_b32
+    s_setreg_b32    hwreg(HW_REG_STATUS), s_save_status
+
+    s_rfe_b64       [ttmp0, ttmp1]
+end
+    // *********	End handling of non-CWSR traps	 *******************
+
+/**************************************************************************/
+/*			save routine					  */
+/**************************************************************************/
+
+L_SAVE:
+
+if G8SR_DEBUG_TIMESTAMP
+	s_memrealtime	s_g8sr_ts_save_s
+	s_waitcnt lgkmcnt(0)	     //FIXME, will cause xnack??
+end
+
+    s_and_b32	    s_save_pc_hi, s_save_pc_hi, 0x0000ffff    //pc[47:32]
+
+    s_mov_b32	    s_save_tmp, 0							    //clear saveCtx bit
+    s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp	    //clear saveCtx bit
+
+    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)		    //save RCNT
+    s_lshl_b32	    s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
+    s_or_b32	    s_save_pc_hi, s_save_pc_hi, s_save_tmp
+    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE)   //save FIRST_REPLAY
+    s_lshl_b32	    s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+    s_or_b32	    s_save_pc_hi, s_save_pc_hi, s_save_tmp
+    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS)					    //clear RCNT and FIRST_REPLAY in IB_STS
+    s_and_b32	    s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
+
+    s_setreg_b32    hwreg(HW_REG_IB_STS), s_save_tmp
+
+    /*	    inform SPI the readiness and wait for SPI's go signal */
+    s_mov_b32	    s_save_exec_lo, exec_lo						    //save EXEC and use EXEC for the go signal from SPI
+    s_mov_b32	    s_save_exec_hi, exec_hi
+    s_mov_b64	    exec,   0x0								    //clear EXEC to get ready to receive
+
+if G8SR_DEBUG_TIMESTAMP
+	s_memrealtime  s_g8sr_ts_sq_save_msg
+	s_waitcnt lgkmcnt(0)
+end
+
+    if (EMU_RUN_HACK)
+
+    else
+	s_sendmsg   sendmsg(MSG_SAVEWAVE)  //send SPI a message and wait for SPI's write to EXEC
+    end
+
+  L_SLEEP:
+    s_sleep 0x2		       // sleep 1 (64clk) is not enough for 8 waves per SIMD and will cause an SQ hang, since the 7th/8th wave could not get arbitration to execute an instruction while the other waves are stuck in the sleep loop waiting for wrexec!=0
+
+    if (EMU_RUN_HACK)
+
+    else
+	s_cbranch_execz L_SLEEP
+    end
+
+if G8SR_DEBUG_TIMESTAMP
+	s_memrealtime  s_g8sr_ts_spi_wrexec
+	s_waitcnt lgkmcnt(0)
+end
+
+    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+	//calculate wd_addr using absolute thread id
+	v_readlane_b32 s_save_tmp, v9, 0
+	s_lshr_b32 s_save_tmp, s_save_tmp, 6
+	s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
+	s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+	s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+	s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+    else
+    end
+    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+	s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+	s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+	s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+    else
+    end
+
+    // Save trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
+    // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+    get_vgpr_size_bytes(s_save_ttmps_lo)
+    get_sgpr_size_bytes(s_save_ttmps_hi)
+    s_add_u32	    s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
+    s_add_u32	    s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
+    s_addc_u32	    s_save_ttmps_hi, s_save_spi_init_hi, 0x0
+    s_and_b32	    s_save_ttmps_hi, s_save_ttmps_hi, 0xFFFF
+    s_store_dwordx2 [ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x40 glc:1
+    ack_sqc_store_workaround()
+    s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x48 glc:1
+    ack_sqc_store_workaround()
+    s_store_dword   ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x58 glc:1
+    ack_sqc_store_workaround()
+    s_store_dwordx2 [ttmp14, ttmp15], [s_save_ttmps_lo, s_save_ttmps_hi], 0x5C glc:1
+    ack_sqc_store_workaround()
+
+    /*	    setup Resource Constants    */
+    s_mov_b32	    s_save_buf_rsrc0,	s_save_spi_init_lo							//base_addr_lo
+    s_and_b32	    s_save_buf_rsrc1,	s_save_spi_init_hi, 0x0000FFFF						//base_addr_hi
+    s_or_b32	    s_save_buf_rsrc1,	s_save_buf_rsrc1,  S_SAVE_BUF_RSRC_WORD1_STRIDE
+    s_mov_b32	    s_save_buf_rsrc2,	0									//NUM_RECORDS initial value = 0 (in bytes) although not necessarily inited
+    s_mov_b32	    s_save_buf_rsrc3,	S_SAVE_BUF_RSRC_WORD3_MISC
+    s_and_b32	    s_save_tmp,		s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
+    s_lshr_b32	    s_save_tmp,		s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)	    //get ATC bit into position
+    s_or_b32	    s_save_buf_rsrc3,	s_save_buf_rsrc3,  s_save_tmp						//or ATC
+    s_and_b32	    s_save_tmp,		s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
+    s_lshr_b32	    s_save_tmp,		s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)	    //get MTYPE bits into position
+    s_or_b32	    s_save_buf_rsrc3,	s_save_buf_rsrc3,  s_save_tmp						//or MTYPE
+
+    //FIXME  right now s_save_m0/s_save_mem_offset use tma_lo/tma_hi  (might need to save them before using them?)
+    s_mov_b32	    s_save_m0,		m0								    //save M0
+
+    /*	    global mem offset		*/
+    s_mov_b32	    s_save_mem_offset,	0x0									//mem offset initial value = 0
+
+
+
+
+    /*	    save HW registers	*/
+    //////////////////////////////
+
+  L_SAVE_HWREG:
+	// HWREG SR memory offset : size(VGPR)+size(SGPR)
+       get_vgpr_size_bytes(s_save_mem_offset)
+       get_sgpr_size_bytes(s_save_tmp)
+       s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+
+
+    s_mov_b32	    s_save_buf_rsrc2, 0x4				//NUM_RECORDS	in bytes
+    if (SWIZZLE_EN)
+	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_save_buf_rsrc2,  0x1000000				    //NUM_RECORDS in bytes
+    end
+
+
+    write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)			//M0
+
+    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
+	s_add_u32 s_save_pc_lo, s_save_pc_lo, 4		    //pc[31:0]+4
+	s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0	    //carry bit over
+    end
+
+    write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)		    //PC
+    write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+    write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)		//EXEC
+    write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
+    write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset)		//STATUS
+
+    //s_save_trapsts conflicts with s_save_alloc_size
+    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+    write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset)		//TRAPSTS
+
+    write_hwreg_to_mem(xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset)	    //XNACK_MASK_LO
+    write_hwreg_to_mem(xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset)	    //XNACK_MASK_HI
+
+    //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
+    s_getreg_b32    s_save_m0, hwreg(HW_REG_MODE)						    //MODE
+    write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+
+
+
+    /*	    the first wave in the threadgroup	 */
+    s_and_b32	    s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK	// extract first wave bit
+    s_mov_b32	     s_save_exec_hi, 0x0
+    s_or_b32	     s_save_exec_hi, s_save_tmp, s_save_exec_hi				 // save first wave bit to s_save_exec_hi.bits[26]
+
+
+    /*		save SGPRs	*/
+	// Save SGPRs before LDS save, so that s0 to s4 can be used during LDS save...
+    //////////////////////////////
+
+    // SGPR SR memory offset : size(VGPR)
+    get_vgpr_size_bytes(s_save_mem_offset)
+    // TODO, change RSRC word to rearrange memory layout for SGPRS
+
+    s_getreg_b32    s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE)		//sgpr_size
+    s_add_u32	    s_save_alloc_size, s_save_alloc_size, 1
+    s_lshl_b32	    s_save_alloc_size, s_save_alloc_size, 4			    //Number of SGPRs = (sgpr_size + 1) * 16   (non-zero value)
+
+    if (SGPR_SAVE_USE_SQC)
+	s_lshl_b32	s_save_buf_rsrc2,   s_save_alloc_size, 2		    //NUM_RECORDS in bytes
+    else
+	s_lshl_b32	s_save_buf_rsrc2,   s_save_alloc_size, 8		    //NUM_RECORDS in bytes (64 threads)
+    end
+
+    if (SWIZZLE_EN)
+	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_save_buf_rsrc2,  0x1000000				    //NUM_RECORDS in bytes
+    end
+
+
+    // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
+    //s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0
+    s_mov_b64 s_save_xnack_mask_lo, s_save_buf_rsrc0
+    s_add_u32 s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset
+    s_addc_u32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0
+
+    s_mov_b32	    m0, 0x0			    //SGPR initial index value =0
+    s_nop	    0x0				    //Manually inserted wait states
+  L_SAVE_SGPR_LOOP:
+    // SGPR is allocated in 16 SGPR granularity
+    s_movrels_b64   s0, s0     //s0 = s[0+m0], s1 = s[1+m0]
+    s_movrels_b64   s2, s2     //s2 = s[2+m0], s3 = s[3+m0]
+    s_movrels_b64   s4, s4     //s4 = s[4+m0], s5 = s[5+m0]
+    s_movrels_b64   s6, s6     //s6 = s[6+m0], s7 = s[7+m0]
+    s_movrels_b64   s8, s8     //s8 = s[8+m0], s9 = s[9+m0]
+    s_movrels_b64   s10, s10   //s10 = s[10+m0], s11 = s[11+m0]
+    s_movrels_b64   s12, s12   //s12 = s[12+m0], s13 = s[13+m0]
+    s_movrels_b64   s14, s14   //s14 = s[14+m0], s15 = s[15+m0]
+
+    write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) //PV: the best performance should be using s_buffer_store_dwordx4
+    s_add_u32	    m0, m0, 16							    //next sgpr index
+    s_cmp_lt_u32    m0, s_save_alloc_size					    //scc = (m0 < s_save_alloc_size) ? 1 : 0
+    s_cbranch_scc1  L_SAVE_SGPR_LOOP					//SGPR save is complete?
+    // restore s_save_buf_rsrc0,1
+    //s_mov_b64 s_save_buf_rsrc0, s_save_pc_lo
+    s_mov_b64 s_save_buf_rsrc0, s_save_xnack_mask_lo
+
+
+
+
+    /*		save first 4 VGPRs, so the LDS save can use them   */
+	// each wave will alloc at least 4 vgprs...
+    /////////////////////////////////////////////////////////////////////////////////////
+
+    s_mov_b32	    s_save_mem_offset, 0
+    s_mov_b32	    exec_lo, 0xFFFFFFFF						    //need every thread from now on
+    s_mov_b32	    exec_hi, 0xFFFFFFFF
+    s_mov_b32	    xnack_mask_lo, 0x0
+    s_mov_b32	    xnack_mask_hi, 0x0
+
+    if (SWIZZLE_EN)
+	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_save_buf_rsrc2,  0x1000000				    //NUM_RECORDS in bytes
+    end
+
+
+    // VGPR Allocated in 4-GPR granularity
+
+if G8SR_VGPR_SR_IN_DWX4
+	// the const stride for DWx4 is 4*4 bytes
+	s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
+	s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes
+
+	buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+	s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
+	s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE  // reset const stride to 4 bytes
+else
+	buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+	buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256
+	buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*2
+	buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*3
+end
+
+
+
+    /*		save LDS	*/
+    //////////////////////////////
+
+  L_SAVE_LDS:
+
+	// Change EXEC to all threads...
+    s_mov_b32	    exec_lo, 0xFFFFFFFF	  //need every thread from now on
+    s_mov_b32	    exec_hi, 0xFFFFFFFF
+
+    s_getreg_b32    s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)		    //lds_size
+    s_and_b32	    s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF		    //lds_size is zero?
+    s_cbranch_scc0  L_SAVE_LDS_DONE									       //no lds used? jump to L_SAVE_DONE
+
+    s_barrier		    //LDS is used? wait for other waves in the same TG
+    s_and_b32	    s_save_tmp, s_save_exec_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK		       //exec is still used here
+    s_cbranch_scc0  L_SAVE_LDS_DONE
+
+	// first wave do LDS save;
+
+    s_lshl_b32	    s_save_alloc_size, s_save_alloc_size, 6			    //LDS size in dwords = lds_size * 64dw
+    s_lshl_b32	    s_save_alloc_size, s_save_alloc_size, 2			    //LDS size in bytes
+    s_mov_b32	    s_save_buf_rsrc2,  s_save_alloc_size			    //NUM_RECORDS in bytes
+
+    // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG)
+    //
+    get_vgpr_size_bytes(s_save_mem_offset)
+    get_sgpr_size_bytes(s_save_tmp)
+    s_add_u32  s_save_mem_offset, s_save_mem_offset, s_save_tmp
+    s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+
+    if (SWIZZLE_EN)
+	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0	      //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_save_buf_rsrc2,  0x1000000		      //NUM_RECORDS in bytes
+    end
+
+    s_mov_b32	    m0, 0x0						  //lds_offset initial value = 0
+
+
+var LDS_DMA_ENABLE = 0
+var UNROLL = 0
+if UNROLL==0 && LDS_DMA_ENABLE==1
+	s_mov_b32  s3, 256*2
+	s_nop 0
+	s_nop 0
+	s_nop 0
+  L_SAVE_LDS_LOOP:
+	//TODO: looks like the 2 buffer_store/load clauses for s/r will hurt performance???
+    if (SAVE_LDS)     //SPI always alloc LDS space in 128DW granularity
+	    buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1		// first 64DW
+	    buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+    end
+
+    s_add_u32	    m0, m0, s3						//every buffer_store_lds does 256 bytes
+    s_add_u32	    s_save_mem_offset, s_save_mem_offset, s3				//mem offset increased by 256 bytes
+    s_cmp_lt_u32    m0, s_save_alloc_size						//scc=(m0 < s_save_alloc_size) ? 1 : 0
+    s_cbranch_scc1  L_SAVE_LDS_LOOP							//LDS save is complete?
+
+elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROLL, has icache miss
+      // store from highest LDS address to lowest
+      s_mov_b32	 s3, 256*2
+      s_sub_u32	 m0, s_save_alloc_size, s3
+      s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
+      s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9   // how many 128-DW chunks...
+      s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size   // store from highest addr to lowest
+      s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4   // PC offset increment,  each LDS save block costs 6*4 Bytes of instructions
+      s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4   //2 is the below 2 inst... //s_addc and s_setpc
+      s_nop 0
+      s_nop 0
+      s_nop 0	//pad 3 dw to let LDS_DMA align with 64Bytes
+      s_getpc_b64 s[0:1]			      // reuse s[0:1], since s[0:1] already saved
+      s_add_u32	  s0, s0,s_save_alloc_size
+      s_addc_u32  s1, s1, 0
+      s_setpc_b64 s[0:1]
+
+
+       for var i =0; i< 128; i++
+	    // be careful to make here a 64Byte aligned address, which could improve performance...
+	    buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0		// first 64DW
+	    buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256		  // second 64DW
+
+	if i!=127
+	s_sub_u32  m0, m0, s3	   // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e.  pack more LDS_DMA inst to one Cacheline
+	    s_sub_u32  s_save_mem_offset, s_save_mem_offset,  s3
+	    end
+       end
+
+else   // BUFFER_STORE
+      v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
+      v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2	// tid
+      v_mul_i32_i24 v2, v3, 8	// tid*8
+      v_mov_b32 v3, 256*2
+      s_mov_b32 m0, 0x10000
+      s_mov_b32 s0, s_save_buf_rsrc3
+      s_and_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0xFF7FFFFF	  // disable add_tid
+      s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0x58000   //DFMT
+
+L_SAVE_LDS_LOOP_VECTOR:
+      ds_read_b64 v[0:1], v2	//x =LDS[a], byte address
+      s_waitcnt lgkmcnt(0)
+      buffer_store_dwordx2  v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset offen:1  glc:1  slc:1
+//	s_waitcnt vmcnt(0)
+//	v_add_u32 v2, vcc[0:1], v2, v3
+      v_add_u32 v2, v2, v3
+      v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+      s_cbranch_vccnz L_SAVE_LDS_LOOP_VECTOR
+
+      // restore rsrc3
+      s_mov_b32 s_save_buf_rsrc3, s0
+
+end
+
+L_SAVE_LDS_DONE:
+
+
+    /*		save VGPRs - save the rest of the VGPRs	*/
+    //////////////////////////////////////////////////////////////////////////////////////
+  L_SAVE_VGPR:
+    // VGPR SR memory offset: 0
+    // TODO rearrange the RSRC words to use swizzle for VGPR save...
+
+    s_mov_b32	    s_save_mem_offset, (0+256*4)				    // for the rest VGPRs
+    s_mov_b32	    exec_lo, 0xFFFFFFFF						    //need every thread from now on
+    s_mov_b32	    exec_hi, 0xFFFFFFFF
+
+    s_getreg_b32    s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)		    //vgpr_size
+    s_add_u32	    s_save_alloc_size, s_save_alloc_size, 1
+    s_lshl_b32	    s_save_alloc_size, s_save_alloc_size, 2			    //Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)	  //FIXME for GFX, zero is possible
+    s_lshl_b32	    s_save_buf_rsrc2,  s_save_alloc_size, 8			    //NUM_RECORDS in bytes (64 threads*4)
+    if (SWIZZLE_EN)
+	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_save_buf_rsrc2,  0x1000000				    //NUM_RECORDS in bytes
+    end
+
+
+    // VGPR Allocated in 4-GPR granularity
+
+if G8SR_VGPR_SR_IN_DWX4
+	// the const stride for DWx4 is 4*4 bytes
+	s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
+	s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes
+
+	s_mov_b32	  m0, 4	    // skip first 4 VGPRs
+	s_cmp_lt_u32	  m0, s_save_alloc_size
+	s_cbranch_scc0	  L_SAVE_VGPR_LOOP_END	    // no more vgprs
+
+	s_set_gpr_idx_on  m0, 0x1   // This will change M0
+	s_add_u32	  s_save_alloc_size, s_save_alloc_size, 0x1000	// because above inst change m0
+L_SAVE_VGPR_LOOP:
+	v_mov_b32	  v0, v0   // v0 = v[0+m0]
+	v_mov_b32	  v1, v1
+	v_mov_b32	  v2, v2
+	v_mov_b32	  v3, v3
+
+
+	buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+	s_add_u32	  m0, m0, 4
+	s_add_u32	  s_save_mem_offset, s_save_mem_offset, 256*4
+	s_cmp_lt_u32	  m0, s_save_alloc_size
+    s_cbranch_scc1  L_SAVE_VGPR_LOOP						    //VGPR save is complete?
+    s_set_gpr_idx_off
+L_SAVE_VGPR_LOOP_END:
+
+	s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
+	s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE  // reset const stride to 4 bytes
+else
+    // VGPR store using dw burst
+    s_mov_b32	      m0, 0x4	//VGPR initial index value = 4 (first 4 VGPRs already saved)
+    s_cmp_lt_u32      m0, s_save_alloc_size
+    s_cbranch_scc0    L_SAVE_VGPR_END
+
+
+    s_set_gpr_idx_on	m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
+    s_add_u32	    s_save_alloc_size, s_save_alloc_size, 0x1000		    //add 0x1000 since we compare m0 against it later
+
+  L_SAVE_VGPR_LOOP:
+    v_mov_b32	    v0, v0		//v0 = v[0+m0]
+    v_mov_b32	    v1, v1		//v1 = v[1+m0]
+    v_mov_b32	    v2, v2		//v2 = v[2+m0]
+    v_mov_b32	    v3, v3		//v3 = v[3+m0]
+
+    if(USE_MTBUF_INSTEAD_OF_MUBUF)
+	tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+    else
+	buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+	buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256
+	buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*2
+	buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*3
+    end
+
+    s_add_u32	    m0, m0, 4							    //next vgpr index
+    s_add_u32	    s_save_mem_offset, s_save_mem_offset, 256*4			    //every buffer_store_dword does 256 bytes
+    s_cmp_lt_u32    m0, s_save_alloc_size					    //scc = (m0 < s_save_alloc_size) ? 1 : 0
+    s_cbranch_scc1  L_SAVE_VGPR_LOOP						    //VGPR save is complete?
+    s_set_gpr_idx_off
+end
+
+L_SAVE_VGPR_END:
+
+
+
+
+
+
+    /*	   S_PGM_END_SAVED  */				    //FIXME  graphics ONLY
+    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
+	s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff    //pc[47:32]
+	s_add_u32 s_save_pc_lo, s_save_pc_lo, 4		    //pc[31:0]+4
+	s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0	    //carry bit over
+	s_rfe_b64 s_save_pc_lo				    //Return to the main shader program
+    else
+    end
+
+// Save Done timestamp
+if G8SR_DEBUG_TIMESTAMP
+	s_memrealtime	s_g8sr_ts_save_d
+	// SGPR SR memory offset : size(VGPR)
+	get_vgpr_size_bytes(s_save_mem_offset)
+	s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
+	s_waitcnt lgkmcnt(0)	     //FIXME, will cause xnack??
+	// Need reset rsrc2??
+	s_mov_b32 m0, s_save_mem_offset
+	s_mov_b32 s_save_buf_rsrc2,  0x1000000					//NUM_RECORDS in bytes
+	s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0	    glc:1
+end
+
+
+    s_branch	L_END_PGM
+
+
+
+/**************************************************************************/
+/*			restore routine					  */
+/**************************************************************************/
+
+L_RESTORE:
+    /*	    Setup Resource Constants    */
+    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+	//calculate wd_addr using absolute thread id
+	v_readlane_b32 s_restore_tmp, v9, 0
+	s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
+	s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
+	s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
+	s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
+	s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
+    else
+    end
+
+if G8SR_DEBUG_TIMESTAMP
+	s_memrealtime	s_g8sr_ts_restore_s
+	s_waitcnt lgkmcnt(0)	     //FIXME, will cause xnack??
+	// tma_lo/hi are sgpr 110, 111, which will not be used for the 112-SGPR allocated case...
+	s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
+	s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1]   //backup ts to ttmp0/1, since exec will be finally restored..
+end
+
+
+
+    s_mov_b32	    s_restore_buf_rsrc0,    s_restore_spi_init_lo							    //base_addr_lo
+    s_and_b32	    s_restore_buf_rsrc1,    s_restore_spi_init_hi, 0x0000FFFF						    //base_addr_hi
+    s_or_b32	    s_restore_buf_rsrc1,    s_restore_buf_rsrc1,  S_RESTORE_BUF_RSRC_WORD1_STRIDE
+    s_mov_b32	    s_restore_buf_rsrc2,    0										    //NUM_RECORDS initial value = 0 (in bytes)
+    s_mov_b32	    s_restore_buf_rsrc3,    S_RESTORE_BUF_RSRC_WORD3_MISC
+    s_and_b32	    s_restore_tmp,	    s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
+    s_lshr_b32	    s_restore_tmp,	    s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)	    //get ATC bit into position
+    s_or_b32	    s_restore_buf_rsrc3,    s_restore_buf_rsrc3,  s_restore_tmp						    //or ATC
+    s_and_b32	    s_restore_tmp,	    s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
+    s_lshr_b32	    s_restore_tmp,	    s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)   //get MTYPE bits into position
+    s_or_b32	    s_restore_buf_rsrc3,    s_restore_buf_rsrc3,  s_restore_tmp						    //or MTYPE
+
+    /*	    global mem offset		*/
+//  s_mov_b32	    s_restore_mem_offset, 0x0				    //mem offset initial value = 0
+
+    /*	    the first wave in the threadgroup	 */
+    s_and_b32	    s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+    s_cbranch_scc0  L_RESTORE_VGPR
+
+    /*		restore LDS	*/
+    //////////////////////////////
+  L_RESTORE_LDS:
+
+    s_mov_b32	    exec_lo, 0xFFFFFFFF							    //need every thread from now on   //be consistent with SAVE although can be moved ahead
+    s_mov_b32	    exec_hi, 0xFFFFFFFF
+
+    s_getreg_b32    s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)		//lds_size
+    s_and_b32	    s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF		    //lds_size is zero?
+    s_cbranch_scc0  L_RESTORE_VGPR							    //no lds used? jump to L_RESTORE_VGPR
+    s_lshl_b32	    s_restore_alloc_size, s_restore_alloc_size, 6			    //LDS size in dwords = lds_size * 64dw
+    s_lshl_b32	    s_restore_alloc_size, s_restore_alloc_size, 2			    //LDS size in bytes
+    s_mov_b32	    s_restore_buf_rsrc2,    s_restore_alloc_size			    //NUM_RECORDS in bytes
+
+    // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG)
+    //
+    get_vgpr_size_bytes(s_restore_mem_offset)
+    get_sgpr_size_bytes(s_restore_tmp)
+    s_add_u32  s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+    s_add_u32  s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()	     //FIXME, Check if offset overflow???
+
+
+    if (SWIZZLE_EN)
+	s_add_u32	s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_restore_buf_rsrc2,  0x1000000					    //NUM_RECORDS in bytes
+    end
+    s_mov_b32	    m0, 0x0								    //lds_offset initial value = 0
+
+  L_RESTORE_LDS_LOOP:
+    if (SAVE_LDS)
+	buffer_load_dword   v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1		       // first 64DW
+	buffer_load_dword   v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256	       // second 64DW
+    end
+    s_add_u32	    m0, m0, 256*2						// 128 DW
+    s_add_u32	    s_restore_mem_offset, s_restore_mem_offset, 256*2		//mem offset increased by 128DW
+    s_cmp_lt_u32    m0, s_restore_alloc_size					//scc=(m0 < s_restore_alloc_size) ? 1 : 0
+    s_cbranch_scc1  L_RESTORE_LDS_LOOP							    //LDS restore is complete?
+
+
+    /*		restore VGPRs	    */
+    //////////////////////////////
+  L_RESTORE_VGPR:
+	// VGPR SR memory offset : 0
+    s_mov_b32	    s_restore_mem_offset, 0x0
+    s_mov_b32	    exec_lo, 0xFFFFFFFF							    //need every thread from now on   //be consistent with SAVE although can be moved ahead
+    s_mov_b32	    exec_hi, 0xFFFFFFFF
+
+    s_getreg_b32    s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)	//vgpr_size
+    s_add_u32	    s_restore_alloc_size, s_restore_alloc_size, 1
+    s_lshl_b32	    s_restore_alloc_size, s_restore_alloc_size, 2			    //Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
+    s_lshl_b32	    s_restore_buf_rsrc2,  s_restore_alloc_size, 8			    //NUM_RECORDS in bytes (64 threads*4)
+    if (SWIZZLE_EN)
+	s_add_u32	s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_restore_buf_rsrc2,  0x1000000					    //NUM_RECORDS in bytes
+    end
+
+if G8SR_VGPR_SR_IN_DWX4
+     get_vgpr_size_bytes(s_restore_mem_offset)
+     s_sub_u32	       s_restore_mem_offset, s_restore_mem_offset, 256*4
+
+     // the const stride for DWx4 is 4*4 bytes
+     s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
+     s_or_b32  s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes
+
+     s_mov_b32	       m0, s_restore_alloc_size
+     s_set_gpr_idx_on  m0, 0x8	  // Note.. This will change m0
+
+L_RESTORE_VGPR_LOOP:
+     buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+     s_waitcnt vmcnt(0)
+     s_sub_u32	       m0, m0, 4
+     v_mov_b32	       v0, v0	// v[0+m0] = v0
+     v_mov_b32	       v1, v1
+     v_mov_b32	       v2, v2
+     v_mov_b32	       v3, v3
+     s_sub_u32	       s_restore_mem_offset, s_restore_mem_offset, 256*4
+     s_cmp_eq_u32      m0, 0x8000
+     s_cbranch_scc0    L_RESTORE_VGPR_LOOP
+     s_set_gpr_idx_off
+
+     s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
+     s_or_b32  s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE  // reset const stride to 4 bytes
+
+else
+    // VGPR load using dw burst
+    s_mov_b32	    s_restore_mem_offset_save, s_restore_mem_offset	// restore start with v1, v0 will be the last
+    s_add_u32	    s_restore_mem_offset, s_restore_mem_offset, 256*4
+    s_mov_b32	    m0, 4				//VGPR initial index value = 4
+    s_set_gpr_idx_on  m0, 0x8			    //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
+    s_add_u32	    s_restore_alloc_size, s_restore_alloc_size, 0x8000			    //add 0x8000 since we compare m0 against it later
+
+  L_RESTORE_VGPR_LOOP:
+    if(USE_MTBUF_INSTEAD_OF_MUBUF)
+	tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+    else
+	buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+	buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
+	buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
+	buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
+    end
+    s_waitcnt	    vmcnt(0)								    //ensure data ready
+    v_mov_b32	    v0, v0								    //v[0+m0] = v0
+    v_mov_b32	    v1, v1
+    v_mov_b32	    v2, v2
+    v_mov_b32	    v3, v3
+    s_add_u32	    m0, m0, 4								    //next vgpr index
+    s_add_u32	    s_restore_mem_offset, s_restore_mem_offset, 256*4				//every buffer_load_dword does 256 bytes
+    s_cmp_lt_u32    m0, s_restore_alloc_size						    //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+    s_cbranch_scc1  L_RESTORE_VGPR_LOOP							    //VGPR restore (except v0) is complete?
+    s_set_gpr_idx_off
+											    /* VGPR restore on v0 */
+    if(USE_MTBUF_INSTEAD_OF_MUBUF)
+	tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+    else
+	buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1
+	buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256
+	buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256*2
+	buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256*3
+    end
+
+end
+
+    /*		restore SGPRs	    */
+    //////////////////////////////
+
+    // SGPR SR memory offset : size(VGPR)
+    get_vgpr_size_bytes(s_restore_mem_offset)
+    get_sgpr_size_bytes(s_restore_tmp)
+    s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+    s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 16*4	   // restore SGPR from S[n] to S[0], by 16 sgprs group
+    // TODO, change RSRC word to rearrange memory layout for SGPRS
+
+    s_getreg_b32    s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE)		    //sgpr_size
+    s_add_u32	    s_restore_alloc_size, s_restore_alloc_size, 1
+    s_lshl_b32	    s_restore_alloc_size, s_restore_alloc_size, 4			    //Number of SGPRs = (sgpr_size + 1) * 16   (non-zero value)
+
+    if (SGPR_SAVE_USE_SQC)
+	s_lshl_b32	s_restore_buf_rsrc2,	s_restore_alloc_size, 2			    //NUM_RECORDS in bytes
+    else
+	s_lshl_b32	s_restore_buf_rsrc2,	s_restore_alloc_size, 8			    //NUM_RECORDS in bytes (64 threads)
+    end
+    if (SWIZZLE_EN)
+	s_add_u32	s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_restore_buf_rsrc2,  0x1000000					    //NUM_RECORDS in bytes
+    end
+
+    s_mov_b32 m0, s_restore_alloc_size
+
+ L_RESTORE_SGPR_LOOP:
+    read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)	 //PV: further performance improvement can be made
+    s_waitcnt	    lgkmcnt(0)								    //ensure data ready
+
+    s_sub_u32 m0, m0, 16    // Restore from S[n] to S[0]
+    s_nop 0 // hazard SALU M0=> S_MOVREL
+
+    s_movreld_b64   s0, s0	//s[0+m0] = s0
+    s_movreld_b64   s2, s2
+    s_movreld_b64   s4, s4
+    s_movreld_b64   s6, s6
+    s_movreld_b64   s8, s8
+    s_movreld_b64   s10, s10
+    s_movreld_b64   s12, s12
+    s_movreld_b64   s14, s14
+
+    s_cmp_eq_u32    m0, 0		//scc = (m0 == 0) ? 1 : 0
+    s_cbranch_scc0  L_RESTORE_SGPR_LOOP		    //SGPR restore is complete?
+
+    /*	    restore HW registers    */
+    //////////////////////////////
+  L_RESTORE_HWREG:
+
+
+if G8SR_DEBUG_TIMESTAMP
+      s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
+      s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
+end
+
+    // HWREG SR memory offset : size(VGPR)+size(SGPR)
+    get_vgpr_size_bytes(s_restore_mem_offset)
+    get_sgpr_size_bytes(s_restore_tmp)
+    s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+
+
+    s_mov_b32	    s_restore_buf_rsrc2, 0x4						    //NUM_RECORDS   in bytes
+    if (SWIZZLE_EN)
+	s_add_u32	s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
+    else
+	s_mov_b32	s_restore_buf_rsrc2,  0x1000000					    //NUM_RECORDS in bytes
+    end
+
+    read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)		    //M0
+    read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)		//PC
+    read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+    read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)		    //EXEC
+    read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+    read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset)		    //STATUS
+    read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset)		    //TRAPSTS
+    read_hwreg_from_mem(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset)		    //XNACK_MASK_LO
+    read_hwreg_from_mem(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset)		    //XNACK_MASK_HI
+    read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)		//MODE
+
+    s_waitcnt	    lgkmcnt(0)											    //from now on, it is safe to restore STATUS and IB_STS
+
+    s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff	//pc[47:32]	   //Do it here in order not to affect STATUS
+
+    //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+	s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8		 //pc[31:0]+8	  //two back-to-back s_trap instructions are used (first for save and second for restore)
+	s_addc_u32  s_restore_pc_hi, s_restore_pc_hi, 0x0	 //carry bit over
+    end
+    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
+	s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4		 //pc[31:0]+4	  // save is hacked through s_trap but restore is normal
+	s_addc_u32  s_restore_pc_hi, s_restore_pc_hi, 0x0	 //carry bit over
+    end
+
+    s_mov_b32	    m0,		s_restore_m0
+    s_mov_b32	    exec_lo,	s_restore_exec_lo
+    s_mov_b32	    exec_hi,	s_restore_exec_hi
+
+    s_and_b32	    s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
+    s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
+    s_and_b32	    s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
+    s_lshr_b32	    s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
+    s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
+    //s_setreg_b32  hwreg(HW_REG_TRAPSTS),  s_restore_trapsts	   //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
+    s_setreg_b32    hwreg(HW_REG_MODE),	    s_restore_mode
+
+    // Restore trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
+    // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
+    get_vgpr_size_bytes(s_restore_ttmps_lo)
+    get_sgpr_size_bytes(s_restore_ttmps_hi)
+    s_add_u32	    s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
+    s_add_u32	    s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
+    s_addc_u32	    s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
+    s_and_b32	    s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
+    s_load_dwordx2  [ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x40 glc:1
+    s_load_dwordx4  [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x48 glc:1
+    s_load_dword    ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x58 glc:1
+    s_load_dwordx2  [ttmp14, ttmp15], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x5C glc:1
+    s_waitcnt	    lgkmcnt(0)
+
+    //reuse s_restore_m0 as a temp register
+    s_and_b32	    s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
+    s_lshr_b32	    s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
+    s_lshl_b32	    s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
+    s_mov_b32	    s_restore_tmp, 0x0										    //IB_STS is zero
+    s_or_b32	    s_restore_tmp, s_restore_tmp, s_restore_m0
+    s_and_b32	    s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
+    s_lshr_b32	    s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+    s_lshl_b32	    s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
+    s_or_b32	    s_restore_tmp, s_restore_tmp, s_restore_m0
+    s_and_b32	    s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
+    s_lshr_b32	    s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+    s_setreg_b32    hwreg(HW_REG_IB_STS),   s_restore_tmp
+
+    s_and_b64	 exec, exec, exec  // Restore STATUS.EXECZ, not writable by s_setreg_b32
+    s_and_b64	 vcc, vcc, vcc	// Restore STATUS.VCCZ, not writable by s_setreg_b32
+    s_setreg_b32    hwreg(HW_REG_STATUS),   s_restore_status	 // SCC is included, which is changed by previous salu
+
+    s_barrier							//barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+if G8SR_DEBUG_TIMESTAMP
+    s_memrealtime s_g8sr_ts_restore_d
+    s_waitcnt lgkmcnt(0)
+end
+
+//  s_rfe_b64 s_restore_pc_lo					//Return to the main shader program and resume execution
+    s_rfe_restore_b64  s_restore_pc_lo, s_restore_m0		// s_restore_m0[0] is used to set STATUS.inst_atc
+
+
+/**************************************************************************/
+/*			the END						  */
+/**************************************************************************/
+L_END_PGM:
+    s_endpgm
+
+end
+
+
+/**************************************************************************/
+/*			the helper functions				  */
+/**************************************************************************/
+
+//Only used for saving hwregs to memory
+function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
+	s_mov_b32 exec_lo, m0			//assuming exec_lo is not needed anymore from this point on
+	s_mov_b32 m0, s_mem_offset
+	s_buffer_store_dword s, s_rsrc, m0	glc:1
+	ack_sqc_store_workaround()
+	s_add_u32	s_mem_offset, s_mem_offset, 4
+	s_mov_b32   m0, exec_lo
+end
+
+
+// HWREGs are saved before SGPRs, so all HWREGs can be used.
+function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
+
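+	// Note: the s_mem_offset argument is not used by this implementation;
+	// the four stores use fixed offsets 0/16/32/48 and the base address in
+	// s_rsrc[0:1] is advanced by 64 bytes afterwards instead.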
+	s_buffer_store_dwordx4 s[0], s_rsrc, 0	glc:1
+	ack_sqc_store_workaround()
+	s_buffer_store_dwordx4 s[4], s_rsrc, 16	 glc:1
+	ack_sqc_store_workaround()
+	s_buffer_store_dwordx4 s[8], s_rsrc, 32	 glc:1
+	ack_sqc_store_workaround()
+	s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
+	ack_sqc_store_workaround()
+	s_add_u32	s_rsrc[0], s_rsrc[0], 4*16
+	s_addc_u32	s_rsrc[1], s_rsrc[1], 0x0	      // +scc
+end
+
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+    s_buffer_load_dword s, s_rsrc, s_mem_offset	    glc:1
+    s_add_u32	    s_mem_offset, s_mem_offset, 4
+end
+
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
+    s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset	glc:1
+    s_sub_u32	    s_mem_offset, s_mem_offset, 4*16
+end
+
+
+
+function get_lds_size_bytes(s_lds_size_byte)
+    // SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW
+    s_getreg_b32   s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)		// lds_size
+    s_lshl_b32	   s_lds_size_byte, s_lds_size_byte, 8			    //LDS size in bytes = lds_size * 64 (DW) * 4 (bytes)    // granularity 64DW
+end
+
+function get_vgpr_size_bytes(s_vgpr_size_byte)
+    s_getreg_b32   s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)	 //vgpr_size
+    s_add_u32	   s_vgpr_size_byte, s_vgpr_size_byte, 1
+    s_lshl_b32	   s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //VGPR size in bytes = (vgpr_size + 1) * 4 * 64 * 4	(non-zero value)   //FIXME for GFX, zero is possible
+end
+
+function get_sgpr_size_bytes(s_sgpr_size_byte)
+    s_getreg_b32   s_sgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE)	 //sgpr_size
+    s_add_u32	   s_sgpr_size_byte, s_sgpr_size_byte, 1
+    s_lshl_b32	   s_sgpr_size_byte, s_sgpr_size_byte, 6 //SGPR size in bytes = (sgpr_size + 1) * 16 * 4   (non-zero value)
+end
+
+function get_hwreg_size_bytes
+    return 128 //HWREG size 128 bytes
+end
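+
+// Layout of the context-save area, as implied by the offsets computed in the
+// restore path above:
+//   offset 0				: VGPRs ((vgpr_size + 1) * 4 * 64 * 4 bytes)
+//   offset size(VGPR)			: SGPRs ((sgpr_size + 1) * 16 * 4 bytes)
+//   offset size(VGPR)+size(SGPR)	: HWREGs (128 bytes), with the ttmp save
+//					  slots starting 0x40 bytes into this block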
+
+function ack_sqc_store_workaround
+    if ACK_SQC_STORE
+        s_waitcnt lgkmcnt(0)
+    end
+end
+
+
+#endif
+
+static const uint32_t cwsr_trap_gfx9_hex[] = {
+	0xbf820001, 0xbf820158,
+	0xb8f8f802, 0x89788678,
+	0xb8f1f803, 0x866eff71,
+	0x00000400, 0xbf850034,
+	0x866eff71, 0x00000800,
+	0xbf850003, 0x866eff71,
+	0x00000100, 0xbf840008,
+	0x866eff78, 0x00002000,
+	0xbf840001, 0xbf810000,
+	0x8778ff78, 0x00002000,
+	0x80ec886c, 0x82ed806d,
+	0xb8eef807, 0x866fff6e,
+	0x001f8000, 0x8e6f8b6f,
+	0x8977ff77, 0xfc000000,
+	0x87776f77, 0x896eff6e,
+	0x001f8000, 0xb96ef807,
+	0xb8f0f812, 0xb8f1f813,
+	0x8ef08870, 0xc0071bb8,
+	0x00000000, 0xbf8cc07f,
+	0xc0071c38, 0x00000008,
+	0xbf8cc07f, 0x86ee6e6e,
+	0xbf840001, 0xbe801d6e,
+	0xb8f1f803, 0x8671ff71,
+	0x000001ff, 0xbf850002,
+	0x806c846c, 0x826d806d,
+	0x866dff6d, 0x0000ffff,
+	0x8f6e8b77, 0x866eff6e,
+	0x001f8000, 0xb96ef807,
+	0x86fe7e7e, 0x86ea6a6a,
+	0xb978f802, 0xbe801f6c,
+	0x866dff6d, 0x0000ffff,
+	0xbef00080, 0xb9700283,
+	0xb8f02407, 0x8e709c70,
+	0x876d706d, 0xb8f003c7,
+	0x8e709b70, 0x876d706d,
+	0xb8f0f807, 0x8670ff70,
+	0x00007fff, 0xb970f807,
+	0xbeee007e, 0xbeef007f,
+	0xbefe0180, 0xbf900004,
+	0xbf8e0002, 0xbf88fffe,
+	0xb8f02a05, 0x80708170,
+	0x8e708a70, 0xb8f11605,
+	0x80718171, 0x8e718671,
+	0x80707170, 0x80707e70,
+	0x8271807f, 0x8671ff71,
+	0x0000ffff, 0xc0471cb8,
+	0x00000040, 0xbf8cc07f,
+	0xc04b1d38, 0x00000048,
+	0xbf8cc07f, 0xc0431e78,
+	0x00000058, 0xbf8cc07f,
+	0xc0471eb8, 0x0000005c,
+	0xbf8cc07f, 0xbef4007e,
+	0x8675ff7f, 0x0000ffff,
+	0x8775ff75, 0x00040000,
+	0xbef60080, 0xbef700ff,
+	0x00807fac, 0x8670ff7f,
+	0x08000000, 0x8f708370,
+	0x87777077, 0x8670ff7f,
+	0x70000000, 0x8f708170,
+	0x87777077, 0xbefb007c,
+	0xbefa0080, 0xb8fa2a05,
+	0x807a817a, 0x8e7a8a7a,
+	0xb8f01605, 0x80708170,
+	0x8e708670, 0x807a707a,
+	0xbef60084, 0xbef600ff,
+	0x01000000, 0xbefe007c,
+	0xbefc007a, 0xc0611efa,
+	0x0000007c, 0xbf8cc07f,
+	0x807a847a, 0xbefc007e,
+	0xbefe007c, 0xbefc007a,
+	0xc0611b3a, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xbefe007c,
+	0xbefc007a, 0xc0611b7a,
+	0x0000007c, 0xbf8cc07f,
+	0x807a847a, 0xbefc007e,
+	0xbefe007c, 0xbefc007a,
+	0xc0611bba, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xbefe007c,
+	0xbefc007a, 0xc0611bfa,
+	0x0000007c, 0xbf8cc07f,
+	0x807a847a, 0xbefc007e,
+	0xbefe007c, 0xbefc007a,
+	0xc0611e3a, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xb8f1f803,
+	0xbefe007c, 0xbefc007a,
+	0xc0611c7a, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xbefe007c,
+	0xbefc007a, 0xc0611a3a,
+	0x0000007c, 0xbf8cc07f,
+	0x807a847a, 0xbefc007e,
+	0xbefe007c, 0xbefc007a,
+	0xc0611a7a, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xb8fbf801,
+	0xbefe007c, 0xbefc007a,
+	0xc0611efa, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0x8670ff7f,
+	0x04000000, 0xbeef0080,
+	0x876f6f70, 0xb8fa2a05,
+	0x807a817a, 0x8e7a8a7a,
+	0xb8f11605, 0x80718171,
+	0x8e718471, 0x8e768271,
+	0xbef600ff, 0x01000000,
+	0xbef20174, 0x80747a74,
+	0x82758075, 0xbefc0080,
+	0xbf800000, 0xbe802b00,
+	0xbe822b02, 0xbe842b04,
+	0xbe862b06, 0xbe882b08,
+	0xbe8a2b0a, 0xbe8c2b0c,
+	0xbe8e2b0e, 0xc06b003a,
+	0x00000000, 0xbf8cc07f,
+	0xc06b013a, 0x00000010,
+	0xbf8cc07f, 0xc06b023a,
+	0x00000020, 0xbf8cc07f,
+	0xc06b033a, 0x00000030,
+	0xbf8cc07f, 0x8074c074,
+	0x82758075, 0x807c907c,
+	0xbf0a717c, 0xbf85ffe7,
+	0xbef40172, 0xbefa0080,
+	0xbefe00c1, 0xbeff00c1,
+	0xbee80080, 0xbee90080,
+	0xbef600ff, 0x01000000,
+	0xe0724000, 0x7a1d0000,
+	0xe0724100, 0x7a1d0100,
+	0xe0724200, 0x7a1d0200,
+	0xe0724300, 0x7a1d0300,
+	0xbefe00c1, 0xbeff00c1,
+	0xb8f14306, 0x8671c171,
+	0xbf84002c, 0xbf8a0000,
+	0x8670ff6f, 0x04000000,
+	0xbf840028, 0x8e718671,
+	0x8e718271, 0xbef60071,
+	0xb8fa2a05, 0x807a817a,
+	0x8e7a8a7a, 0xb8f01605,
+	0x80708170, 0x8e708670,
+	0x807a707a, 0x807aff7a,
+	0x00000080, 0xbef600ff,
+	0x01000000, 0xbefc0080,
+	0xd28c0002, 0x000100c1,
+	0xd28d0003, 0x000204c1,
+	0xd1060002, 0x00011103,
+	0x7e0602ff, 0x00000200,
+	0xbefc00ff, 0x00010000,
+	0xbe800077, 0x8677ff77,
+	0xff7fffff, 0x8777ff77,
+	0x00058000, 0xd8ec0000,
+	0x00000002, 0xbf8cc07f,
+	0xe0765000, 0x7a1d0002,
+	0x68040702, 0xd0c9006a,
+	0x0000e302, 0xbf87fff7,
+	0xbef70000, 0xbefa00ff,
+	0x00000400, 0xbefe00c1,
+	0xbeff00c1, 0xb8f12a05,
+	0x80718171, 0x8e718271,
+	0x8e768871, 0xbef600ff,
+	0x01000000, 0xbefc0084,
+	0xbf0a717c, 0xbf840015,
+	0xbf11017c, 0x8071ff71,
+	0x00001000, 0x7e000300,
+	0x7e020301, 0x7e040302,
+	0x7e060303, 0xe0724000,
+	0x7a1d0000, 0xe0724100,
+	0x7a1d0100, 0xe0724200,
+	0x7a1d0200, 0xe0724300,
+	0x7a1d0300, 0x807c847c,
+	0x807aff7a, 0x00000400,
+	0xbf0a717c, 0xbf85ffef,
+	0xbf9c0000, 0xbf8200d9,
+	0xbef4007e, 0x8675ff7f,
+	0x0000ffff, 0x8775ff75,
+	0x00040000, 0xbef60080,
+	0xbef700ff, 0x00807fac,
+	0x866eff7f, 0x08000000,
+	0x8f6e836e, 0x87776e77,
+	0x866eff7f, 0x70000000,
+	0x8f6e816e, 0x87776e77,
+	0x866eff7f, 0x04000000,
+	0xbf84001e, 0xbefe00c1,
+	0xbeff00c1, 0xb8ef4306,
+	0x866fc16f, 0xbf840019,
+	0x8e6f866f, 0x8e6f826f,
+	0xbef6006f, 0xb8f82a05,
+	0x80788178, 0x8e788a78,
+	0xb8ee1605, 0x806e816e,
+	0x8e6e866e, 0x80786e78,
+	0x8078ff78, 0x00000080,
+	0xbef600ff, 0x01000000,
+	0xbefc0080, 0xe0510000,
+	0x781d0000, 0xe0510100,
+	0x781d0000, 0x807cff7c,
+	0x00000200, 0x8078ff78,
+	0x00000200, 0xbf0a6f7c,
+	0xbf85fff6, 0xbef80080,
+	0xbefe00c1, 0xbeff00c1,
+	0xb8ef2a05, 0x806f816f,
+	0x8e6f826f, 0x8e76886f,
+	0xbef600ff, 0x01000000,
+	0xbeee0078, 0x8078ff78,
+	0x00000400, 0xbefc0084,
+	0xbf11087c, 0x806fff6f,
+	0x00008000, 0xe0524000,
+	0x781d0000, 0xe0524100,
+	0x781d0100, 0xe0524200,
+	0x781d0200, 0xe0524300,
+	0x781d0300, 0xbf8c0f70,
+	0x7e000300, 0x7e020301,
+	0x7e040302, 0x7e060303,
+	0x807c847c, 0x8078ff78,
+	0x00000400, 0xbf0a6f7c,
+	0xbf85ffee, 0xbf9c0000,
+	0xe0524000, 0x6e1d0000,
+	0xe0524100, 0x6e1d0100,
+	0xe0524200, 0x6e1d0200,
+	0xe0524300, 0x6e1d0300,
+	0xb8f82a05, 0x80788178,
+	0x8e788a78, 0xb8ee1605,
+	0x806e816e, 0x8e6e866e,
+	0x80786e78, 0x80f8c078,
+	0xb8ef1605, 0x806f816f,
+	0x8e6f846f, 0x8e76826f,
+	0xbef600ff, 0x01000000,
+	0xbefc006f, 0xc031003a,
+	0x00000078, 0x80f8c078,
+	0xbf8cc07f, 0x80fc907c,
+	0xbf800000, 0xbe802d00,
+	0xbe822d02, 0xbe842d04,
+	0xbe862d06, 0xbe882d08,
+	0xbe8a2d0a, 0xbe8c2d0c,
+	0xbe8e2d0e, 0xbf06807c,
+	0xbf84fff0, 0xb8f82a05,
+	0x80788178, 0x8e788a78,
+	0xb8ee1605, 0x806e816e,
+	0x8e6e866e, 0x80786e78,
+	0xbef60084, 0xbef600ff,
+	0x01000000, 0xc0211bfa,
+	0x00000078, 0x80788478,
+	0xc0211b3a, 0x00000078,
+	0x80788478, 0xc0211b7a,
+	0x00000078, 0x80788478,
+	0xc0211eba, 0x00000078,
+	0x80788478, 0xc0211efa,
+	0x00000078, 0x80788478,
+	0xc0211c3a, 0x00000078,
+	0x80788478, 0xc0211c7a,
+	0x00000078, 0x80788478,
+	0xc0211a3a, 0x00000078,
+	0x80788478, 0xc0211a7a,
+	0x00000078, 0x80788478,
+	0xc0211cfa, 0x00000078,
+	0x80788478, 0xbf8cc07f,
+	0x866dff6d, 0x0000ffff,
+	0xbefc006f, 0xbefe007a,
+	0xbeff007b, 0x866f71ff,
+	0x000003ff, 0xb96f4803,
+	0x866f71ff, 0xfffff800,
+	0x8f6f8b6f, 0xb96fa2c3,
+	0xb973f801, 0xb8ee2a05,
+	0x806e816e, 0x8e6e8a6e,
+	0xb8ef1605, 0x806f816f,
+	0x8e6f866f, 0x806e6f6e,
+	0x806e746e, 0x826f8075,
+	0x866fff6f, 0x0000ffff,
+	0xc0071cb7, 0x00000040,
+	0xc00b1d37, 0x00000048,
+	0xc0031e77, 0x00000058,
+	0xc0071eb7, 0x0000005c,
+	0xbf8cc07f, 0x866fff6d,
+	0xf0000000, 0x8f6f9c6f,
+	0x8e6f906f, 0xbeee0080,
+	0x876e6f6e, 0x866fff6d,
+	0x08000000, 0x8f6f9b6f,
+	0x8e6f8f6f, 0x876e6f6e,
+	0x866fff70, 0x00800000,
+	0x8f6f976f, 0xb96ef807,
+	0x86fe7e7e, 0x86ea6a6a,
+	0xb970f802, 0xbf8a0000,
+	0x95806f6c, 0xbf810000,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index c368ce3e96ff..053f1d0f80b8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -30,6 +30,7 @@
 #include "kfd_device_queue_manager.h"
 #include "kfd_pm4_headers_vi.h"
 #include "cwsr_trap_handler_gfx8.asm"
+#include "cwsr_trap_handler_gfx9.asm"
 #include "kfd_iommu.h"
 
 #define MQD_SIZE_ALIGNED 768
@@ -333,10 +334,16 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
 static void kfd_cwsr_init(struct kfd_dev *kfd)
 {
 	if (cwsr_enable && kfd->device_info->supports_cwsr) {
-		BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
+		if (kfd->device_info->asic_family < CHIP_VEGA10) {
+			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
+			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
+			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
+		} else {
+			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
+			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
+			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
+		}
 
-		kfd->cwsr_isa = cwsr_trap_gfx8_hex;
-		kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
 		kfd->cwsr_enabled = true;
 	}
 }

From 6106dce9559ec5a4b1a97302f2fcc508e40d2747 Mon Sep 17 00:00:00 2001
From: welu <Wei.Lu2@amd.com>
Date: Tue, 10 Apr 2018 17:33:17 -0400
Subject: [PATCH 0272/1286] drm/amdkfd: Try to enable atomics for all GPUs

Report failure to enable atomics only on GPUs that require them.
This allows GPUs that don't require atomics to function even without
them, while still benefiting when they are available. This is the case
for Vega10, which doesn't use atomics for the basic functioning of the
MEC, AQL and HWS microcode, so it can work without atomics, but shader
programs can still use atomic instructions on systems that support PCIe
atomics.

Signed-off-by: welu <Wei.Lu2@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c | 27 ++++++++++++-------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 053f1d0f80b8..0e64fb2c95e5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -290,7 +290,7 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
 	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
 {
 	struct kfd_dev *kfd;
-
+	int ret;
 	const struct kfd_device_info *device_info =
 					lookup_device_info(pdev->device);
 
@@ -299,19 +299,18 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
 		return NULL;
 	}
 
-	if (device_info->needs_pci_atomics) {
-		/* Allow BIF to recode atomics to PCIe 3.0
-		 * AtomicOps. 32 and 64-bit requests are possible and
-		 * must be supported.
-		 */
-		if (pci_enable_atomic_ops_to_root(pdev,
-				PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
-				PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) {
-			dev_info(kfd_device,
-				"skipped device %x:%x, PCI rejects atomics",
-				 pdev->vendor, pdev->device);
-			return NULL;
-		}
+	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+	 * 32 and 64-bit requests are possible and must be
+	 * supported.
+	 */
+	ret = pci_enable_atomic_ops_to_root(pdev,
+			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+			PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+	if (device_info->needs_pci_atomics && ret < 0) {
+		dev_info(kfd_device,
+			 "skipped device %x:%x, PCI rejects atomics\n",
+			 pdev->vendor, pdev->device);
+		return NULL;
 	}
 
 	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);

From 389056e5fef477c838dc20a08d6f1de960cf027b Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 10 Apr 2018 17:33:18 -0400
Subject: [PATCH 0273/1286] drm/amdkfd: Add Vega10 topology and device info

* Report 64-bit doorbells as HSA_CAP_DOORBELL_TYPE_2_0 in topology
* Report cache information in topology (duplicates GFXv8 info for now)
* Add device info for Vega10 support in KFD

Raven is not enabled at this time as it needs additional changes in
DQM to work with a single SDMA engine.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_crat.c     | 11 +++++++
 drivers/gpu/drm/amd/amdkfd/kfd_device.c   | 37 +++++++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c |  6 ++++
 drivers/gpu/drm/amd/amdkfd/kfd_topology.h |  1 +
 4 files changed, 55 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 4f126ef6139b..296b3f230280 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -132,6 +132,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
 #define fiji_cache_info  carrizo_cache_info
 #define polaris10_cache_info carrizo_cache_info
 #define polaris11_cache_info carrizo_cache_info
+/* TODO - check & update Vega10 cache details */
+#define vega10_cache_info carrizo_cache_info
+#define raven_cache_info carrizo_cache_info
 
 static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
 		struct crat_subtype_computeunit *cu)
@@ -603,6 +606,14 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
 		pcache_info = polaris11_cache_info;
 		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
 		break;
+	case CHIP_VEGA10:
+		pcache_info = vega10_cache_info;
+		num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
+		break;
+	case CHIP_RAVEN:
+		pcache_info = raven_cache_info;
+		num_of_cache_types = ARRAY_SIZE(raven_cache_info);
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0e64fb2c95e5..dd6c7535b6b4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -182,6 +182,34 @@ static const struct kfd_device_info polaris11_device_info = {
 	.needs_pci_atomics = true,
 };
 
+static const struct kfd_device_info vega10_device_info = {
+	.asic_family = CHIP_VEGA10,
+	.max_pasid_bits = 16,
+	.max_no_of_hqd  = 24,
+	.doorbell_size  = 8,
+	.ih_ring_entry_size = 8 * sizeof(uint32_t),
+	.event_interrupt_class = &event_interrupt_class_v9,
+	.num_of_watch_points = 4,
+	.mqd_size_aligned = MQD_SIZE_ALIGNED,
+	.supports_cwsr = true,
+	.needs_iommu_device = false,
+	.needs_pci_atomics = false,
+};
+
+static const struct kfd_device_info vega10_vf_device_info = {
+	.asic_family = CHIP_VEGA10,
+	.max_pasid_bits = 16,
+	.max_no_of_hqd  = 24,
+	.doorbell_size  = 8,
+	.ih_ring_entry_size = 8 * sizeof(uint32_t),
+	.event_interrupt_class = &event_interrupt_class_v9,
+	.num_of_watch_points = 4,
+	.mqd_size_aligned = MQD_SIZE_ALIGNED,
+	.supports_cwsr = true,
+	.needs_iommu_device = false,
+	.needs_pci_atomics = false,
+};
+
 
 struct kfd_deviceid {
 	unsigned short did;
@@ -261,6 +289,15 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
 	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
 	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
+	{ 0x6860, &vega10_device_info },	/* Vega10 */
+	{ 0x6861, &vega10_device_info },	/* Vega10 */
+	{ 0x6862, &vega10_device_info },	/* Vega10 */
+	{ 0x6863, &vega10_device_info },	/* Vega10 */
+	{ 0x6864, &vega10_device_info },	/* Vega10 */
+	{ 0x6867, &vega10_device_info },	/* Vega10 */
+	{ 0x6868, &vega10_device_info },	/* Vega10 */
+	{ 0x686C, &vega10_vf_device_info },	/* Vega10 VF */
+	{ 0x687F, &vega10_device_info },	/* Vega10 */
 };
 
 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index ac28abc94e57..bc95d4dfee2e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1239,6 +1239,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
 			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
 		break;
+	case CHIP_VEGA10:
+	case CHIP_RAVEN:
+		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+		break;
 	default:
 		WARN(1, "Unexpected ASIC family %u",
 		     dev->gpu->device_info->asic_family);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index eb54cfcaf039..7d9c3f948dff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -45,6 +45,7 @@
 
 #define HSA_CAP_DOORBELL_TYPE_PRE_1_0		0x0
 #define HSA_CAP_DOORBELL_TYPE_1_0		0x1
+#define HSA_CAP_DOORBELL_TYPE_2_0		0x2
 #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP		0x00004000
 
 struct kfd_node_properties {

From 2924bdee21edd6785a4df1b4d17fd3cb265fddd9 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Tue, 10 Apr 2018 12:27:04 +0100
Subject: [PATCH 0274/1286] drm/i915/pmu: Inspect runtime PM state more
 carefully while estimating RC6

While thinking about sporadic failures of perf_pmu/rc6-runtime-pm* tests
on some CI machines I have concluded that: a) the PMU readout of RC6 can
race against runtime PM transitions, and b) there are other reasons than
being runtime suspended which can cause intel_runtime_pm_get_if_in_use to
fail.

Therefore, when estimating RC6, the code needs to check that we are indeed
in the runtime-suspended state, and if not, the best we can do is return
the last known RC6 value.

Without this check we can calculate the estimated value based on
uninitialized or inappropriate internal state, which can result in
over-estimation or, in any case, an incorrect value being returned.

v2:
 * Re-arrange the code a bit to avoid second unlock and return branch.
   (Chris Wilson)

v3:
 * Insert some strategic blank lines and improve commit msg.
   (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Fixes: 1fe699e30113 ("drm/i915/pmu: Fix sleep under atomic in RC6 readout")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105010
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180410112704.24462-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/i915_pmu.c | 39 +++++++++++++++++++++++----------
 1 file changed, 28 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 11fb76bd3860..dc87797db500 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -452,20 +452,37 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		spin_lock_irqsave(&i915->pmu.lock, flags);
 		spin_lock(&kdev->power.lock);
 
-		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
-			i915->pmu.suspended_jiffies_last =
-						kdev->power.suspended_jiffies;
+		/*
+		 * If we take this branch, intel_runtime_pm_get_if_in_use failed
+		 * to get the runtime PM reference, but we cannot assume we are
+		 * in runtime suspend since we can either: a) race with coming
+		 * out of it before we took the power.lock, or b) be in a state
+		 * other than suspended which can also bring us here.
+		 *
+		 * We need to double-check that we are indeed currently runtime
+		 * suspended and, if not, we cannot do better than to report the
+		 * last known RC6 value.
+		 */
+		if (kdev->power.runtime_status == RPM_SUSPENDED) {
+			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+				i915->pmu.suspended_jiffies_last =
+						  kdev->power.suspended_jiffies;
 
-		val = kdev->power.suspended_jiffies -
-		      i915->pmu.suspended_jiffies_last;
-		val += jiffies - kdev->power.accounting_timestamp;
+			val = kdev->power.suspended_jiffies -
+			      i915->pmu.suspended_jiffies_last;
+			val += jiffies - kdev->power.accounting_timestamp;
+
+			val = jiffies_to_nsecs(val);
+			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+
+			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+		} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+		} else {
+			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+		}
 
 		spin_unlock(&kdev->power.lock);
-
-		val = jiffies_to_nsecs(val);
-		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
-		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-
 		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	}
 

From a493ceae72ab21e6451fedb1a321862e51dc6cb7 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 4 Apr 2018 11:57:09 +0200
Subject: [PATCH 0275/1286] drm/sun4i: tcon: Add TRI finish interrupt for
 vblank

The "CPU" (or Intel 8080) interface uses a different interrupt called
TRI_FINISH (most likely TRI being for trigger) to notify the end of frames,
and hence the VBLANK period.

Add that interrupt to the possible VBLANK interrupt sources.

Reviewed-by: Chen-Yu Tsai <wens@csie.org>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/de6d6ad8959da77ea3a974a31a4c0c8391178748.1522835818.git-series.maxime.ripard@bootlin.com
---
 drivers/gpu/drm/sun4i/sun4i_tcon.c | 9 ++++++---
 drivers/gpu/drm/sun4i/sun4i_tcon.h | 4 ++++
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c3d92d537240..5f423ed2f01b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -201,7 +201,8 @@ void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
 	DRM_DEBUG_DRIVER("%sabling VBLANK interrupt\n", enable ? "En" : "Dis");
 
 	mask = SUN4I_TCON_GINT0_VBLANK_ENABLE(0) |
-	       SUN4I_TCON_GINT0_VBLANK_ENABLE(1);
+		SUN4I_TCON_GINT0_VBLANK_ENABLE(1) |
+		SUN4I_TCON_GINT0_TCON0_TRI_FINISH_ENABLE;
 
 	if (enable)
 		val = mask;
@@ -582,7 +583,8 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
 	regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
 
 	if (!(status & (SUN4I_TCON_GINT0_VBLANK_INT(0) |
-			SUN4I_TCON_GINT0_VBLANK_INT(1))))
+			SUN4I_TCON_GINT0_VBLANK_INT(1) |
+			SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT)))
 		return IRQ_NONE;
 
 	drm_crtc_handle_vblank(&scrtc->crtc);
@@ -591,7 +593,8 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
 	/* Acknowledge the interrupt */
 	regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG,
 			   SUN4I_TCON_GINT0_VBLANK_INT(0) |
-			   SUN4I_TCON_GINT0_VBLANK_INT(1),
+			   SUN4I_TCON_GINT0_VBLANK_INT(1) |
+			   SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT,
 			   0);
 
 	if (engine->ops->vblank_quirk)
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 161e09427124..2e0fb9640ed9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -28,7 +28,11 @@
 
 #define SUN4I_TCON_GINT0_REG			0x4
 #define SUN4I_TCON_GINT0_VBLANK_ENABLE(pipe)		BIT(31 - (pipe))
+#define SUN4I_TCON_GINT0_TCON0_TRI_FINISH_ENABLE	BIT(27)
+#define SUN4I_TCON_GINT0_TCON0_TRI_COUNTER_ENABLE	BIT(26)
 #define SUN4I_TCON_GINT0_VBLANK_INT(pipe)		BIT(15 - (pipe))
+#define SUN4I_TCON_GINT0_TCON0_TRI_FINISH_INT		BIT(11)
+#define SUN4I_TCON_GINT0_TCON0_TRI_COUNTER_INT		BIT(10)
 
 #define SUN4I_TCON_GINT1_REG			0x8
 #define SUN4I_TCON_FRM_CTL_REG			0x10

From 7605225004d99df343acf647c2870e45a1d38d97 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 4 Apr 2018 11:57:10 +0200
Subject: [PATCH 0276/1286] dt-bindings: display: Add Allwinner MIPI-DSI
 bindings

The Allwinner SoCs usually come with a DSI encoder. Add a binding for it.

Reviewed-by: Rob Herring <robh@kernel.org>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/cdc5402570199f5c08211f29d9182ea5948d3c40.1522835818.git-series.maxime.ripard@bootlin.com
---
 .../bindings/display/sunxi/sun6i-dsi.txt      | 93 +++++++++++++++++++
 1 file changed, 93 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt

diff --git a/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt b/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt
new file mode 100644
index 000000000000..6a6cf5de08b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt
@@ -0,0 +1,93 @@
+Allwinner A31 DSI Encoder
+=========================
+
+The DSI pipeline consists of two separate blocks: the DSI controller
+itself, and its associated D-PHY.
+
+DSI Encoder
+-----------
+
+The DSI Encoder generates the DSI signal from the TCON's output.
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun6i-a31-mipi-dsi
+  - reg: base address and size of memory-mapped region
+  - interrupts: interrupt associated with this IP
+  - clocks: phandles to the clocks feeding the DSI encoder
+    * bus: the DSI interface clock
+    * mod: the DSI module clock
+  - clock-names: the clock names mentioned above
+  - phys: phandle to the D-PHY
+  - phy-names: must be "dphy"
+  - resets: phandle to the reset controller driving the encoder
+
+  - ports: A ports node with endpoint definitions as defined in
+    Documentation/devicetree/bindings/media/video-interfaces.txt. The
+    first port should be the input endpoint, usually coming from the
+    associated TCON.
+
+Any MIPI-DSI device attached to this should be described according to
+the bindings defined in ../mipi-dsi-bus.txt
+
+D-PHY
+-----
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun6i-a31-mipi-dphy
+  - reg: base address and size of memory-mapped region
+  - clocks: phandles to the clocks feeding the D-PHY
+    * bus: the DSI interface clock
+    * mod: the DSI module clock
+  - clock-names: the clock names mentioned above
+  - resets: phandle to the reset controller driving the D-PHY
+
+Example:
+
+dsi0: dsi@1ca0000 {
+	compatible = "allwinner,sun6i-a31-mipi-dsi";
+	reg = <0x01ca0000 0x1000>;
+	interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&ccu CLK_BUS_MIPI_DSI>,
+		 <&ccu CLK_DSI_SCLK>;
+	clock-names = "bus", "mod";
+	resets = <&ccu RST_BUS_MIPI_DSI>;
+	phys = <&dphy0>;
+	phy-names = "dphy";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	panel@0 {
+		compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+		reg = <0>;
+		power-gpios = <&pio 1 7 GPIO_ACTIVE_HIGH>; /* PB07 */
+		reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+		backlight = <&pwm_bl>;
+	};
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+
+			dsi0_in_tcon0: endpoint {
+				remote-endpoint = <&tcon0_out_dsi0>;
+			};
+		};
+	};
+};
+
+dphy0: d-phy@1ca1000 {
+	compatible = "allwinner,sun6i-a31-mipi-dphy";
+	reg = <0x01ca1000 0x1000>;
+	clocks = <&ccu CLK_BUS_MIPI_DSI>,
+		 <&ccu CLK_DSI_DPHY>;
+	clock-names = "bus", "mod";
+	resets = <&ccu RST_BUS_MIPI_DSI>;
+	#phy-cells = <0>;
+};

From 133add5b5ad42b7bb5fcd59d681aef6475d08600 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 4 Apr 2018 11:57:11 +0200
Subject: [PATCH 0277/1286] drm/sun4i: Add Allwinner A31 MIPI-DSI controller
 support

Most of the Allwinner SoCs since the A31 share the same MIPI-DSI
controller.

While that controller is mostly undocumented, the code is out there and has
been cleaned up in order to be integrated into DRM. However, there are
still some dark areas where it is not entirely clear how the block
operates.

Reviewed-by: Chen-Yu Tsai <wens@csie.org>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ad9e6224fced87c0889ddd2765d1942610061f72.1522835818.git-series.maxime.ripard@bootlin.com
---
 drivers/gpu/drm/sun4i/Kconfig           |   10 +
 drivers/gpu/drm/sun4i/Makefile          |    4 +
 drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c |  292 ++++++
 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c  | 1107 +++++++++++++++++++++++
 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h  |   63 ++
 5 files changed, 1476 insertions(+)
 create mode 100644 drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c
 create mode 100644 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
 create mode 100644 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h

diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index eee6bc0eaf97..156a865c3e6d 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -40,6 +40,16 @@ config DRM_SUN4I_BACKEND
 	  do some alpha blending and feed graphics to TCON. If M is
 	  selected the module will be called sun4i-backend.
 
+config DRM_SUN6I_DSI
+	tristate "Allwinner A31 MIPI-DSI Controller Support"
+	default MACH_SUN8I
+	select CRC_CCITT
+	select DRM_MIPI_DSI
+	help
+	  Choose this option if you want to have an Allwinner SoC with
+	  MIPI-DSI support. If M is selected the module will be called
+	  sun6i-dsi.
+
 config DRM_SUN8I_DW_HDMI
 	tristate "Support for Allwinner version of DesignWare HDMI"
 	depends on DRM_SUN4I
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 330843ce4280..2589f4acd5ae 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -24,6 +24,9 @@ sun4i-tcon-y			+= sun4i_lvds.o
 sun4i-tcon-y			+= sun4i_tcon.o
 sun4i-tcon-y			+= sun4i_rgb.o
 
+sun6i-dsi-y			+= sun6i_mipi_dphy.o
+sun6i-dsi-y			+= sun6i_mipi_dsi.o
+
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i-drm.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i_tv.o
@@ -31,5 +34,6 @@ obj-$(CONFIG_DRM_SUN4I)		+= sun6i_drc.o
 
 obj-$(CONFIG_DRM_SUN4I_BACKEND)	+= sun4i-backend.o sun4i-frontend.o
 obj-$(CONFIG_DRM_SUN4I_HDMI)	+= sun4i-drm-hdmi.o
+obj-$(CONFIG_DRM_SUN6I_DSI)	+= sun6i-dsi.o
 obj-$(CONFIG_DRM_SUN8I_DW_HDMI)	+= sun8i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN8I_MIXER)	+= sun8i-mixer.o
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c
new file mode 100644
index 000000000000..e4d19431fa0e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Allwinnertech Co., Ltd.
+ * Copyright (C) 2017-2018 Bootlin
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "sun6i_mipi_dsi.h"
+
+#define SUN6I_DPHY_GCTL_REG		0x00
+#define SUN6I_DPHY_GCTL_LANE_NUM(n)		((((n) - 1) & 3) << 4)
+#define SUN6I_DPHY_GCTL_EN			BIT(0)
+
+#define SUN6I_DPHY_TX_CTL_REG		0x04
+#define SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT	BIT(28)
+
+#define SUN6I_DPHY_TX_TIME0_REG		0x10
+#define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n)		(((n) & 0xff) << 24)
+#define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n)	(((n) & 0xff) << 16)
+#define SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(n)	((n) & 0xff)
+
+#define SUN6I_DPHY_TX_TIME1_REG		0x14
+#define SUN6I_DPHY_TX_TIME1_CLK_POST(n)		(((n) & 0xff) << 24)
+#define SUN6I_DPHY_TX_TIME1_CLK_PRE(n)		(((n) & 0xff) << 16)
+#define SUN6I_DPHY_TX_TIME1_CLK_ZERO(n)		(((n) & 0xff) << 8)
+#define SUN6I_DPHY_TX_TIME1_CLK_PREPARE(n)	((n) & 0xff)
+
+#define SUN6I_DPHY_TX_TIME2_REG		0x18
+#define SUN6I_DPHY_TX_TIME2_CLK_TRAIL(n)	((n) & 0xff)
+
+#define SUN6I_DPHY_TX_TIME3_REG		0x1c
+
+#define SUN6I_DPHY_TX_TIME4_REG		0x20
+#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n)	(((n) & 0xff) << 8)
+#define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n)	((n) & 0xff)
+
+#define SUN6I_DPHY_ANA0_REG		0x4c
+#define SUN6I_DPHY_ANA0_REG_PWS			BIT(31)
+#define SUN6I_DPHY_ANA0_REG_DMPC		BIT(28)
+#define SUN6I_DPHY_ANA0_REG_DMPD(n)		(((n) & 0xf) << 24)
+#define SUN6I_DPHY_ANA0_REG_SLV(n)		(((n) & 7) << 12)
+#define SUN6I_DPHY_ANA0_REG_DEN(n)		(((n) & 0xf) << 8)
+
+#define SUN6I_DPHY_ANA1_REG		0x50
+#define SUN6I_DPHY_ANA1_REG_VTTMODE		BIT(31)
+#define SUN6I_DPHY_ANA1_REG_CSMPS(n)		(((n) & 3) << 28)
+#define SUN6I_DPHY_ANA1_REG_SVTT(n)		(((n) & 0xf) << 24)
+
+#define SUN6I_DPHY_ANA2_REG		0x54
+#define SUN6I_DPHY_ANA2_EN_P2S_CPU(n)		(((n) & 0xf) << 24)
+#define SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK		GENMASK(27, 24)
+#define SUN6I_DPHY_ANA2_EN_CK_CPU		BIT(4)
+#define SUN6I_DPHY_ANA2_REG_ENIB		BIT(1)
+
+#define SUN6I_DPHY_ANA3_REG		0x58
+#define SUN6I_DPHY_ANA3_EN_VTTD(n)		(((n) & 0xf) << 28)
+#define SUN6I_DPHY_ANA3_EN_VTTD_MASK		GENMASK(31, 28)
+#define SUN6I_DPHY_ANA3_EN_VTTC			BIT(27)
+#define SUN6I_DPHY_ANA3_EN_DIV			BIT(26)
+#define SUN6I_DPHY_ANA3_EN_LDOC			BIT(25)
+#define SUN6I_DPHY_ANA3_EN_LDOD			BIT(24)
+#define SUN6I_DPHY_ANA3_EN_LDOR			BIT(18)
+
+#define SUN6I_DPHY_ANA4_REG		0x5c
+#define SUN6I_DPHY_ANA4_REG_DMPLVC		BIT(24)
+#define SUN6I_DPHY_ANA4_REG_DMPLVD(n)		(((n) & 0xf) << 20)
+#define SUN6I_DPHY_ANA4_REG_CKDV(n)		(((n) & 0x1f) << 12)
+#define SUN6I_DPHY_ANA4_REG_TMSC(n)		(((n) & 3) << 10)
+#define SUN6I_DPHY_ANA4_REG_TMSD(n)		(((n) & 3) << 8)
+#define SUN6I_DPHY_ANA4_REG_TXDNSC(n)		(((n) & 3) << 6)
+#define SUN6I_DPHY_ANA4_REG_TXDNSD(n)		(((n) & 3) << 4)
+#define SUN6I_DPHY_ANA4_REG_TXPUSC(n)		(((n) & 3) << 2)
+#define SUN6I_DPHY_ANA4_REG_TXPUSD(n)		((n) & 3)
+
+#define SUN6I_DPHY_DBG5_REG		0xf4
+
+int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes)
+{
+	reset_control_deassert(dphy->reset);
+	clk_prepare_enable(dphy->mod_clk);
+	clk_set_rate_exclusive(dphy->mod_clk, 150000000);
+
+	regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG,
+		     SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT);
+
+	regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME0_REG,
+		     SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(14) |
+		     SUN6I_DPHY_TX_TIME0_HS_PREPARE(6) |
+		     SUN6I_DPHY_TX_TIME0_HS_TRAIL(10));
+
+	regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME1_REG,
+		     SUN6I_DPHY_TX_TIME1_CLK_PREPARE(7) |
+		     SUN6I_DPHY_TX_TIME1_CLK_ZERO(50) |
+		     SUN6I_DPHY_TX_TIME1_CLK_PRE(3) |
+		     SUN6I_DPHY_TX_TIME1_CLK_POST(10));
+
+	regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME2_REG,
+		     SUN6I_DPHY_TX_TIME2_CLK_TRAIL(30));
+
+	regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME3_REG, 0);
+
+	regmap_write(dphy->regs, SUN6I_DPHY_TX_TIME4_REG,
+		     SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(3) |
+		     SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(3));
+
+	regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG,
+		     SUN6I_DPHY_GCTL_LANE_NUM(lanes) |
+		     SUN6I_DPHY_GCTL_EN);
+
+	return 0;
+}
+
+int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes)
+{
+	u8 lanes_mask = GENMASK(lanes - 1, 0);
+
+	regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG,
+		     SUN6I_DPHY_ANA0_REG_PWS |
+		     SUN6I_DPHY_ANA0_REG_DMPC |
+		     SUN6I_DPHY_ANA0_REG_SLV(7) |
+		     SUN6I_DPHY_ANA0_REG_DMPD(lanes_mask) |
+		     SUN6I_DPHY_ANA0_REG_DEN(lanes_mask));
+
+	regmap_write(dphy->regs, SUN6I_DPHY_ANA1_REG,
+		     SUN6I_DPHY_ANA1_REG_CSMPS(1) |
+		     SUN6I_DPHY_ANA1_REG_SVTT(7));
+
+	regmap_write(dphy->regs, SUN6I_DPHY_ANA4_REG,
+		     SUN6I_DPHY_ANA4_REG_CKDV(1) |
+		     SUN6I_DPHY_ANA4_REG_TMSC(1) |
+		     SUN6I_DPHY_ANA4_REG_TMSD(1) |
+		     SUN6I_DPHY_ANA4_REG_TXDNSC(1) |
+		     SUN6I_DPHY_ANA4_REG_TXDNSD(1) |
+		     SUN6I_DPHY_ANA4_REG_TXPUSC(1) |
+		     SUN6I_DPHY_ANA4_REG_TXPUSD(1) |
+		     SUN6I_DPHY_ANA4_REG_DMPLVC |
+		     SUN6I_DPHY_ANA4_REG_DMPLVD(lanes_mask));
+
+	regmap_write(dphy->regs, SUN6I_DPHY_ANA2_REG,
+		     SUN6I_DPHY_ANA2_REG_ENIB);
+	udelay(5);
+
+	regmap_write(dphy->regs, SUN6I_DPHY_ANA3_REG,
+		     SUN6I_DPHY_ANA3_EN_LDOR |
+		     SUN6I_DPHY_ANA3_EN_LDOC |
+		     SUN6I_DPHY_ANA3_EN_LDOD);
+	udelay(1);
+
+	regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG,
+			   SUN6I_DPHY_ANA3_EN_VTTC |
+			   SUN6I_DPHY_ANA3_EN_VTTD_MASK,
+			   SUN6I_DPHY_ANA3_EN_VTTC |
+			   SUN6I_DPHY_ANA3_EN_VTTD(lanes_mask));
+	udelay(1);
+
+	regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA3_REG,
+			   SUN6I_DPHY_ANA3_EN_DIV,
+			   SUN6I_DPHY_ANA3_EN_DIV);
+	udelay(1);
+
+	regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
+			   SUN6I_DPHY_ANA2_EN_CK_CPU,
+			   SUN6I_DPHY_ANA2_EN_CK_CPU);
+	udelay(1);
+
+	regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
+			   SUN6I_DPHY_ANA1_REG_VTTMODE,
+			   SUN6I_DPHY_ANA1_REG_VTTMODE);
+
+	regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA2_REG,
+			   SUN6I_DPHY_ANA2_EN_P2S_CPU_MASK,
+			   SUN6I_DPHY_ANA2_EN_P2S_CPU(lanes_mask));
+
+	return 0;
+}
+
+int sun6i_dphy_power_off(struct sun6i_dphy *dphy)
+{
+	regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG,
+			   SUN6I_DPHY_ANA1_REG_VTTMODE, 0);
+
+	return 0;
+}
+
+int sun6i_dphy_exit(struct sun6i_dphy *dphy)
+{
+	clk_rate_exclusive_put(dphy->mod_clk);
+	clk_disable_unprepare(dphy->mod_clk);
+	reset_control_assert(dphy->reset);
+
+	return 0;
+}
+
+static struct regmap_config sun6i_dphy_regmap_config = {
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+	.max_register	= SUN6I_DPHY_DBG5_REG,
+	.name		= "mipi-dphy",
+};
+
+static const struct of_device_id sun6i_dphy_of_table[] = {
+	{ .compatible = "allwinner,sun6i-a31-mipi-dphy" },
+	{ }
+};
+
+int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node)
+{
+	struct sun6i_dphy *dphy;
+	struct resource res;
+	void __iomem *regs;
+	int ret;
+
+	if (!of_match_node(sun6i_dphy_of_table, node)) {
+		dev_err(dsi->dev, "Incompatible D-PHY\n");
+		return -EINVAL;
+	}
+
+	dphy = devm_kzalloc(dsi->dev, sizeof(*dphy), GFP_KERNEL);
+	if (!dphy)
+		return -ENOMEM;
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		dev_err(dsi->dev, "phy: Couldn't get our resources\n");
+		return ret;
+	}
+
+	regs = devm_ioremap_resource(dsi->dev, &res);
+	if (IS_ERR(regs)) {
+		dev_err(dsi->dev, "Couldn't map the DPHY encoder registers\n");
+		return PTR_ERR(regs);
+	}
+
+	dphy->regs = devm_regmap_init_mmio(dsi->dev, regs,
+					   &sun6i_dphy_regmap_config);
+	if (IS_ERR(dphy->regs)) {
+		dev_err(dsi->dev, "Couldn't create the DPHY encoder regmap\n");
+		return PTR_ERR(dphy->regs);
+	}
+
+	dphy->reset = of_reset_control_get_shared(node, NULL);
+	if (IS_ERR(dphy->reset)) {
+		dev_err(dsi->dev, "Couldn't get our reset line\n");
+		return PTR_ERR(dphy->reset);
+	}
+
+	dphy->bus_clk = of_clk_get_by_name(node, "bus");
+	if (IS_ERR(dphy->bus_clk)) {
+		dev_err(dsi->dev, "Couldn't get the DPHY bus clock\n");
+		ret = PTR_ERR(dphy->bus_clk);
+		goto err_free_reset;
+	}
+	regmap_mmio_attach_clk(dphy->regs, dphy->bus_clk);
+
+	dphy->mod_clk = of_clk_get_by_name(node, "mod");
+	if (IS_ERR(dphy->mod_clk)) {
+		dev_err(dsi->dev, "Couldn't get the DPHY mod clock\n");
+		ret = PTR_ERR(dphy->mod_clk);
+		goto err_free_bus;
+	}
+
+	dsi->dphy = dphy;
+
+	return 0;
+
+err_free_bus:
+	regmap_mmio_detach_clk(dphy->regs);
+	clk_put(dphy->bus_clk);
+err_free_reset:
+	reset_control_put(dphy->reset);
+	return ret;
+}
+
+int sun6i_dphy_remove(struct sun6i_dsi *dsi)
+{
+	struct sun6i_dphy *dphy = dsi->dphy;
+
+	regmap_mmio_detach_clk(dphy->regs);
+	clk_put(dphy->mod_clk);
+	clk_put(dphy->bus_clk);
+	reset_control_put(dphy->reset);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
new file mode 100644
index 000000000000..bfbf761f0c1d
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Allwinnertech Co., Ltd.
+ * Copyright (C) 2017-2018 Bootlin
+ *
+ * Maxime Ripard <maxime.ripard@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/crc-ccitt.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <linux/phy/phy.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include "sun4i_drv.h"
+#include "sun6i_mipi_dsi.h"
+
+#include <video/mipi_display.h>
+
+#define SUN6I_DSI_CTL_REG		0x000
+#define SUN6I_DSI_CTL_EN			BIT(0)
+
+#define SUN6I_DSI_BASIC_CTL_REG		0x00c
+#define SUN6I_DSI_BASIC_CTL_HBP_DIS		BIT(2)
+#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS		BIT(1)
+#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST		BIT(0)
+
+#define SUN6I_DSI_BASIC_CTL0_REG	0x010
+#define SUN6I_DSI_BASIC_CTL0_HS_EOTP_EN		BIT(18)
+#define SUN6I_DSI_BASIC_CTL0_CRC_EN		BIT(17)
+#define SUN6I_DSI_BASIC_CTL0_ECC_EN		BIT(16)
+#define SUN6I_DSI_BASIC_CTL0_INST_ST		BIT(0)
+
+#define SUN6I_DSI_BASIC_CTL1_REG	0x014
+#define SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(n)	(((n) & 0x1fff) << 4)
+#define SUN6I_DSI_BASIC_CTL1_VIDEO_FILL		BIT(2)
+#define SUN6I_DSI_BASIC_CTL1_VIDEO_PRECISION	BIT(1)
+#define SUN6I_DSI_BASIC_CTL1_VIDEO_MODE		BIT(0)
+
+#define SUN6I_DSI_BASIC_SIZE0_REG	0x018
+#define SUN6I_DSI_BASIC_SIZE0_VBP(n)		(((n) & 0xfff) << 16)
+#define SUN6I_DSI_BASIC_SIZE0_VSA(n)		((n) & 0xfff)
+
+#define SUN6I_DSI_BASIC_SIZE1_REG	0x01c
+#define SUN6I_DSI_BASIC_SIZE1_VT(n)		(((n) & 0xfff) << 16)
+#define SUN6I_DSI_BASIC_SIZE1_VACT(n)		((n) & 0xfff)
+
+#define SUN6I_DSI_INST_FUNC_REG(n)	(0x020 + (n) * 0x04)
+#define SUN6I_DSI_INST_FUNC_INST_MODE(n)	(((n) & 0xf) << 28)
+#define SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(n)	(((n) & 0xf) << 24)
+#define SUN6I_DSI_INST_FUNC_TRANS_PACKET(n)	(((n) & 0xf) << 20)
+#define SUN6I_DSI_INST_FUNC_LANE_CEN		BIT(4)
+#define SUN6I_DSI_INST_FUNC_LANE_DEN(n)		((n) & 0xf)
+
+#define SUN6I_DSI_INST_LOOP_SEL_REG	0x040
+
+#define SUN6I_DSI_INST_LOOP_NUM_REG(n)	(0x044 + (n) * 0x10)
+#define SUN6I_DSI_INST_LOOP_NUM_N1(n)		(((n) & 0xfff) << 16)
+#define SUN6I_DSI_INST_LOOP_NUM_N0(n)		((n) & 0xfff)
+
+#define SUN6I_DSI_INST_JUMP_SEL_REG	0x048
+
+#define SUN6I_DSI_INST_JUMP_CFG_REG(n)	(0x04c + (n) * 0x04)
+#define SUN6I_DSI_INST_JUMP_CFG_TO(n)		(((n) & 0xf) << 20)
+#define SUN6I_DSI_INST_JUMP_CFG_POINT(n)	(((n) & 0xf) << 16)
+#define SUN6I_DSI_INST_JUMP_CFG_NUM(n)		((n) & 0xffff)
+
+#define SUN6I_DSI_TRANS_START_REG	0x060
+
+#define SUN6I_DSI_TRANS_ZERO_REG	0x078
+
+#define SUN6I_DSI_TCON_DRQ_REG		0x07c
+#define SUN6I_DSI_TCON_DRQ_ENABLE_MODE		BIT(28)
+#define SUN6I_DSI_TCON_DRQ_SET(n)		((n) & 0x3ff)
+
+#define SUN6I_DSI_PIXEL_CTL0_REG	0x080
+#define SUN6I_DSI_PIXEL_CTL0_PD_PLUG_DISABLE	BIT(16)
+#define SUN6I_DSI_PIXEL_CTL0_FORMAT(n)		((n) & 0xf)
+
+#define SUN6I_DSI_PIXEL_CTL1_REG	0x084
+
+#define SUN6I_DSI_PIXEL_PH_REG		0x090
+#define SUN6I_DSI_PIXEL_PH_ECC(n)		(((n) & 0xff) << 24)
+#define SUN6I_DSI_PIXEL_PH_WC(n)		(((n) & 0xffff) << 8)
+#define SUN6I_DSI_PIXEL_PH_VC(n)		(((n) & 3) << 6)
+#define SUN6I_DSI_PIXEL_PH_DT(n)		((n) & 0x3f)
+
+#define SUN6I_DSI_PIXEL_PF0_REG		0x098
+#define SUN6I_DSI_PIXEL_PF0_CRC_FORCE(n)	((n) & 0xffff)
+
+#define SUN6I_DSI_PIXEL_PF1_REG		0x09c
+#define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(n)	(((n) & 0xffff) << 16)
+#define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(n)	((n) & 0xffff)
+
+#define SUN6I_DSI_SYNC_HSS_REG		0x0b0
+
+#define SUN6I_DSI_SYNC_HSE_REG		0x0b4
+
+#define SUN6I_DSI_SYNC_VSS_REG		0x0b8
+
+#define SUN6I_DSI_SYNC_VSE_REG		0x0bc
+
+#define SUN6I_DSI_BLK_HSA0_REG		0x0c0
+
+#define SUN6I_DSI_BLK_HSA1_REG		0x0c4
+#define SUN6I_DSI_BLK_PF(n)			(((n) & 0xffff) << 16)
+#define SUN6I_DSI_BLK_PD(n)			((n) & 0xff)
+
+#define SUN6I_DSI_BLK_HBP0_REG		0x0c8
+
+#define SUN6I_DSI_BLK_HBP1_REG		0x0cc
+
+#define SUN6I_DSI_BLK_HFP0_REG		0x0d0
+
+#define SUN6I_DSI_BLK_HFP1_REG		0x0d4
+
+#define SUN6I_DSI_BLK_HBLK0_REG		0x0e0
+
+#define SUN6I_DSI_BLK_HBLK1_REG		0x0e4
+
+#define SUN6I_DSI_BLK_VBLK0_REG		0x0e8
+
+#define SUN6I_DSI_BLK_VBLK1_REG		0x0ec
+
+#define SUN6I_DSI_BURST_LINE_REG	0x0f0
+#define SUN6I_DSI_BURST_LINE_SYNC_POINT(n)	(((n) & 0xffff) << 16)
+#define SUN6I_DSI_BURST_LINE_NUM(n)		((n) & 0xffff)
+
+#define SUN6I_DSI_BURST_DRQ_REG		0x0f4
+#define SUN6I_DSI_BURST_DRQ_EDGE1(n)		(((n) & 0xffff) << 16)
+#define SUN6I_DSI_BURST_DRQ_EDGE0(n)		((n) & 0xffff)
+
+#define SUN6I_DSI_CMD_CTL_REG		0x200
+#define SUN6I_DSI_CMD_CTL_RX_OVERFLOW		BIT(26)
+#define SUN6I_DSI_CMD_CTL_RX_FLAG		BIT(25)
+#define SUN6I_DSI_CMD_CTL_TX_FLAG		BIT(9)
+
+#define SUN6I_DSI_CMD_RX_REG(n)		(0x240 + (n) * 0x04)
+
+#define SUN6I_DSI_DEBUG_DATA_REG	0x2f8
+
+#define SUN6I_DSI_CMD_TX_REG(n)		(0x300 + (n) * 0x04)
+
+enum sun6i_dsi_start_inst {
+	DSI_START_LPRX,
+	DSI_START_LPTX,
+	DSI_START_HSC,
+	DSI_START_HSD,
+};
+
+enum sun6i_dsi_inst_id {
+	DSI_INST_ID_LP11	= 0,
+	DSI_INST_ID_TBA,
+	DSI_INST_ID_HSC,
+	DSI_INST_ID_HSD,
+	DSI_INST_ID_LPDT,
+	DSI_INST_ID_HSCEXIT,
+	DSI_INST_ID_NOP,
+	DSI_INST_ID_DLY,
+	DSI_INST_ID_END		= 15,
+};
+
+enum sun6i_dsi_inst_mode {
+	DSI_INST_MODE_STOP	= 0,
+	DSI_INST_MODE_TBA,
+	DSI_INST_MODE_HS,
+	DSI_INST_MODE_ESCAPE,
+	DSI_INST_MODE_HSCEXIT,
+	DSI_INST_MODE_NOP,
+};
+
+enum sun6i_dsi_inst_escape {
+	DSI_INST_ESCA_LPDT	= 0,
+	DSI_INST_ESCA_ULPS,
+	DSI_INST_ESCA_UN1,
+	DSI_INST_ESCA_UN2,
+	DSI_INST_ESCA_RESET,
+	DSI_INST_ESCA_UN3,
+	DSI_INST_ESCA_UN4,
+	DSI_INST_ESCA_UN5,
+};
+
+enum sun6i_dsi_inst_packet {
+	DSI_INST_PACK_PIXEL	= 0,
+	DSI_INST_PACK_COMMAND,
+};
+
+static const u32 sun6i_dsi_ecc_array[] = {
+	[0] = (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(7) | BIT(10) |
+	       BIT(11) | BIT(13) | BIT(16) | BIT(20) | BIT(21) | BIT(22) |
+	       BIT(23)),
+	[1] = (BIT(0) | BIT(1) | BIT(3) | BIT(4) | BIT(6) | BIT(8) | BIT(10) |
+	       BIT(12) | BIT(14) | BIT(17) | BIT(20) | BIT(21) | BIT(22) |
+	       BIT(23)),
+	[2] = (BIT(0) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(9) | BIT(11) |
+	       BIT(12) | BIT(15) | BIT(18) | BIT(20) | BIT(21) | BIT(22)),
+	[3] = (BIT(1) | BIT(2) | BIT(3) | BIT(7) | BIT(8) | BIT(9) | BIT(13) |
+	       BIT(14) | BIT(15) | BIT(19) | BIT(20) | BIT(21) | BIT(23)),
+	[4] = (BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(16) |
+	       BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(22) | BIT(23)),
+	[5] = (BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) |
+	       BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(21) | BIT(22) |
+	       BIT(23)),
+};
+
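+/*
+ * Computes the ECC byte of a DSI packet header: each bit i of the result is
+ * the parity (XOR) of the header bits selected by sun6i_dsi_ecc_array[i],
+ * which corresponds to the Hamming-code header ECC defined by the MIPI DSI
+ * specification.
+ */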
+static u32 sun6i_dsi_ecc_compute(unsigned int data)
+{
+	int i;
+	u8 ecc = 0;
+
+	for (i = 0; i < ARRAY_SIZE(sun6i_dsi_ecc_array); i++) {
+		u32 field = sun6i_dsi_ecc_array[i];
+		bool init = false;
+		u8 val = 0;
+		int j;
+
+		for (j = 0; j < 24; j++) {
+			if (!(BIT(j) & field))
+				continue;
+
+			if (!init) {
+				val = (BIT(j) & data) ? 1 : 0;
+				init = true;
+			} else {
+				val ^= (BIT(j) & data) ? 1 : 0;
+			}
+		}
+
+		ecc |= val << i;
+	}
+
+	return ecc;
+}
+
+static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len)
+{
+	return crc_ccitt(0xffff, buffer, len);
+}
+
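+/*
+ * CRC over a payload made of 'len' copies of the padding byte 'pd'; used by
+ * sun6i_dsi_build_blk1_pkt() below to precompute the packet footer of the
+ * blanking packets.
+ */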
+static u16 sun6i_dsi_crc_repeat_compute(u8 pd, size_t len)
+{
+	u8 buffer[len];
+
+	memset(buffer, pd, len);
+
+	return sun6i_dsi_crc_compute(buffer, len);
+}
+
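+/*
+ * Builds a 4-byte DSI packet header: Data Type in bits [5:0], Virtual Channel
+ * in bits [7:6], the two data / word count bytes in bits [15:8] and [23:16],
+ * and the ECC computed over those 24 bits in bits [31:24].
+ */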
+static u32 sun6i_dsi_build_sync_pkt(u8 dt, u8 vc, u8 d0, u8 d1)
+{
+	u32 val = dt & 0x3f;
+
+	val |= (vc & 3) << 6;
+	val |= (d0 & 0xff) << 8;
+	val |= (d1 & 0xff) << 16;
+	val |= sun6i_dsi_ecc_compute(val) << 24;
+
+	return val;
+}
+
+static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc)
+{
+	return sun6i_dsi_build_sync_pkt(MIPI_DSI_BLANKING_PACKET, vc,
+					wc & 0xff, wc >> 8);
+}
+
+static u32 sun6i_dsi_build_blk1_pkt(u16 pd, size_t len)
+{
+	u32 val = SUN6I_DSI_BLK_PD(pd);
+
+	return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat_compute(pd, len));
+}
+
+static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi)
+{
+	regmap_update_bits(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+			   SUN6I_DSI_BASIC_CTL0_INST_ST, 0);
+}
+
+static void sun6i_dsi_inst_commit(struct sun6i_dsi *dsi)
+{
+	regmap_update_bits(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+			   SUN6I_DSI_BASIC_CTL0_INST_ST,
+			   SUN6I_DSI_BASIC_CTL0_INST_ST);
+}
+
+static int sun6i_dsi_inst_wait_for_completion(struct sun6i_dsi *dsi)
+{
+	u32 val;
+
+	return regmap_read_poll_timeout(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+					val,
+					!(val & SUN6I_DSI_BASIC_CTL0_INST_ST),
+					100, 5000);
+}
+
+static void sun6i_dsi_inst_setup(struct sun6i_dsi *dsi,
+				 enum sun6i_dsi_inst_id id,
+				 enum sun6i_dsi_inst_mode mode,
+				 bool clock, u8 data,
+				 enum sun6i_dsi_inst_packet packet,
+				 enum sun6i_dsi_inst_escape escape)
+{
+	regmap_write(dsi->regs, SUN6I_DSI_INST_FUNC_REG(id),
+		     SUN6I_DSI_INST_FUNC_INST_MODE(mode) |
+		     SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(escape) |
+		     SUN6I_DSI_INST_FUNC_TRANS_PACKET(packet) |
+		     (clock ? SUN6I_DSI_INST_FUNC_LANE_CEN : 0) |
+		     SUN6I_DSI_INST_FUNC_LANE_DEN(data));
+}
+
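+/*
+ * Program the controller's instruction slots: each DSI_INST_ID_* entry
+ * is given a transfer mode, the lanes it drives (clock lane and/or a
+ * data-lane mask) and, where relevant, the packet type or escape
+ * command it carries. sun6i_dsi_start() later chains these slots
+ * through the INST_JUMP_SEL register to form the transfer sequences.
+ */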
+static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
+				struct mipi_dsi_device *device)
+{
+	u8 lanes_mask = GENMASK(device->lanes - 1, 0);
+
+	sun6i_dsi_inst_setup(dsi, DSI_INST_ID_LP11, DSI_INST_MODE_STOP,
+			     true, lanes_mask, 0, 0);
+
+	sun6i_dsi_inst_setup(dsi, DSI_INST_ID_TBA, DSI_INST_MODE_TBA,
+			     false, 1, 0, 0);
+
+	sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSC, DSI_INST_MODE_HS,
+			     true, 0, DSI_INST_PACK_PIXEL, 0);
+
+	sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSD, DSI_INST_MODE_HS,
+			     false, lanes_mask, DSI_INST_PACK_PIXEL, 0);
+
+	sun6i_dsi_inst_setup(dsi, DSI_INST_ID_LPDT, DSI_INST_MODE_ESCAPE,
+			     false, 1, DSI_INST_PACK_COMMAND,
+			     DSI_INST_ESCA_LPDT);
+
+	sun6i_dsi_inst_setup(dsi, DSI_INST_ID_HSCEXIT, DSI_INST_MODE_HSCEXIT,
+			     true, 0, 0, 0);
+
+	sun6i_dsi_inst_setup(dsi, DSI_INST_ID_NOP, DSI_INST_MODE_STOP,
+			     false, lanes_mask, 0, 0);
+
+	sun6i_dsi_inst_setup(dsi, DSI_INST_ID_DLY, DSI_INST_MODE_NOP,
+			     true, lanes_mask, 0, 0);
+
+	regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_CFG_REG(0),
+		     SUN6I_DSI_INST_JUMP_CFG_POINT(DSI_INST_ID_NOP) |
+		     SUN6I_DSI_INST_JUMP_CFG_TO(DSI_INST_ID_HSCEXIT) |
+		     SUN6I_DSI_INST_JUMP_CFG_NUM(1));
+}
+
+static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
+					   struct drm_display_mode *mode)
+{
+	return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
+}
+
+static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
+				  struct drm_display_mode *mode)
+{
+	struct mipi_dsi_device *device = dsi->device;
+	u32 val = 0;
+
+	if ((mode->hsync_end - mode->hdisplay) > 20) {
+		/* Maaaaaagic */
+		u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
+
+		drq *= mipi_dsi_pixel_format_to_bpp(device->format);
+		drq /= 32;
+
+		val = (SUN6I_DSI_TCON_DRQ_ENABLE_MODE |
+		       SUN6I_DSI_TCON_DRQ_SET(drq));
+	}
+
+	regmap_write(dsi->regs, SUN6I_DSI_TCON_DRQ_REG, val);
+}
+
+static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
+				      struct drm_display_mode *mode)
+{
+	u16 delay = 50 - 1;
+
+	regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0),
+		     SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
+		     SUN6I_DSI_INST_LOOP_NUM_N1(delay));
+	regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(1),
+		     SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
+		     SUN6I_DSI_INST_LOOP_NUM_N1(delay));
+}
+
+static void sun6i_dsi_setup_format(struct sun6i_dsi *dsi,
+				   struct drm_display_mode *mode)
+{
+	struct mipi_dsi_device *device = dsi->device;
+	u32 val = SUN6I_DSI_PIXEL_PH_VC(device->channel);
+	u8 dt, fmt;
+	u16 wc;
+
+	/*
+	 * TODO: The format defines are only valid in video mode and
+	 * change in command mode.
+	 */
+	switch (device->format) {
+	case MIPI_DSI_FMT_RGB888:
+		dt = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+		fmt = 8;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		dt = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+		fmt = 9;
+		break;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		dt = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+		fmt = 10;
+		break;
+	case MIPI_DSI_FMT_RGB565:
+		dt = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+		fmt = 11;
+		break;
+	default:
+		return;
+	}
+	val |= SUN6I_DSI_PIXEL_PH_DT(dt);
+
+	wc = mode->hdisplay * mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+	val |= SUN6I_DSI_PIXEL_PH_WC(wc);
+	val |= SUN6I_DSI_PIXEL_PH_ECC(sun6i_dsi_ecc_compute(val));
+
+	regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PH_REG, val);
+
+	regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PF0_REG,
+		     SUN6I_DSI_PIXEL_PF0_CRC_FORCE(0xffff));
+
+	regmap_write(dsi->regs, SUN6I_DSI_PIXEL_PF1_REG,
+		     SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(0xffff) |
+		     SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(0xffff));
+
+	regmap_write(dsi->regs, SUN6I_DSI_PIXEL_CTL0_REG,
+		     SUN6I_DSI_PIXEL_CTL0_PD_PLUG_DISABLE |
+		     SUN6I_DSI_PIXEL_CTL0_FORMAT(fmt));
+}
+
+static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
+				    struct drm_display_mode *mode)
+{
+	struct mipi_dsi_device *device = dsi->device;
+	unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+	u16 hbp, hfp, hsa, hblk, vblk;
+
+	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
+
+	regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG,
+		     sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START,
+					      device->channel,
+					      0, 0));
+
+	regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSE_REG,
+		     sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_END,
+					      device->channel,
+					      0, 0));
+
+	regmap_write(dsi->regs, SUN6I_DSI_SYNC_VSS_REG,
+		     sun6i_dsi_build_sync_pkt(MIPI_DSI_V_SYNC_START,
+					      device->channel,
+					      0, 0));
+
+	regmap_write(dsi->regs, SUN6I_DSI_SYNC_VSE_REG,
+		     sun6i_dsi_build_sync_pkt(MIPI_DSI_V_SYNC_END,
+					      device->channel,
+					      0, 0));
+
+	regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG,
+		     SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end -
+					       mode->vsync_start) |
+		     SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start -
+					       mode->vdisplay));
+
+	regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG,
+		     SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
+		     SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal));
+
+	/*
+	 * A sync period is composed of a blanking packet (4 bytes +
+	 * payload + 2 bytes) and a sync event packet (4 bytes). Its
+	 * minimal size is therefore 10 bytes
+	 */
+#define HSA_PACKET_OVERHEAD	10
+	hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+		  (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG,
+		     sun6i_dsi_build_blk0_pkt(device->channel, hsa));
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG,
+		     sun6i_dsi_build_blk1_pkt(0, hsa));
+
+	/*
+	 * The backporch is set using a blanking packet (4 bytes +
+	 * payload + 2 bytes). Its minimal size is therefore 6 bytes
+	 */
+#define HBP_PACKET_OVERHEAD	6
+	hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+		  (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG,
+		     sun6i_dsi_build_blk0_pkt(device->channel, hbp));
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG,
+		     sun6i_dsi_build_blk1_pkt(0, hbp));
+
+	/*
+	 * The frontporch is set using a blanking packet (4 bytes +
+	 * payload + 2 bytes). Its minimal size is therefore 6 bytes
+	 */
+#define HFP_PACKET_OVERHEAD	6
+	hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+		  (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG,
+		     sun6i_dsi_build_blk0_pkt(device->channel, hfp));
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG,
+		     sun6i_dsi_build_blk1_pkt(0, hfp));
+
+	/*
+	 * hblk seems to be the line + porches length.
+	 */
+	hblk = mode->htotal * Bpp - hsa;
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG,
+		     sun6i_dsi_build_blk0_pkt(device->channel, hblk));
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG,
+		     sun6i_dsi_build_blk1_pkt(0, hblk));
+
+	/*
+	 * And I'm not entirely sure what vblk is about. The driver in
+	 * the Allwinner BSP uses a rather convoluted calculation there,
+	 * but only for 4 lanes. However, using 0 (the non-4-lane case)
+	 * even with a 4-lane screen seems to work...
+	 */
+	vblk = 0;
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG,
+		     sun6i_dsi_build_blk0_pkt(device->channel, vblk));
+	regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG,
+		     sun6i_dsi_build_blk1_pkt(0, vblk));
+}
+
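+/*
+ * Each 4-bit field of the INST_JUMP_SEL register is indexed by an
+ * instruction ID and holds the ID of the instruction to run next, so
+ * the writes below describe, for each transfer type, which instruction
+ * follows which, with DSI_INST_ID_END terminating a chain.
+ */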
+static int sun6i_dsi_start(struct sun6i_dsi *dsi,
+			   enum sun6i_dsi_start_inst func)
+{
+	switch (func) {
+	case DSI_START_LPTX:
+		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+			     DSI_INST_ID_LPDT << (4 * DSI_INST_ID_LP11) |
+			     DSI_INST_ID_END  << (4 * DSI_INST_ID_LPDT));
+		break;
+	case DSI_START_LPRX:
+		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+			     DSI_INST_ID_LPDT << (4 * DSI_INST_ID_LP11) |
+			     DSI_INST_ID_DLY  << (4 * DSI_INST_ID_LPDT) |
+			     DSI_INST_ID_TBA  << (4 * DSI_INST_ID_DLY) |
+			     DSI_INST_ID_END  << (4 * DSI_INST_ID_TBA));
+		break;
+	case DSI_START_HSC:
+		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+			     DSI_INST_ID_HSC  << (4 * DSI_INST_ID_LP11) |
+			     DSI_INST_ID_END  << (4 * DSI_INST_ID_HSC));
+		break;
+	case DSI_START_HSD:
+		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+			     DSI_INST_ID_NOP  << (4 * DSI_INST_ID_LP11) |
+			     DSI_INST_ID_HSD  << (4 * DSI_INST_ID_NOP) |
+			     DSI_INST_ID_DLY  << (4 * DSI_INST_ID_HSD) |
+			     DSI_INST_ID_NOP  << (4 * DSI_INST_ID_DLY) |
+			     DSI_INST_ID_END  << (4 * DSI_INST_ID_HSCEXIT));
+		break;
+	default:
+		regmap_write(dsi->regs, SUN6I_DSI_INST_JUMP_SEL_REG,
+			     DSI_INST_ID_END  << (4 * DSI_INST_ID_LP11));
+		break;
+	}
+
+	sun6i_dsi_inst_abort(dsi);
+	sun6i_dsi_inst_commit(dsi);
+
+	if (func == DSI_START_HSC)
+		regmap_write_bits(dsi->regs,
+				  SUN6I_DSI_INST_FUNC_REG(DSI_INST_ID_LP11),
+				  SUN6I_DSI_INST_FUNC_LANE_CEN, 0);
+
+	return 0;
+}
+
+static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+	struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
+	struct mipi_dsi_device *device = dsi->device;
+	u16 delay;
+
+	DRM_DEBUG_DRIVER("Enabling DSI output\n");
+
+	pm_runtime_get_sync(dsi->dev);
+
+	delay = sun6i_dsi_get_video_start_delay(dsi, mode);
+	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
+		     SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(delay) |
+		     SUN6I_DSI_BASIC_CTL1_VIDEO_FILL |
+		     SUN6I_DSI_BASIC_CTL1_VIDEO_PRECISION |
+		     SUN6I_DSI_BASIC_CTL1_VIDEO_MODE);
+
+	sun6i_dsi_setup_burst(dsi, mode);
+	sun6i_dsi_setup_inst_loop(dsi, mode);
+	sun6i_dsi_setup_format(dsi, mode);
+	sun6i_dsi_setup_timings(dsi, mode);
+
+	sun6i_dphy_init(dsi->dphy, device->lanes);
+	sun6i_dphy_power_on(dsi->dphy, device->lanes);
+
+	if (!IS_ERR(dsi->panel))
+		drm_panel_prepare(dsi->panel);
+
+	/*
+	 * FIXME: This should be moved after the switch to HS mode.
+	 *
+	 * Unfortunately, once in HS mode, it seems like we're not
+	 * able to send DCS commands anymore, which would prevent any
+	 * panel from sending any DCS command as part of its enable
+	 * method, which is quite common.
+	 *
+	 * I haven't seen any artifact due to that sub-optimal
+	 * ordering on the panels I've tested it with, so I guess this
+	 * will do for now, until that IP is better understood.
+	 */
+	if (!IS_ERR(dsi->panel))
+		drm_panel_enable(dsi->panel);
+
+	sun6i_dsi_start(dsi, DSI_START_HSC);
+
+	udelay(1000);
+
+	sun6i_dsi_start(dsi, DSI_START_HSD);
+}
+
+static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
+
+	DRM_DEBUG_DRIVER("Disabling DSI output\n");
+
+	if (!IS_ERR(dsi->panel)) {
+		drm_panel_disable(dsi->panel);
+		drm_panel_unprepare(dsi->panel);
+	}
+
+	sun6i_dphy_power_off(dsi->dphy);
+	sun6i_dphy_exit(dsi->dphy);
+
+	pm_runtime_put(dsi->dev);
+}
+
+static int sun6i_dsi_get_modes(struct drm_connector *connector)
+{
+	struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
+
+	return drm_panel_get_modes(dsi->panel);
+}
+
+static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
+	.get_modes	= sun6i_dsi_get_modes,
+};
+
+static enum drm_connector_status
+sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
+	.detect			= sun6i_dsi_connector_detect,
+	.fill_modes		= drm_helper_probe_single_connector_modes,
+	.destroy		= drm_connector_cleanup,
+	.reset			= drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
+	.disable	= sun6i_dsi_encoder_disable,
+	.enable		= sun6i_dsi_encoder_enable,
+};
+
+static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = {
+	.destroy	= drm_encoder_cleanup,
+};
+
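+/*
+ * Build the 4-byte command packet header: byte 0 carries the data
+ * type, bytes 1-2 carry the payload word count (long writes) or the
+ * first parameter bytes (short writes), and byte 3 carries the ECC of
+ * the lower 24 bits.
+ */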
+static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
+				       const struct mipi_dsi_msg *msg)
+{
+	u32 pkt = msg->type;
+
+	if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
+		pkt |= ((msg->tx_len + 1) & 0xffff) << 8;
+		pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16;
+	} else {
+		pkt |= (((u8 *)msg->tx_buf)[0] << 8);
+		if (msg->tx_len > 1)
+			pkt |= (((u8 *)msg->tx_buf)[1] << 16);
+	}
+
+	pkt |= sun6i_dsi_ecc_compute(pkt) << 24;
+
+	return pkt;
+}
+
+static int sun6i_dsi_dcs_write_short(struct sun6i_dsi *dsi,
+				     const struct mipi_dsi_msg *msg)
+{
+	regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
+		     sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
+	regmap_write_bits(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
+			  0xff, (4 - 1));
+
+	sun6i_dsi_start(dsi, DSI_START_LPTX);
+
+	return msg->tx_len;
+}
+
+static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
+				    const struct mipi_dsi_msg *msg)
+{
+	int ret, len = 0;
+	u8 *bounce;
+	u16 crc;
+
+	regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
+		     sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
+
+	bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL);
+	if (!bounce)
+		return -ENOMEM;
+
+	memcpy(bounce, msg->tx_buf, msg->tx_len);
+	len += msg->tx_len;
+
+	crc = sun6i_dsi_crc_compute(bounce, msg->tx_len);
+	memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
+	len += sizeof(crc);
+
+	regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len);
+	regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
+	kfree(bounce);
+
+	sun6i_dsi_start(dsi, DSI_START_LPTX);
+
+	ret = sun6i_dsi_inst_wait_for_completion(dsi);
+	if (ret < 0) {
+		sun6i_dsi_inst_abort(dsi);
+		return ret;
+	}
+
+	/*
+	 * TODO: There are some bits (reg 0x200, bits 8/9) that
+	 * apparently can be used to check whether the data have been
+	 * sent, but I couldn't get it to work reliably.
+	 */
+	return msg->tx_len;
+}
+
+static int sun6i_dsi_dcs_read(struct sun6i_dsi *dsi,
+			      const struct mipi_dsi_msg *msg)
+{
+	u32 val;
+	int ret;
+	u8 byte0;
+
+	regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
+		     sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
+	regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
+		     (4 - 1));
+
+	sun6i_dsi_start(dsi, DSI_START_LPRX);
+
+	ret = sun6i_dsi_inst_wait_for_completion(dsi);
+	if (ret < 0) {
+		sun6i_dsi_inst_abort(dsi);
+		return ret;
+	}
+
+	/*
+	 * TODO: There are some bits (reg 0x200, bits 24/25) that
+	 * apparently can be used to check whether the data have been
+	 * received, but I couldn't get it to work reliably.
+	 */
+	regmap_read(dsi->regs, SUN6I_DSI_CMD_CTL_REG, &val);
+	if (val & SUN6I_DSI_CMD_CTL_RX_OVERFLOW)
+		return -EIO;
+
+	regmap_read(dsi->regs, SUN6I_DSI_CMD_RX_REG(0), &val);
+	byte0 = val & 0xff;
+	if (byte0 == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT)
+		return -EIO;
+
+	((u8 *)msg->rx_buf)[0] = (val >> 8);
+
+	return 1;
+}
+
+static int sun6i_dsi_attach(struct mipi_dsi_host *host,
+			    struct mipi_dsi_device *device)
+{
+	struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+
+	dsi->device = device;
+	dsi->panel = of_drm_find_panel(device->dev.of_node);
+	if (!dsi->panel)
+		return -EINVAL;
+
+	dev_info(host->dev, "Attached device %s\n", device->name);
+
+	return 0;
+}
+
+static int sun6i_dsi_detach(struct mipi_dsi_host *host,
+			    struct mipi_dsi_device *device)
+{
+	struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+
+	dsi->panel = NULL;
+	dsi->device = NULL;
+
+	return 0;
+}
+
+static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
+				  const struct mipi_dsi_msg *msg)
+{
+	struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+	int ret;
+
+	ret = sun6i_dsi_inst_wait_for_completion(dsi);
+	if (ret < 0)
+		sun6i_dsi_inst_abort(dsi);
+
+	regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG,
+		     SUN6I_DSI_CMD_CTL_RX_OVERFLOW |
+		     SUN6I_DSI_CMD_CTL_RX_FLAG |
+		     SUN6I_DSI_CMD_CTL_TX_FLAG);
+
+	switch (msg->type) {
+	case MIPI_DSI_DCS_SHORT_WRITE:
+	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+		ret = sun6i_dsi_dcs_write_short(dsi, msg);
+		break;
+
+	case MIPI_DSI_DCS_LONG_WRITE:
+		ret = sun6i_dsi_dcs_write_long(dsi, msg);
+		break;
+
+	case MIPI_DSI_DCS_READ:
+		if (msg->rx_len == 1) {
+			ret = sun6i_dsi_dcs_read(dsi, msg);
+			break;
+		}
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static const struct mipi_dsi_host_ops sun6i_dsi_host_ops = {
+	.attach		= sun6i_dsi_attach,
+	.detach		= sun6i_dsi_detach,
+	.transfer	= sun6i_dsi_transfer,
+};
+
+static const struct regmap_config sun6i_dsi_regmap_config = {
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+	.max_register	= SUN6I_DSI_CMD_TX_REG(255),
+	.name		= "mipi-dsi",
+};
+
+static int sun6i_dsi_bind(struct device *dev, struct device *master,
+			 void *data)
+{
+	struct drm_device *drm = data;
+	struct sun4i_drv *drv = drm->dev_private;
+	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+	int ret;
+
+	if (!dsi->panel)
+		return -EPROBE_DEFER;
+
+	dsi->drv = drv;
+
+	drm_encoder_helper_add(&dsi->encoder,
+			       &sun6i_dsi_enc_helper_funcs);
+	ret = drm_encoder_init(drm,
+			       &dsi->encoder,
+			       &sun6i_dsi_enc_funcs,
+			       DRM_MODE_ENCODER_DSI,
+			       NULL);
+	if (ret) {
+		dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
+		return ret;
+	}
+	dsi->encoder.possible_crtcs = BIT(0);
+
+	drm_connector_helper_add(&dsi->connector,
+				 &sun6i_dsi_connector_helper_funcs);
+	ret = drm_connector_init(drm, &dsi->connector,
+				 &sun6i_dsi_connector_funcs,
+				 DRM_MODE_CONNECTOR_DSI);
+	if (ret) {
+		dev_err(dsi->dev,
+			"Couldn't initialise the DSI connector\n");
+		goto err_cleanup_connector;
+	}
+
+	drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder);
+	drm_panel_attach(dsi->panel, &dsi->connector);
+
+	return 0;
+
+err_cleanup_connector:
+	drm_encoder_cleanup(&dsi->encoder);
+	return ret;
+}
+
+static void sun6i_dsi_unbind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+
+	drm_panel_detach(dsi->panel);
+}
+
+static const struct component_ops sun6i_dsi_ops = {
+	.bind	= sun6i_dsi_bind,
+	.unbind	= sun6i_dsi_unbind,
+};
+
+static int sun6i_dsi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *dphy_node;
+	struct sun6i_dsi *dsi;
+	struct resource *res;
+	void __iomem *base;
+	int ret;
+
+	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return -ENOMEM;
+	dev_set_drvdata(dev, dsi);
+	dsi->dev = dev;
+	dsi->host.ops = &sun6i_dsi_host_ops;
+	dsi->host.dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base)) {
+		dev_err(dev, "Couldn't map the DSI encoder registers\n");
+		return PTR_ERR(base);
+	}
+
+	dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
+					      &sun6i_dsi_regmap_config);
+	if (IS_ERR(dsi->regs)) {
+		dev_err(dev, "Couldn't create the DSI encoder regmap\n");
+		return PTR_ERR(dsi->regs);
+	}
+
+	dsi->reset = devm_reset_control_get_shared(dev, NULL);
+	if (IS_ERR(dsi->reset)) {
+		dev_err(dev, "Couldn't get our reset line\n");
+		return PTR_ERR(dsi->reset);
+	}
+
+	dsi->mod_clk = devm_clk_get(dev, "mod");
+	if (IS_ERR(dsi->mod_clk)) {
+		dev_err(dev, "Couldn't get the DSI mod clock\n");
+		return PTR_ERR(dsi->mod_clk);
+	}
+
+	/*
+	 * In order to operate properly, that clock always seems to be
+	 * set to 297 MHz.
+	 */
+	clk_set_rate_exclusive(dsi->mod_clk, 297000000);
+
+	dphy_node = of_parse_phandle(dev->of_node, "phys", 0);
+	ret = sun6i_dphy_probe(dsi, dphy_node);
+	of_node_put(dphy_node);
+	if (ret) {
+		dev_err(dev, "Couldn't get the MIPI D-PHY\n");
+		goto err_unprotect_clk;
+	}
+
+	pm_runtime_enable(dev);
+
+	ret = mipi_dsi_host_register(&dsi->host);
+	if (ret) {
+		dev_err(dev, "Couldn't register MIPI-DSI host\n");
+		goto err_remove_phy;
+	}
+
+	ret = component_add(&pdev->dev, &sun6i_dsi_ops);
+	if (ret) {
+		dev_err(dev, "Couldn't register our component\n");
+		goto err_remove_dsi_host;
+	}
+
+	return 0;
+
+err_remove_dsi_host:
+	mipi_dsi_host_unregister(&dsi->host);
+err_remove_phy:
+	pm_runtime_disable(dev);
+	sun6i_dphy_remove(dsi);
+err_unprotect_clk:
+	clk_rate_exclusive_put(dsi->mod_clk);
+	return ret;
+}
+
+static int sun6i_dsi_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+
+	component_del(&pdev->dev, &sun6i_dsi_ops);
+	mipi_dsi_host_unregister(&dsi->host);
+	pm_runtime_disable(dev);
+	sun6i_dphy_remove(dsi);
+	clk_rate_exclusive_put(dsi->mod_clk);
+
+	return 0;
+}
+
+static int sun6i_dsi_runtime_resume(struct device *dev)
+{
+	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+
+	reset_control_deassert(dsi->reset);
+	clk_prepare_enable(dsi->mod_clk);
+
+	/*
+	 * Enable the DSI block.
+	 *
+	 * Some parts of it can only be done once we know the number of
+	 * lanes; see sun6i_dsi_inst_init().
+	 */
+	regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
+
+	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+		     SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
+
+	regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
+	regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
+
+	if (dsi->device)
+		sun6i_dsi_inst_init(dsi, dsi->device);
+
+	regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
+
+	return 0;
+}
+
+static int sun6i_dsi_runtime_suspend(struct device *dev)
+{
+	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(dsi->mod_clk);
+	reset_control_assert(dsi->reset);
+
+	return 0;
+}
+
+static const struct dev_pm_ops sun6i_dsi_pm_ops = {
+	SET_RUNTIME_PM_OPS(sun6i_dsi_runtime_suspend,
+			   sun6i_dsi_runtime_resume,
+			   NULL)
+};
+
+static const struct of_device_id sun6i_dsi_of_table[] = {
+	{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
+
+static struct platform_driver sun6i_dsi_platform_driver = {
+	.probe		= sun6i_dsi_probe,
+	.remove		= sun6i_dsi_remove,
+	.driver		= {
+		.name		= "sun6i-mipi-dsi",
+		.of_match_table	= sun6i_dsi_of_table,
+		.pm		= &sun6i_dsi_pm_ops,
+	},
+};
+module_platform_driver(sun6i_dsi_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 DSI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
new file mode 100644
index 000000000000..dbbc5b3ecbda
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2016 Allwinnertech Co., Ltd.
+ * Copyright (C) 2017-2018 Bootlin
+ *
+ * Maxime Ripard <maxime.ripard@bootlin.com>
+ */
+
+#ifndef _SUN6I_MIPI_DSI_H_
+#define _SUN6I_MIPI_DSI_H_
+
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
+
+struct sun6i_dphy {
+	struct clk		*bus_clk;
+	struct clk		*mod_clk;
+	struct regmap		*regs;
+	struct reset_control	*reset;
+};
+
+struct sun6i_dsi {
+	struct drm_connector	connector;
+	struct drm_encoder	encoder;
+	struct mipi_dsi_host	host;
+
+	struct clk		*bus_clk;
+	struct clk		*mod_clk;
+	struct regmap		*regs;
+	struct reset_control	*reset;
+	struct sun6i_dphy	*dphy;
+
+	struct device		*dev;
+	struct sun4i_drv	*drv;
+	struct mipi_dsi_device	*device;
+	struct drm_panel	*panel;
+};
+
+static inline struct sun6i_dsi *host_to_sun6i_dsi(struct mipi_dsi_host *host)
+{
+	return container_of(host, struct sun6i_dsi, host);
+}
+
+static inline struct sun6i_dsi *connector_to_sun6i_dsi(struct drm_connector *connector)
+{
+	return container_of(connector, struct sun6i_dsi, connector);
+}
+
+static inline struct sun6i_dsi *encoder_to_sun6i_dsi(const struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct sun6i_dsi, encoder);
+}
+
+int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node);
+int sun6i_dphy_remove(struct sun6i_dsi *dsi);
+
+int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes);
+int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes);
+int sun6i_dphy_power_off(struct sun6i_dphy *dphy);
+int sun6i_dphy_exit(struct sun6i_dphy *dphy);
+
+#endif /* _SUN6I_MIPI_DSI_H_ */

From a08fc7c8056e75b08285a2ad955228002dcd86bc Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 4 Apr 2018 11:57:12 +0200
Subject: [PATCH 0278/1286] drm/sun4i: Tie the DSI controller in the TCON

The DSI controller needs a particular interface (CPU aka 8080) with some
modifications from the TCON in order to run.

Make sure the TCON is able to provide it when we are using the DSI output.

Reviewed-by: Chen-Yu Tsai <wens@csie.org>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/129f5928113d2ca865bf5269047c2e4ba6fed5e6.1522835818.git-series.maxime.ripard@bootlin.com
---
 drivers/gpu/drm/sun4i/sun4i_tcon.c | 77 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/sun4i/sun4i_tcon.h | 42 ++++++++++++++++
 2 files changed, 119 insertions(+)

diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 5f423ed2f01b..08747fc3ee71 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -35,6 +35,7 @@
 #include "sun4i_lvds.h"
 #include "sun4i_rgb.h"
 #include "sun4i_tcon.h"
+#include "sun6i_mipi_dsi.h"
 #include "sunxi_engine.h"
 
 static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder)
@@ -169,6 +170,7 @@ void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
 	case DRM_MODE_ENCODER_LVDS:
 		is_lvds = true;
 		/* Fallthrough */
+	case DRM_MODE_ENCODER_DSI:
 	case DRM_MODE_ENCODER_NONE:
 		channel = 0;
 		break;
@@ -274,6 +276,71 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
 		     SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
 }
 
+static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
+				     struct mipi_dsi_device *device,
+				     const struct drm_display_mode *mode)
+{
+	u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format);
+	u8 lanes = device->lanes;
+	u32 block_space, start_delay;
+	u32 tcon_div;
+
+	tcon->dclk_min_div = 4;
+	tcon->dclk_max_div = 127;
+
+	sun4i_tcon0_mode_set_common(tcon, mode);
+
+	regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+			   SUN4I_TCON0_CTL_IF_MASK,
+			   SUN4I_TCON0_CTL_IF_8080);
+
+	regmap_write(tcon->regs, SUN4I_TCON_ECC_FIFO_REG,
+		     SUN4I_TCON_ECC_FIFO_EN);
+
+	regmap_write(tcon->regs, SUN4I_TCON0_CPU_IF_REG,
+		     SUN4I_TCON0_CPU_IF_MODE_DSI |
+		     SUN4I_TCON0_CPU_IF_TRI_FIFO_FLUSH |
+		     SUN4I_TCON0_CPU_IF_TRI_FIFO_EN |
+		     SUN4I_TCON0_CPU_IF_TRI_EN);
+
+	/*
+	 * This looks suspicious, but it works...
+	 *
+	 * The datasheet says that this should be set higher than 20 *
+	 * pixel cycle, but it's not clear what a pixel cycle is.
+	 */
+	regmap_read(tcon->regs, SUN4I_TCON0_DCLK_REG, &tcon_div);
+	tcon_div &= GENMASK(6, 0);
+	block_space = mode->htotal * bpp / (tcon_div * lanes);
+	block_space -= mode->hdisplay + 40;
+
+	regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI0_REG,
+		     SUN4I_TCON0_CPU_TRI0_BLOCK_SPACE(block_space) |
+		     SUN4I_TCON0_CPU_TRI0_BLOCK_SIZE(mode->hdisplay));
+
+	regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI1_REG,
+		     SUN4I_TCON0_CPU_TRI1_BLOCK_NUM(mode->vdisplay));
+
+	start_delay = (mode->crtc_vtotal - mode->crtc_vdisplay - 10 - 1);
+	start_delay = start_delay * mode->crtc_htotal * 149;
+	start_delay = start_delay / (mode->crtc_clock / 1000) / 8;
+	regmap_write(tcon->regs, SUN4I_TCON0_CPU_TRI2_REG,
+		     SUN4I_TCON0_CPU_TRI2_TRANS_START_SET(10) |
+		     SUN4I_TCON0_CPU_TRI2_START_DELAY(start_delay));
+
+	/*
+	 * The Allwinner BSP has a comment that the period should be
+	 * the display clock * 15, but uses a hardcoded 3000...
+	 */
+	regmap_write(tcon->regs, SUN4I_TCON_SAFE_PERIOD_REG,
+		     SUN4I_TCON_SAFE_PERIOD_NUM(3000) |
+		     SUN4I_TCON_SAFE_PERIOD_MODE(3));
+
+	/* Enable the output on the pins */
+	regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG,
+		     0xe0000000);
+}
+
 static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 				      const struct drm_encoder *encoder,
 				      const struct drm_display_mode *mode)
@@ -539,7 +606,17 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
 			 const struct drm_encoder *encoder,
 			 const struct drm_display_mode *mode)
 {
+	struct sun6i_dsi *dsi;
+
 	switch (encoder->encoder_type) {
+	case DRM_MODE_ENCODER_DSI:
+		/*
+		 * This is not really elegant, but it's the "cleaner"
+		 * way I could think of...
+		 */
+		dsi = encoder_to_sun6i_dsi(encoder);
+		sun4i_tcon0_mode_set_cpu(tcon, dsi->device, mode);
+		break;
 	case DRM_MODE_ENCODER_LVDS:
 		sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
 		break;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 2e0fb9640ed9..f6a071cd5a6f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -35,10 +35,25 @@
 #define SUN4I_TCON_GINT0_TCON0_TRI_COUNTER_INT		BIT(10)
 
 #define SUN4I_TCON_GINT1_REG			0x8
+
 #define SUN4I_TCON_FRM_CTL_REG			0x10
+#define SUN4I_TCON_FRM_CTL_EN				BIT(31)
+
+#define SUN4I_TCON_FRM_SEED_PR_REG		0x14
+#define SUN4I_TCON_FRM_SEED_PG_REG		0x18
+#define SUN4I_TCON_FRM_SEED_PB_REG		0x1c
+#define SUN4I_TCON_FRM_SEED_LR_REG		0x20
+#define SUN4I_TCON_FRM_SEED_LG_REG		0x24
+#define SUN4I_TCON_FRM_SEED_LB_REG		0x28
+#define SUN4I_TCON_FRM_TBL0_REG			0x2c
+#define SUN4I_TCON_FRM_TBL1_REG			0x30
+#define SUN4I_TCON_FRM_TBL2_REG			0x34
+#define SUN4I_TCON_FRM_TBL3_REG			0x38
 
 #define SUN4I_TCON0_CTL_REG			0x40
 #define SUN4I_TCON0_CTL_TCON_ENABLE			BIT(31)
+#define SUN4I_TCON0_CTL_IF_MASK				GENMASK(25, 24)
+#define SUN4I_TCON0_CTL_IF_8080				(1 << 24)
 #define SUN4I_TCON0_CTL_CLK_DELAY_MASK			GENMASK(8, 4)
 #define SUN4I_TCON0_CTL_CLK_DELAY(delay)		((delay << 4) & SUN4I_TCON0_CTL_CLK_DELAY_MASK)
 #define SUN4I_TCON0_CTL_SRC_SEL_MASK			GENMASK(2, 0)
@@ -65,7 +80,14 @@
 #define SUN4I_TCON0_BASIC3_V_SYNC(height)		(((height) - 1) & 0x7ff)
 
 #define SUN4I_TCON0_HV_IF_REG			0x58
+
 #define SUN4I_TCON0_CPU_IF_REG			0x60
+#define SUN4I_TCON0_CPU_IF_MODE_MASK			GENMASK(31, 28)
+#define SUN4I_TCON0_CPU_IF_MODE_DSI			(1 << 28)
+#define SUN4I_TCON0_CPU_IF_TRI_FIFO_FLUSH		BIT(16)
+#define SUN4I_TCON0_CPU_IF_TRI_FIFO_EN			BIT(2)
+#define SUN4I_TCON0_CPU_IF_TRI_EN			BIT(0)
+
 #define SUN4I_TCON0_CPU_WR_REG			0x64
 #define SUN4I_TCON0_CPU_RD0_REG			0x68
 #define SUN4I_TCON0_CPU_RDA_REG			0x6c
@@ -132,6 +154,10 @@
 
 #define SUN4I_TCON1_IO_POL_REG			0xf0
 #define SUN4I_TCON1_IO_TRI_REG			0xf4
+
+#define SUN4I_TCON_ECC_FIFO_REG			0xf8
+#define SUN4I_TCON_ECC_FIFO_EN				BIT(3)
+
 #define SUN4I_TCON_CEU_CTL_REG			0x100
 #define SUN4I_TCON_CEU_MUL_RR_REG		0x110
 #define SUN4I_TCON_CEU_MUL_RG_REG		0x114
@@ -148,6 +174,22 @@
 #define SUN4I_TCON_CEU_RANGE_R_REG		0x140
 #define SUN4I_TCON_CEU_RANGE_G_REG		0x144
 #define SUN4I_TCON_CEU_RANGE_B_REG		0x148
+
+#define SUN4I_TCON0_CPU_TRI0_REG		0x160
+#define SUN4I_TCON0_CPU_TRI0_BLOCK_SPACE(space)		((((space) - 1) & 0xfff) << 16)
+#define SUN4I_TCON0_CPU_TRI0_BLOCK_SIZE(size)		(((size) - 1) & 0xfff)
+
+#define SUN4I_TCON0_CPU_TRI1_REG		0x164
+#define SUN4I_TCON0_CPU_TRI1_BLOCK_NUM(num)		(((num) - 1) & 0xffff)
+
+#define SUN4I_TCON0_CPU_TRI2_REG		0x168
+#define SUN4I_TCON0_CPU_TRI2_START_DELAY(delay)		(((delay) & 0xffff) << 16)
+#define SUN4I_TCON0_CPU_TRI2_TRANS_START_SET(set)	((set) & 0xfff)
+
+#define SUN4I_TCON_SAFE_PERIOD_REG		0x1f0
+#define SUN4I_TCON_SAFE_PERIOD_NUM(num)			(((num) & 0xfff) << 16)
+#define SUN4I_TCON_SAFE_PERIOD_MODE(mode)		((mode) & 0x3)
+
 #define SUN4I_TCON_MUX_CTRL_REG			0x200
 
 #define SUN4I_TCON0_LVDS_ANA0_REG		0x220

From f3aa929c59c945e50386fde65d37c24e8a898e48 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Tue, 10 Apr 2018 12:12:48 +0300
Subject: [PATCH 0279/1286] drm/i915/bios: remove duplicated code

Apparently caused by a merge fail at some point. Due to the nature of
the duplicated block, the second one will have no effect, and there's no
need to backport.

Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180410091248.1454-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_bios.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c5c7530ba157..6aae88c4df52 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1271,13 +1271,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		is_hdmi = false;
 	}
 
-	if (port == PORT_A && is_dvi) {
-		DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
-			      is_hdmi ? "/HDMI" : "");
-		is_dvi = false;
-		is_hdmi = false;
-	}
-
 	info->supports_dvi = is_dvi;
 	info->supports_hdmi = is_hdmi;
 	info->supports_dp = is_dp;

From 15c83c436424adf3fe0365e9085a82da1190c95e Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 11 Apr 2018 11:39:29 +0100
Subject: [PATCH 0280/1286] drm/i915/execlists: Set queue priority from
 secondary port
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We can refine our current execlists->queue_priority if we inspect
ELSP[1] rather than the head of the unsubmitted queue. Currently, we use
the unsubmitted queue and say that if a subsequent request is more
important than the current queue, we will rerun the submission tasklet
to evaluate the need for preemption. However, we only want to preempt if
we need to jump ahead of a currently executing request in ELSP. The
second reason for running the submission tasklet is to amalgamate requests
into the active context on ELSP[0] to avoid a stall when ELSP[0] drains.
(Though repeatedly amalgamating requests into the active context and
triggering many lite-restores is of questionable gain, the goal really is to
put a context into ELSP[1] to cover the interrupt.) So if instead of
looking at the head of the queue, we look at the context in ELSP[1] we
can answer both of the questions more accurately -- we don't need to
rerun the submission tasklet unless our new request is important enough
to feed into, at least, ELSP[1].
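
Roughly, the check on the submission side then reduces to something
like this (an illustration only, not the exact code):

	if (prio > engine->execlists.queue_priority)
		tasklet_hi_schedule(&engine->execlists.tasklet);

where queue_priority is now derived from the state of the ELSP ports
(see the comment added to execlists_dequeue() below) rather than from
the head of the unsubmitted queue.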

v2: Add some comments from the discussion with Tvrtko.
v3: More commentary to cross-reference queue_request()

References: f6322eddaff7 ("drm/i915/preemption: Allow preemption between submission ports")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180411103929.27374-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_engine_cs.c |  3 +++
 drivers/gpu/drm/i915/intel_lrc.c       | 21 ++++++++++++++++++++-
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 12486d8f534b..a217b3fe5f0b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1687,6 +1687,9 @@ void intel_engines_park(struct drm_i915_private *i915)
 			intel_engine_dump(engine, &p, NULL);
 		}
 
+		/* Must be reset upon idling, or we may miss the busy wakeup. */
+		GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);
+
 		if (engine->park)
 			engine->park(engine);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 02b25bf2378a..665d9e82e954 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -713,8 +713,27 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
+
 done:
-	execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
+	/*
+	 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
+	 *
+	 * We choose queue_priority such that if we add a request of greater
+	 * priority than this, we kick the submission tasklet to decide on
+	 * the right order of submitting the requests to hardware. We must
+	 * also be prepared to reorder requests as they are in-flight on the
+	 * HW. We derive the queue_priority then as the first "hole" in
+	 * the HW submission ports and if there are no available slots,
+	 * the priority of the lowest executing request, i.e. last.
+	 *
+	 * When we do receive a higher priority request ready to run from the
+	 * user, see queue_request(), the queue_priority is bumped to that
+	 * request triggering preemption on the next dequeue (or subsequent
+	 * interrupt for secondary ports).
+	 */
+	execlists->queue_priority =
+		port != execlists->port ? rq_prio(last) : INT_MIN;
+
 	execlists->first = rb;
 	if (submit)
 		port_assign(port, last);

From 86993018d7d23b934d1c884be0fbf0bcfa15b8c5 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Thu, 15 Mar 2018 16:40:02 -0400
Subject: [PATCH 0281/1286] drm/amdgpu: Add CM_TEST_DEBUG regs for DCN

We'd like to use them for reading DCN debug status.

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/include/asic_reg/dcn/dcn_1_0_offset.h | 19 ++++++++++++++++---
 .../include/asic_reg/dcn/dcn_1_0_sh_mask.h    |  8 ++++++++
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
index 4ccf9681c45d..721c61171045 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
@@ -3895,6 +3895,10 @@
 #define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX                                                                 2
 #define mmCM0_CM_MEM_PWR_STATUS                                                                        0x0d33
 #define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX                                                               2
+#define mmCM0_CM_TEST_DEBUG_INDEX                                                                      0x0d35
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX                                                             2
+#define mmCM0_CM_TEST_DEBUG_DATA                                                                       0x0d36
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX                                                              2
 
 
 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -4367,7 +4371,10 @@
 #define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX                                                                 2
 #define mmCM1_CM_MEM_PWR_STATUS                                                                        0x0e4e
 #define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX                                                               2
-
+#define mmCM1_CM_TEST_DEBUG_INDEX                                                                      0x0e50
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX                                                             2
+#define mmCM1_CM_TEST_DEBUG_DATA                                                                       0x0e51
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX                                                              2
 
 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
 // base address: 0x399c
@@ -4839,7 +4846,10 @@
 #define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX                                                                 2
 #define mmCM2_CM_MEM_PWR_STATUS                                                                        0x0f69
 #define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX                                                               2
-
+#define mmCM2_CM_TEST_DEBUG_INDEX                                                                      0x0f6b
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX                                                             2
+#define mmCM2_CM_TEST_DEBUG_DATA                                                                       0x0f6c
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX                                                              2
 
 // addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
 // base address: 0x3e08
@@ -5311,7 +5321,10 @@
 #define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX                                                                 2
 #define mmCM3_CM_MEM_PWR_STATUS                                                                        0x1084
 #define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX                                                               2
-
+#define mmCM3_CM_TEST_DEBUG_INDEX                                                                      0x1086
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX                                                             2
+#define mmCM3_CM_TEST_DEBUG_DATA                                                                       0x1087
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX                                                              2
 
 // addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
 // base address: 0x4274
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
index e2a2f114bd8e..e7c0cad41081 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
@@ -14049,6 +14049,14 @@
 #define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE__SHIFT                                                      0x2
 #define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK                                                      0x00000003L
 #define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE_MASK                                                        0x0000000CL
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT                                                   0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT                                                0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK                                                     0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK                                                  0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT                                                     0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK                                                       0xFFFFFFFFL
 
 
 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec

From 35d13315957f906774013ec374ce2263b665706c Mon Sep 17 00:00:00 2001
From: Martin Tsai <martin.tsai@amd.com>
Date: Wed, 7 Mar 2018 04:22:03 +0800
Subject: [PATCH 0282/1286] drm/amd/display: correct the condition in setting
 cursor not visible beyond left edge
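
A boundary example (hypothetical numbers, not taken from the patch):
with a 64-pixel-wide cursor at src_x_offset = -64, the rightmost cursor
column sits at x = -1, so the cursor is entirely off the left edge and
must be hidden. The previous strict "< 0" test evaluated -64 + 64 < 0
as false and left the cursor enabled in exactly this case; "<= 0" now
covers it.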

Signed-off-by: Martin Tsai <martin.tsai@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c  | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index e305c28c98de..3356125a6117 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -416,7 +416,7 @@ void dpp1_set_cursor_position(
 	if (src_x_offset >= (int)param->viewport_width)
 		cur_en = 0;  /* not visible beyond right edge*/
 
-	if (src_x_offset + (int)width < 0)
+	if (src_x_offset + (int)width <= 0)
 		cur_en = 0;  /* not visible beyond left edge*/
 
 	REG_UPDATE(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 39b72f696ae9..81b81e6efcd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -897,7 +897,7 @@ void hubp1_cursor_set_position(
 	if (src_x_offset >= (int)param->viewport_width)
 		cur_en = 0;  /* not visible beyond right edge*/
 
-	if (src_x_offset + (int)hubp->curs_attr.width < 0)
+	if (src_x_offset + (int)hubp->curs_attr.width <= 0)
 		cur_en = 0;  /* not visible beyond left edge*/
 
 	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)

From ba0a5aaa867d77cedb2cd6ad9e647243d9ba9650 Mon Sep 17 00:00:00 2001
From: Tony Cheng <tony.cheng@amd.com>
Date: Wed, 21 Feb 2018 16:41:42 -0500
Subject: [PATCH 0283/1286] drm/amd/display: dal 3.1.39

Signed-off-by: Tony Cheng <tony.cheng@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index fa4b3c8b3bb7..4d9da9d9c731 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.38"
+#define DC_VER "3.1.39"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6

From 3c1a312aa4e4201efa8719e70a6dccd3acd6eba4 Mon Sep 17 00:00:00 2001
From: Yongqiang Sun <yongqiang.sun@amd.com>
Date: Wed, 7 Mar 2018 09:12:53 -0500
Subject: [PATCH 0284/1286] drm/amd/display: Retry when read dpcd caps failed.

Some DP panels are intermittently not detected because reading the dpcd
caps fails during hot plug.
[root cause] DC_HPD_CONNECT_INT_DELAY is set to 0, so there is no delay
between the HPD toggle and the dpcd read, while some panels need a 4 ms
deferral before they can be read.
[solution] Add a retry when the read fails.

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 3b5053570229..b86325bb636f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2278,6 +2278,8 @@ static bool retrieve_link_cap(struct dc_link *link)
 	union edp_configuration_cap edp_config_cap;
 	union dp_downstream_port_present ds_port = { 0 };
 	enum dc_status status = DC_ERROR_UNEXPECTED;
+	uint32_t read_dpcd_retry_cnt = 3;
+	int i;
 
 	memset(dpcd_data, '\0', sizeof(dpcd_data));
 	memset(&down_strm_port_count,
@@ -2285,11 +2287,15 @@ static bool retrieve_link_cap(struct dc_link *link)
 	memset(&edp_config_cap, '\0',
 		sizeof(union edp_configuration_cap));
 
-	status = core_link_read_dpcd(
-			link,
-			DP_DPCD_REV,
-			dpcd_data,
-			sizeof(dpcd_data));
+	for (i = 0; i < read_dpcd_retry_cnt; i++) {
+		status = core_link_read_dpcd(
+				link,
+				DP_DPCD_REV,
+				dpcd_data,
+				sizeof(dpcd_data));
+		if (status == DC_OK)
+			break;
+	}
 
 	if (status != DC_OK) {
 		dm_error("%s: Read dpcd data failed.\n", __func__);

From b552204b10ef30940d374510a1572b2eb4e24af6 Mon Sep 17 00:00:00 2001
From: Nikola Cornij <nikola.cornij@amd.com>
Date: Tue, 6 Mar 2018 13:41:38 -0500
Subject: [PATCH 0285/1286] drm/amd/display: Update ASIC header files

Also separate register address initialization between ASICs for the
registers that were removed in a scaled-down variation of the ASIC.

Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 20 +++++++-----
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 32 ++++++++++++-------
 2 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 81b81e6efcd4..4ca9b6e9a824 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -535,11 +535,13 @@ void hubp1_program_deadline(
 	REG_SET(VBLANK_PARAMETERS_3, 0,
 		REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
 
-	REG_SET(NOM_PARAMETERS_0, 0,
-		DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
+	if (REG(NOM_PARAMETERS_0))
+		REG_SET(NOM_PARAMETERS_0, 0,
+			DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
 
-	REG_SET(NOM_PARAMETERS_1, 0,
-		REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
+	if (REG(NOM_PARAMETERS_1))
+		REG_SET(NOM_PARAMETERS_1, 0,
+			REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
 
 	REG_SET(NOM_PARAMETERS_4, 0,
 		DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
@@ -568,11 +570,13 @@ void hubp1_program_deadline(
 	REG_SET(VBLANK_PARAMETERS_4, 0,
 		REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
 
-	REG_SET(NOM_PARAMETERS_2, 0,
-		DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
+	if (REG(NOM_PARAMETERS_2))
+		REG_SET(NOM_PARAMETERS_2, 0,
+			DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
 
-	REG_SET(NOM_PARAMETERS_3, 0,
-		REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
+	if (REG(NOM_PARAMETERS_3))
+		REG_SET(NOM_PARAMETERS_3, 0,
+			REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
 
 	REG_SET(NOM_PARAMETERS_6, 0,
 		DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 4a3703e12ea1..c794ce4a8177 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -30,6 +30,7 @@
 #define TO_DCN10_HUBP(hubp)\
 	container_of(hubp, struct dcn10_hubp, base)
 
+/* Register address initialization macro for all ASICs (including those with reduced functionality) */
 #define HUBP_REG_LIST_DCN(id)\
 	SRI(DCHUBP_CNTL, HUBP, id),\
 	SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
@@ -78,16 +79,12 @@
 	SRI(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id),\
 	SRI(VBLANK_PARAMETERS_1, HUBPREQ, id),\
 	SRI(VBLANK_PARAMETERS_3, HUBPREQ, id),\
-	SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
-	SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
 	SRI(NOM_PARAMETERS_4, HUBPREQ, id),\
 	SRI(NOM_PARAMETERS_5, HUBPREQ, id),\
 	SRI(PER_LINE_DELIVERY_PRE, HUBPREQ, id),\
 	SRI(PER_LINE_DELIVERY, HUBPREQ, id),\
 	SRI(VBLANK_PARAMETERS_2, HUBPREQ, id),\
 	SRI(VBLANK_PARAMETERS_4, HUBPREQ, id),\
-	SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
-	SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
 	SRI(NOM_PARAMETERS_6, HUBPREQ, id),\
 	SRI(NOM_PARAMETERS_7, HUBPREQ, id),\
 	SRI(DCN_TTU_QOS_WM, HUBPREQ, id),\
@@ -96,11 +93,19 @@
 	SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
 	SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
 	SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
-	SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id),\
 	SRI(HUBP_CLK_CNTL, HUBP, id)
 
+/* Register address initialization macro for "generic" ASICs with full functionality */
+#define HUBP_REG_LIST_DCN_GEN(id)\
+	SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
+	SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
+	SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
+	SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
+	SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id)
+
 #define HUBP_REG_LIST_DCN10(id)\
 	HUBP_REG_LIST_DCN(id),\
+	HUBP_REG_LIST_DCN_GEN(id),\
 	SRI(PREFETCH_SETTINS, HUBPREQ, id),\
 	SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\
 	SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\
@@ -237,6 +242,7 @@
 #define HUBP_SF(reg_name, field_name, post_fix)\
 	.field_name = reg_name ## __ ## field_name ## post_fix
 
+/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
 #define HUBP_MASK_SH_LIST_DCN(mask_sh)\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
@@ -335,8 +341,6 @@
 	HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
 	HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
 	HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
-	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
-	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
 	HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
@@ -345,8 +349,6 @@
 	HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
-	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
-	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
@@ -357,12 +359,20 @@
 	HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
 	HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
 	HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
-	HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
-	HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\
 	HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
 
+/* Mask/shift struct generation macro for "generic" ASICs with full functionality */
+#define HUBP_MASK_SH_LIST_DCN_GEN(mask_sh)\
+	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
+	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
+	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
+	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh)
+
 #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
 	HUBP_MASK_SH_LIST_DCN(mask_sh),\
+	HUBP_MASK_SH_LIST_DCN_GEN(mask_sh),\
 	HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
 	HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
 	HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\

From e4b3f6f299436be812aca4845bd20f592eaf074e Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Thu, 8 Mar 2018 12:08:01 -0500
Subject: [PATCH 0286/1286] drm/amd/display: fix Polaris 12 bw bounding box

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 121 +++++++++++++++++-
 .../gpu/drm/amd/display/dc/inc/dce_calcs.h    |   1 +
 2 files changed, 120 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 0cbab81ab304..821502b1acba 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -52,10 +52,11 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
 		return BW_CALCS_VERSION_CARRIZO;
 
 	case FAMILY_VI:
+		if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+			return BW_CALCS_VERSION_POLARIS12;
 		if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
 			return BW_CALCS_VERSION_POLARIS10;
-		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
-				ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
 			return BW_CALCS_VERSION_POLARIS11;
 		return BW_CALCS_VERSION_INVALID;
 
@@ -2373,6 +2374,122 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
 		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
 		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
 		break;
+	case BW_CALCS_VERSION_POLARIS12:
+		vbios.memory_type = bw_def_gddr5;
+		vbios.dram_channel_width_in_bits = 32;
+		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+		vbios.number_of_dram_banks = 8;
+		vbios.high_yclk = bw_int_to_fixed(6000);
+		vbios.mid_yclk = bw_int_to_fixed(3200);
+		vbios.low_yclk = bw_int_to_fixed(1000);
+		vbios.low_sclk = bw_int_to_fixed(678);
+		vbios.mid1_sclk = bw_int_to_fixed(864);
+		vbios.mid2_sclk = bw_int_to_fixed(900);
+		vbios.mid3_sclk = bw_int_to_fixed(920);
+		vbios.mid4_sclk = bw_int_to_fixed(940);
+		vbios.mid5_sclk = bw_int_to_fixed(960);
+		vbios.mid6_sclk = bw_int_to_fixed(980);
+		vbios.high_sclk = bw_int_to_fixed(1049);
+		vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+		vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+		vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+		vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+		vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+		vbios.data_return_bus_width = bw_int_to_fixed(32);
+		vbios.trc = bw_int_to_fixed(48);
+		if (vbios.number_of_dram_channels == 2) // 64-bit
+			vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+		else
+			vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+		vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+		vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+		vbios.nbp_state_change_latency = bw_int_to_fixed(250);
+		vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+		vbios.scatter_gather_enable = false;
+		vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+		vbios.cursor_width = 32;
+		vbios.average_compression_rate = 4;
+		vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+		vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+		vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+		dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+		dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+		dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+		dceip.large_cursor = false;
+		dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+		dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+		dceip.cursor_max_outstanding_group_num = 1;
+		dceip.lines_interleaved_into_lb = 2;
+		dceip.chunk_width = 256;
+		dceip.number_of_graphics_pipes = 5;
+		dceip.number_of_underlay_pipes = 0;
+		dceip.low_power_tiling_mode = 0;
+		dceip.display_write_back_supported = true;
+		dceip.argb_compression_support = true;
+		dceip.underlay_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35556, 10000);
+		dceip.underlay_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.underlay_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.underlay_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.graphics_vscaler_efficiency6_bit_per_component =
+			bw_frc_to_fixed(35, 10);
+		dceip.graphics_vscaler_efficiency8_bit_per_component =
+			bw_frc_to_fixed(34286, 10000);
+		dceip.graphics_vscaler_efficiency10_bit_per_component =
+			bw_frc_to_fixed(32, 10);
+		dceip.graphics_vscaler_efficiency12_bit_per_component =
+			bw_int_to_fixed(3);
+		dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+		dceip.max_dmif_buffer_allocated = 4;
+		dceip.graphics_dmif_size = 12288;
+		dceip.underlay_luma_dmif_size = 19456;
+		dceip.underlay_chroma_dmif_size = 23552;
+		dceip.pre_downscaler_enabled = true;
+		dceip.underlay_downscale_prefetch_enabled = true;
+		dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+		dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+		dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+		dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+			bw_int_to_fixed(1);
+		dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.underlay420_chroma_lb_size_per_component =
+			bw_int_to_fixed(164352);
+		dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+			82176);
+		dceip.cursor_chunk_width = bw_int_to_fixed(64);
+		dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+		dceip.underlay_maximum_width_efficient_for_tiling =
+			bw_int_to_fixed(1920);
+		dceip.underlay_maximum_height_efficient_for_tiling =
+			bw_int_to_fixed(1080);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+			bw_frc_to_fixed(3, 10);
+		dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+			bw_int_to_fixed(25);
+		dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+			2);
+		dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+			bw_int_to_fixed(128);
+		dceip.limit_excessive_outstanding_dmif_requests = true;
+		dceip.linear_mode_line_request_alternation_slice =
+			bw_int_to_fixed(64);
+		dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+			32;
+		dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+		dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+		dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+		dceip.dispclk_per_request = bw_int_to_fixed(2);
+		dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+		dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+		dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+		break;
 	case BW_CALCS_VERSION_STONEY:
 		vbios.memory_type = bw_def_gddr5;
 		vbios.dram_channel_width_in_bits = 64;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index a9bfe9ff8ce6..0bd87f24fc06 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -42,6 +42,7 @@ enum bw_calcs_version {
 	BW_CALCS_VERSION_CARRIZO,
 	BW_CALCS_VERSION_POLARIS10,
 	BW_CALCS_VERSION_POLARIS11,
+	BW_CALCS_VERSION_POLARIS12,
 	BW_CALCS_VERSION_STONEY,
 	BW_CALCS_VERSION_VEGA10
 };
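
For reference, the new Polaris 12 entry derives its DRAM channel count from
the board's VRAM bus width, which in turn selects the urgent latency. A worked
example, assuming a 64-bit board (the bus width is an assumption made only for
illustration):

	vbios.dram_channel_width_in_bits = 32;
	vbios.number_of_dram_channels = 64 / vbios.dram_channel_width_in_bits; /* = 2 */
	/* two channels takes the number_of_dram_channels == 2 branch above,
	 * so dmifmc_urgent_latency becomes 4 instead of 3 */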

From deb0aac6af79265408c2b3c62b3d1150e7c46a1b Mon Sep 17 00:00:00 2001
From: Nikola Cornij <nikola.cornij@amd.com>
Date: Fri, 9 Mar 2018 14:45:07 -0500
Subject: [PATCH 0287/1286] drm/amd/display: Rename feature-specific register
 address init macro

Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index c794ce4a8177..e0d6d32357c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -95,8 +95,8 @@
 	SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
 	SRI(HUBP_CLK_CNTL, HUBP, id)
 
-/* Register address initialization macro for "generic" ASICs with full functionality */
-#define HUBP_REG_LIST_DCN_GEN(id)\
+/* Register address initialization macro for ASICs with VM */
+#define HUBP_REG_LIST_DCN_VM(id)\
 	SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
 	SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
 	SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
@@ -105,7 +105,7 @@
 
 #define HUBP_REG_LIST_DCN10(id)\
 	HUBP_REG_LIST_DCN(id),\
-	HUBP_REG_LIST_DCN_GEN(id),\
+	HUBP_REG_LIST_DCN_VM(id),\
 	SRI(PREFETCH_SETTINS, HUBPREQ, id),\
 	SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\
 	SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\
@@ -361,8 +361,8 @@
 	HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
 	HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
 
-/* Mask/shift struct generation macro for "generic" ASICs with full functionality */
-#define HUBP_MASK_SH_LIST_DCN_GEN(mask_sh)\
+/* Mask/shift struct generation macro for ASICs with VM */
+#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
@@ -372,7 +372,7 @@
 
 #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
 	HUBP_MASK_SH_LIST_DCN(mask_sh),\
-	HUBP_MASK_SH_LIST_DCN_GEN(mask_sh),\
+	HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
 	HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
 	HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
 	HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\

From a12c3b7d4e2ac7837c23620ebc3e42b397c1c321 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Thu, 8 Mar 2018 22:05:35 -0500
Subject: [PATCH 0288/1286] drm/amd/display: Don't read EDID in atomic_check

We shouldn't attempt to read EDID in atomic_check. We really shouldn't
even be modifying the connector object, or any other non-state object,
but this is a start at least.

Move the EDID cleanup from dm_dp_destroy_mst_connector to
dm_dp_mst_connector_destroy to ensure the EDID is still available for
headless mode.
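
A minimal sketch, not part of this patch, of where the EDID read naturally
belongs instead: the connector's .get_modes path, where blocking AUX I/O is
acceptable. The function name is illustrative only; the helpers used are the
same ones removed from dm_dp_mst_dc_sink_create below.

	static int example_dm_dp_mst_get_modes(struct drm_connector *connector)
	{
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		/* Read the EDID over the MST topology once and cache it. */
		if (!aconnector->edid)
			aconnector->edid = drm_dp_mst_get_edid(connector,
					&aconnector->mst_port->mst_mgr,
					aconnector->port);

		drm_mode_connector_update_edid_property(&aconnector->base,
							aconnector->edid);
		return drm_add_edid_modes(connector, aconnector->edid);
	}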

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 32 ++++++-------------
 1 file changed, 10 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8291d74f26bc..305292a9ff80 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -161,6 +161,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
 	struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
 
+	if (amdgpu_dm_connector->edid) {
+		kfree(amdgpu_dm_connector->edid);
+		amdgpu_dm_connector->edid = NULL;
+	}
+
 	drm_encoder_cleanup(&amdgpu_encoder->base);
 	kfree(amdgpu_encoder);
 	drm_connector_cleanup(connector);
@@ -181,28 +186,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 {
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-	struct edid *edid;
 	struct dc_sink *dc_sink;
 	struct dc_sink_init_data init_params = {
 			.link = aconnector->dc_link,
 			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+	/* FIXME: none of this is safe. We shouldn't touch aconnector here in
+	 * atomic_check.
+	 */
+
 	/*
 	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
 	 */
 	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
 		return;
 
-	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
-
-	if (!edid) {
-		drm_mode_connector_update_edid_property(
-			&aconnector->base,
-			NULL);
-		return;
-	}
-
-	aconnector->edid = edid;
+	ASSERT(aconnector->edid);
 
 	dc_sink = dc_link_add_remote_sink(
 		aconnector->dc_link,
@@ -215,9 +214,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 
 	amdgpu_dm_add_sink_to_freesync_module(
 			connector, aconnector->edid);
-
-	drm_mode_connector_update_edid_property(
-					&aconnector->base, aconnector->edid);
 }
 
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -424,14 +420,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 		dc_sink_release(aconnector->dc_sink);
 		aconnector->dc_sink = NULL;
 	}
-	if (aconnector->edid) {
-		kfree(aconnector->edid);
-		aconnector->edid = NULL;
-	}
-
-	drm_mode_connector_update_edid_property(
-			&aconnector->base,
-			NULL);
 
 	aconnector->mst_connected = false;
 }

From dfd01f299987e7ede74e27d422c43846d1326010 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Wed, 24 Jan 2018 14:28:30 -0500
Subject: [PATCH 0289/1286] drm/amd/display: add mpc to dtn log

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 31 ++++++++++++++-----
 .../gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c  | 17 ++++++++++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h  |  5 +++
 drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h   | 15 +++++++++
 4 files changed, 60 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8b0f6b8a5627..999190aa8a08 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -121,20 +121,19 @@ void dcn10_log_hw_state(struct dc *dc)
 
 	dcn10_log_hubbub_state(dc);
 
-	DTN_INFO("HUBP:\t format \t addr_hi \t width \t height \t "
-			"rotation \t mirror \t  sw_mode \t "
-			"dcc_en \t blank_en \t ttu_dis \t underflow \t "
-			"min_ttu_vblank \t qos_low_wm \t qos_high_wm \n");
-
+	DTN_INFO("HUBP:  format  addr_hi  width  height  "
+			"rotation  mirror  sw_mode  "
+			"dcc_en  blank_en  ttu_dis  underflow  "
+			"min_ttu_vblank  qos_low_wm  qos_high_wm\n");
 	for (i = 0; i < pool->pipe_count; i++) {
 		struct hubp *hubp = pool->hubps[i];
 		struct dcn_hubp_state s;
 
 		hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
 
-		DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t "
-				"%xh \t %xh \t %xh \t "
-				"%d \t %d \t %d \t %xh \t",
+		DTN_INFO("[%-2d]:  %5xh  %6xh  %5d  %6d  "
+				"%7xh  %5xh  %6xh  "
+				"%6d  %8d  %7d  %8xh \t",
 				hubp->inst,
 				s.pixel_format,
 				s.inuse_addr_hi,
@@ -153,6 +152,22 @@ void dcn10_log_hw_state(struct dc *dc)
 		DTN_INFO("\n");
 	}
 	DTN_INFO("\n");
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct output_pixel_processor *opp = pool->opps[i];
+		struct mpcc *mpcc = opp->mpc_tree_params.opp_list;
+		struct mpcc_state s = {0};
+
+		while (mpcc) {
+			ASSERT(opp->mpc_tree_params.opp_id == opp->inst);
+			pool->mpc->funcs->read_mpcc_state(pool->mpc, mpcc->mpcc_id, &s);
+			DTN_INFO("[OPP%d - MPCC%d]: DPP%d MPCCBOT%x MODE:%d ALPHA_MODE:%d PREMULT:%d OVERLAP_ONLY:%d\n",
+				s.opp_id, mpcc->mpcc_id, s.dpp_id, s.bot_mpcc_id,
+				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only);
+			mpcc = mpcc->mpcc_bot;
+			ASSERT(!mpcc || mpcc->mpcc_id == s.bot_mpcc_id);
+		}
+	}
+	DTN_INFO("\n");
 
 	DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t "
 			"h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 179890b1a8c4..29e15a93a7d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -409,7 +409,24 @@ void mpc1_init_mpcc_list_from_hw(
 	}
 }
 
+void mpc1_read_mpcc_state(
+		struct mpc *mpc,
+		int mpcc_inst,
+		struct mpcc_state *s)
+{
+	struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+	REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+	REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+	REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+	REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+			MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+			MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+			MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+}
+
 const struct mpc_funcs dcn10_mpc_funcs = {
+	.read_mpcc_state = mpc1_read_mpcc_state,
 	.insert_plane = mpc1_insert_plane,
 	.remove_mpcc = mpc1_remove_mpcc,
 	.mpc_init = mpc1_mpc_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 267a2995ef6e..d3d16c4cbea3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -183,4 +183,9 @@ struct mpcc *mpc1_get_mpcc_for_dpp(
 	struct mpc_tree *tree,
 	int dpp_id);
 
+void mpc1_read_mpcc_state(
+		struct mpc *mpc,
+		int mpcc_inst,
+		struct mpcc_state *s);
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 23a8d5e53a89..5caacab216b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -105,7 +105,22 @@ struct mpc {
 	struct mpcc mpcc_array[MAX_MPCC];
 };
 
+struct mpcc_state {
+	uint32_t opp_id;
+	uint32_t dpp_id;
+	uint32_t bot_mpcc_id;
+	uint32_t mode;
+	uint32_t alpha_mode;
+	uint32_t pre_multiplied_alpha;
+	uint32_t overlap_only;
+};
+
 struct mpc_funcs {
+	void (*read_mpcc_state)(
+			struct mpc *mpc,
+			int mpcc_inst,
+			struct mpcc_state *s);
+
 	/*
 	 * Insert DPP into MPC tree based on specified blending position.
 	 * Only used for planes that are part of blending chain for OPP output

From 1249acefefd43006127e58acf9c67de8038d770b Mon Sep 17 00:00:00 2001
From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Date: Thu, 8 Mar 2018 14:58:11 -0500
Subject: [PATCH 0290/1286] drm/amd/display: Add debug prints for bandwidth
 calculations

Using these three functions we can print the dceip, vbios and data structs
used for bandwidth calculations. This is useful for debugging bandwidth
calculation issues without attaching a debugger.
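
A rough sketch of how they might be hooked up inside bw_calcs() (the real call
site is in dce_calcs.c per the diffstat below; the guarding debug flag named
here is an assumption, not something this patch defines):

	if (ctx->dc->debug.bandwidth_calcs_trace) {	/* flag name assumed */
		print_bw_calcs_dceip(ctx->logger, dceip);
		print_bw_calcs_vbios(ctx->logger, vbios);
		print_bw_calcs_data(ctx->logger, data);
	}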

Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/calcs/calcs_logger.h   | 579 ++++++++++++++++++
 .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  |   6 +
 drivers/gpu/drm/amd/display/dc/dc.h           |   1 +
 3 files changed, 586 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
new file mode 100644
index 000000000000..fc3f98fb09ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
@@ -0,0 +1,579 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _CALCS_CALCS_LOGGER_H_
+#define _CALCS_CALCS_LOGGER_H_
+#define DC_LOGGER \
+	logger
+
+static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
+{
+
+	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+	DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip");
+	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+	DC_LOG_BANDWIDTH_CALCS("	[enum]   bw_calcs_version version %d", dceip->version);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] large_cursor: %d", dceip->large_cursor);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] dmif_pipe_en_fbc_chunk_tracker: %d", dceip->dmif_pipe_en_fbc_chunk_tracker);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] display_write_back_supported: %d", dceip->display_write_back_supported);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] argb_compression_support: %d", dceip->argb_compression_support);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] pre_downscaler_enabled: %d", dceip->pre_downscaler_enabled);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] underlay_downscale_prefetch_enabled: %d",
+				dceip->underlay_downscale_prefetch_enabled);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] graphics_lb_nodownscaling_multi_line_prefetching: %d",
+				dceip->graphics_lb_nodownscaling_multi_line_prefetching);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] limit_excessive_outstanding_dmif_requests: %d",
+				dceip->limit_excessive_outstanding_dmif_requests);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] cursor_max_outstanding_group_num: %d",
+				dceip->cursor_max_outstanding_group_num);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] lines_interleaved_into_lb: %d", dceip->lines_interleaved_into_lb);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] low_power_tiling_mode: %d", dceip->low_power_tiling_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] chunk_width: %d", dceip->chunk_width);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_graphics_pipes: %d", dceip->number_of_graphics_pipes);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_underlay_pipes: %d", dceip->number_of_underlay_pipes);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] max_dmif_buffer_allocated: %d", dceip->max_dmif_buffer_allocated);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] graphics_dmif_size: %d", dceip->graphics_dmif_size);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] underlay_luma_dmif_size: %d", dceip->underlay_luma_dmif_size);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] underlay_chroma_dmif_size: %d", dceip->underlay_chroma_dmif_size);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] scatter_gather_lines_of_pte_prefetching_in_linear_mode: %d",
+				dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] display_write_back420_luma_mcifwr_buffer_size: %d",
+				dceip->display_write_back420_luma_mcifwr_buffer_size);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] display_write_back420_chroma_mcifwr_buffer_size: %d",
+				dceip->display_write_back420_chroma_mcifwr_buffer_size);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] scatter_gather_pte_request_rows_in_tiling_mode: %d",
+				dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] underlay_vscaler_efficiency10_bit_per_component: %d",
+				bw_fixed_to_int(dceip->underlay_vscaler_efficiency10_bit_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] underlay_vscaler_efficiency12_bit_per_component: %d",
+				bw_fixed_to_int(dceip->underlay_vscaler_efficiency12_bit_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] graphics_vscaler_efficiency6_bit_per_component: %d",
+				bw_fixed_to_int(dceip->graphics_vscaler_efficiency6_bit_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] graphics_vscaler_efficiency8_bit_per_component: %d",
+				bw_fixed_to_int(dceip->graphics_vscaler_efficiency8_bit_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] graphics_vscaler_efficiency10_bit_per_component: %d",
+				bw_fixed_to_int(dceip->graphics_vscaler_efficiency10_bit_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] graphics_vscaler_efficiency12_bit_per_component: %d",
+				bw_fixed_to_int(dceip->graphics_vscaler_efficiency12_bit_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] alpha_vscaler_efficiency: %d",
+				bw_fixed_to_int(dceip->alpha_vscaler_efficiency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lb_write_pixels_per_dispclk: %d",
+				bw_fixed_to_int(dceip->lb_write_pixels_per_dispclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lb_size_per_component444: %d",
+				bw_fixed_to_int(dceip->lb_size_per_component444));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_and_dram_clock_state_change_gated_before_cursor: %d",
+				bw_fixed_to_int(dceip->stutter_and_dram_clock_state_change_gated_before_cursor));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] underlay420_luma_lb_size_per_component: %d",
+				bw_fixed_to_int(dceip->underlay420_luma_lb_size_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] underlay420_chroma_lb_size_per_component: %d",
+				bw_fixed_to_int(dceip->underlay420_chroma_lb_size_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] underlay422_lb_size_per_component: %d",
+				bw_fixed_to_int(dceip->underlay422_lb_size_per_component));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] cursor_chunk_width: %d", bw_fixed_to_int(dceip->cursor_chunk_width));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] cursor_dcp_buffer_lines: %d",
+				bw_fixed_to_int(dceip->cursor_dcp_buffer_lines));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] underlay_maximum_width_efficient_for_tiling: %d",
+				bw_fixed_to_int(dceip->underlay_maximum_width_efficient_for_tiling));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] underlay_maximum_height_efficient_for_tiling: %d",
+				bw_fixed_to_int(dceip->underlay_maximum_height_efficient_for_tiling));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display: %d",
+				bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation: %d",
+				bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] minimum_outstanding_pte_request_limit: %d",
+				bw_fixed_to_int(dceip->minimum_outstanding_pte_request_limit));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] maximum_total_outstanding_pte_requests_allowed_by_saw: %d",
+				bw_fixed_to_int(dceip->maximum_total_outstanding_pte_requests_allowed_by_saw));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] linear_mode_line_request_alternation_slice: %d",
+				bw_fixed_to_int(dceip->linear_mode_line_request_alternation_slice));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] request_efficiency: %d", bw_fixed_to_int(dceip->request_efficiency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk_per_request: %d", bw_fixed_to_int(dceip->dispclk_per_request));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk_ramping_factor: %d",
+				bw_fixed_to_int(dceip->dispclk_ramping_factor));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] display_pipe_throughput_factor: %d",
+				bw_fixed_to_int(dceip->display_pipe_throughput_factor));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mcifwr_all_surfaces_burst_time: %d",
+				bw_fixed_to_int(dceip->mcifwr_all_surfaces_burst_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmif_request_buffer_size: %d",
+				bw_fixed_to_int(dceip->dmif_request_buffer_size));
+
+
+}
+
+static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
+{
+
+	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+	DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_vbios vbios");
+	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines memory_type: %d", vbios->memory_type);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] dram_channel_width_in_bits: %d", vbios->dram_channel_width_in_bits);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_dram_channels: %d", vbios->number_of_dram_channels);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_dram_banks: %d", vbios->number_of_dram_banks);
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] low_yclk: %d", bw_fixed_to_int(vbios->low_yclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid_yclk: %d", bw_fixed_to_int(vbios->mid_yclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] high_yclk: %d", bw_fixed_to_int(vbios->high_yclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] low_sclk: %d", bw_fixed_to_int(vbios->low_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid1_sclk: %d", bw_fixed_to_int(vbios->mid1_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid2_sclk: %d", bw_fixed_to_int(vbios->mid2_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid3_sclk: %d", bw_fixed_to_int(vbios->mid3_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid4_sclk: %d", bw_fixed_to_int(vbios->mid4_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid5_sclk: %d", bw_fixed_to_int(vbios->mid5_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid6_sclk: %d", bw_fixed_to_int(vbios->mid6_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] high_sclk: %d", bw_fixed_to_int(vbios->high_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] low_voltage_max_dispclk: %d",
+				bw_fixed_to_int(vbios->low_voltage_max_dispclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid_voltage_max_dispclk: %d",
+				bw_fixed_to_int(vbios->mid_voltage_max_dispclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] high_voltage_max_dispclk: %d",
+				bw_fixed_to_int(vbios->high_voltage_max_dispclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] low_voltage_max_phyclk: %d",
+				bw_fixed_to_int(vbios->low_voltage_max_phyclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mid_voltage_max_phyclk: %d",
+				bw_fixed_to_int(vbios->mid_voltage_max_phyclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] high_voltage_max_phyclk: %d",
+				bw_fixed_to_int(vbios->high_voltage_max_phyclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] data_return_bus_width: %d", bw_fixed_to_int(vbios->data_return_bus_width));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] trc: %d", bw_fixed_to_int(vbios->trc));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmifmc_urgent_latency: %d", bw_fixed_to_int(vbios->dmifmc_urgent_latency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_self_refresh_exit_latency: %d",
+				bw_fixed_to_int(vbios->stutter_self_refresh_exit_latency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_self_refresh_entry_latency: %d",
+				bw_fixed_to_int(vbios->stutter_self_refresh_entry_latency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] nbp_state_change_latency: %d",
+				bw_fixed_to_int(vbios->nbp_state_change_latency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mcifwrmc_urgent_latency: %d",
+				bw_fixed_to_int(vbios->mcifwrmc_urgent_latency));
+	DC_LOG_BANDWIDTH_CALCS("	[bool] scatter_gather_enable: %d", vbios->scatter_gather_enable);
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] down_spread_percentage: %d",
+				bw_fixed_to_int(vbios->down_spread_percentage));
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] cursor_width: %d", vbios->cursor_width);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] average_compression_rate: %d", vbios->average_compression_rate);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_request_slots_gmc_reserves_for_dmif_per_channel: %d",
+				vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel);
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] blackout_duration: %d", bw_fixed_to_int(vbios->blackout_duration));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] maximum_blackout_recovery_time: %d",
+				bw_fixed_to_int(vbios->maximum_blackout_recovery_time));
+
+
+}
+
+static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
+{
+
+	int i, j, k;
+
+	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+	DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_data data");
+	DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_displays: %d", data->number_of_displays);
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines underlay_surface_type: %d", data->underlay_surface_type);
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines panning_and_bezel_adjustment: %d",
+				data->panning_and_bezel_adjustment);
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines graphics_tiling_mode: %d", data->graphics_tiling_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] graphics_lb_bpc: %d", data->graphics_lb_bpc);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] underlay_lb_bpc: %d", data->underlay_lb_bpc);
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines underlay_tiling_mode: %d", data->underlay_tiling_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines d0_underlay_mode: %d", data->d0_underlay_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] d1_display_write_back_dwb_enable: %d", data->d1_display_write_back_dwb_enable);
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines d1_underlay_mode: %d", data->d1_underlay_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] cpup_state_change_enable: %d", data->cpup_state_change_enable);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] cpuc_state_change_enable: %d", data->cpuc_state_change_enable);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] nbp_state_change_enable: %d", data->nbp_state_change_enable);
+	DC_LOG_BANDWIDTH_CALCS("	[bool] stutter_mode_enable: %d", data->stutter_mode_enable);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] y_clk_level: %d", data->y_clk_level);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] sclk_level: %d", data->sclk_level);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_underlay_surfaces: %d", data->number_of_underlay_surfaces);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_dram_wrchannels: %d", data->number_of_dram_wrchannels);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] chunk_request_delay: %d", data->chunk_request_delay);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] number_of_dram_channels: %d", data->number_of_dram_channels);
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines underlay_micro_tile_mode: %d", data->underlay_micro_tile_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines graphics_micro_tile_mode: %d", data->graphics_micro_tile_mode);
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] max_phyclk: %d", bw_fixed_to_int(data->max_phyclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dram_efficiency: %d", bw_fixed_to_int(data->dram_efficiency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_width_after_surface_type: %d",
+				bw_fixed_to_int(data->src_width_after_surface_type));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_height_after_surface_type: %d",
+				bw_fixed_to_int(data->src_height_after_surface_type));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] hsr_after_surface_type: %d",
+				bw_fixed_to_int(data->hsr_after_surface_type));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] vsr_after_surface_type: %d", bw_fixed_to_int(data->vsr_after_surface_type));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_width_after_rotation: %d",
+				bw_fixed_to_int(data->src_width_after_rotation));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_height_after_rotation: %d",
+				bw_fixed_to_int(data->src_height_after_rotation));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] hsr_after_rotation: %d", bw_fixed_to_int(data->hsr_after_rotation));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] vsr_after_rotation: %d", bw_fixed_to_int(data->vsr_after_rotation));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] source_height_pixels: %d", bw_fixed_to_int(data->source_height_pixels));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] hsr_after_stereo: %d", bw_fixed_to_int(data->hsr_after_stereo));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] vsr_after_stereo: %d", bw_fixed_to_int(data->vsr_after_stereo));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] source_width_in_lb: %d", bw_fixed_to_int(data->source_width_in_lb));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lb_line_pitch: %d", bw_fixed_to_int(data->lb_line_pitch));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] underlay_maximum_source_efficient_for_tiling: %d",
+				bw_fixed_to_int(data->underlay_maximum_source_efficient_for_tiling));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] num_lines_at_frame_start: %d",
+				bw_fixed_to_int(data->num_lines_at_frame_start));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] min_dmif_size_in_time: %d", bw_fixed_to_int(data->min_dmif_size_in_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] min_mcifwr_size_in_time: %d",
+				bw_fixed_to_int(data->min_mcifwr_size_in_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_requests_for_dmif_size: %d",
+				bw_fixed_to_int(data->total_requests_for_dmif_size));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] peak_pte_request_to_eviction_ratio_limiting: %d",
+				bw_fixed_to_int(data->peak_pte_request_to_eviction_ratio_limiting));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] useful_pte_per_pte_request: %d",
+				bw_fixed_to_int(data->useful_pte_per_pte_request));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_pte_request_rows: %d",
+				bw_fixed_to_int(data->scatter_gather_pte_request_rows));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_row_height: %d",
+				bw_fixed_to_int(data->scatter_gather_row_height));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_pte_requests_in_vblank: %d",
+				bw_fixed_to_int(data->scatter_gather_pte_requests_in_vblank));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] inefficient_linear_pitch_in_bytes: %d",
+				bw_fixed_to_int(data->inefficient_linear_pitch_in_bytes));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] cursor_total_data: %d", bw_fixed_to_int(data->cursor_total_data));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] cursor_total_request_groups: %d",
+				bw_fixed_to_int(data->cursor_total_request_groups));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_total_pte_requests: %d",
+				bw_fixed_to_int(data->scatter_gather_total_pte_requests));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_total_pte_request_groups: %d",
+				bw_fixed_to_int(data->scatter_gather_total_pte_request_groups));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] tile_width_in_pixels: %d", bw_fixed_to_int(data->tile_width_in_pixels));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmif_total_number_of_data_request_page_close_open: %d",
+				bw_fixed_to_int(data->dmif_total_number_of_data_request_page_close_open));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mcifwr_total_number_of_data_request_page_close_open: %d",
+				bw_fixed_to_int(data->mcifwr_total_number_of_data_request_page_close_open));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] bytes_per_page_close_open: %d",
+				bw_fixed_to_int(data->bytes_per_page_close_open));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mcifwr_total_page_close_open_time: %d",
+				bw_fixed_to_int(data->mcifwr_total_page_close_open_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_requests_for_adjusted_dmif_size: %d",
+				bw_fixed_to_int(data->total_requests_for_adjusted_dmif_size));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_dmifmc_urgent_trips: %d",
+				bw_fixed_to_int(data->total_dmifmc_urgent_trips));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_dmifmc_urgent_latency: %d",
+				bw_fixed_to_int(data->total_dmifmc_urgent_latency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_display_reads_required_data: %d",
+				bw_fixed_to_int(data->total_display_reads_required_data));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_display_reads_required_dram_access_data: %d",
+				bw_fixed_to_int(data->total_display_reads_required_dram_access_data));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_display_writes_required_data: %d",
+				bw_fixed_to_int(data->total_display_writes_required_data));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_display_writes_required_dram_access_data: %d",
+				bw_fixed_to_int(data->total_display_writes_required_dram_access_data));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] display_reads_required_data: %d",
+				bw_fixed_to_int(data->display_reads_required_data));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] display_reads_required_dram_access_data: %d",
+				bw_fixed_to_int(data->display_reads_required_dram_access_data));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmif_total_page_close_open_time: %d",
+				bw_fixed_to_int(data->dmif_total_page_close_open_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] min_cursor_memory_interface_buffer_size_in_time: %d",
+				bw_fixed_to_int(data->min_cursor_memory_interface_buffer_size_in_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] min_read_buffer_size_in_time: %d",
+				bw_fixed_to_int(data->min_read_buffer_size_in_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] display_reads_time_for_data_transfer: %d",
+				bw_fixed_to_int(data->display_reads_time_for_data_transfer));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] display_writes_time_for_data_transfer: %d",
+				bw_fixed_to_int(data->display_writes_time_for_data_transfer));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmif_required_dram_bandwidth: %d",
+				bw_fixed_to_int(data->dmif_required_dram_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mcifwr_required_dram_bandwidth: %d",
+				bw_fixed_to_int(data->mcifwr_required_dram_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] required_dmifmc_urgent_latency_for_page_close_open: %d",
+				bw_fixed_to_int(data->required_dmifmc_urgent_latency_for_page_close_open));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] required_mcifmcwr_urgent_latency: %d",
+				bw_fixed_to_int(data->required_mcifmcwr_urgent_latency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] required_dram_bandwidth_gbyte_per_second: %d",
+				bw_fixed_to_int(data->required_dram_bandwidth_gbyte_per_second));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dram_bandwidth: %d", bw_fixed_to_int(data->dram_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmif_required_sclk: %d", bw_fixed_to_int(data->dmif_required_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mcifwr_required_sclk: %d", bw_fixed_to_int(data->mcifwr_required_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] required_sclk: %d", bw_fixed_to_int(data->required_sclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] downspread_factor: %d", bw_fixed_to_int(data->downspread_factor));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] v_scaler_efficiency: %d", bw_fixed_to_int(data->v_scaler_efficiency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scaler_limits_factor: %d", bw_fixed_to_int(data->scaler_limits_factor));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] display_pipe_pixel_throughput: %d",
+				bw_fixed_to_int(data->display_pipe_pixel_throughput));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_dispclk_required_with_ramping: %d",
+				bw_fixed_to_int(data->total_dispclk_required_with_ramping));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_dispclk_required_without_ramping: %d",
+				bw_fixed_to_int(data->total_dispclk_required_without_ramping));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_read_request_bandwidth: %d",
+				bw_fixed_to_int(data->total_read_request_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_write_request_bandwidth: %d",
+				bw_fixed_to_int(data->total_write_request_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk_required_for_total_read_request_bandwidth: %d",
+				bw_fixed_to_int(data->dispclk_required_for_total_read_request_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_dispclk_required_with_ramping_with_request_bandwidth: %d",
+				bw_fixed_to_int(data->total_dispclk_required_with_ramping_with_request_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_dispclk_required_without_ramping_with_request_bandwidth: %d",
+				bw_fixed_to_int(data->total_dispclk_required_without_ramping_with_request_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk: %d", bw_fixed_to_int(data->dispclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] blackout_recovery_time: %d", bw_fixed_to_int(data->blackout_recovery_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] min_pixels_per_data_fifo_entry: %d",
+				bw_fixed_to_int(data->min_pixels_per_data_fifo_entry));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] sclk_deep_sleep: %d", bw_fixed_to_int(data->sclk_deep_sleep));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] chunk_request_time: %d", bw_fixed_to_int(data->chunk_request_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] cursor_request_time: %d", bw_fixed_to_int(data->cursor_request_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] line_source_pixels_transfer_time: %d",
+				bw_fixed_to_int(data->line_source_pixels_transfer_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmifdram_access_efficiency: %d",
+				bw_fixed_to_int(data->dmifdram_access_efficiency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mcifwrdram_access_efficiency: %d",
+				bw_fixed_to_int(data->mcifwrdram_access_efficiency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_average_bandwidth_no_compression: %d",
+				bw_fixed_to_int(data->total_average_bandwidth_no_compression));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_average_bandwidth: %d",
+				bw_fixed_to_int(data->total_average_bandwidth));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] total_stutter_cycle_duration: %d",
+				bw_fixed_to_int(data->total_stutter_cycle_duration));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_burst_time: %d", bw_fixed_to_int(data->stutter_burst_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] time_in_self_refresh: %d", bw_fixed_to_int(data->time_in_self_refresh));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_efficiency: %d", bw_fixed_to_int(data->stutter_efficiency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] worst_number_of_trips_to_memory: %d",
+				bw_fixed_to_int(data->worst_number_of_trips_to_memory));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] immediate_flip_time: %d", bw_fixed_to_int(data->immediate_flip_time));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] latency_for_non_dmif_clients: %d",
+				bw_fixed_to_int(data->latency_for_non_dmif_clients));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] latency_for_non_mcifwr_clients: %d",
+				bw_fixed_to_int(data->latency_for_non_mcifwr_clients));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmifmc_urgent_latency_supported_in_high_sclk_and_yclk: %d",
+				bw_fixed_to_int(data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] nbp_state_dram_speed_change_margin: %d",
+				bw_fixed_to_int(data->nbp_state_dram_speed_change_margin));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] display_reads_time_for_data_transfer_and_urgent_latency: %d",
+				bw_fixed_to_int(data->display_reads_time_for_data_transfer_and_urgent_latency));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dram_speed_change_margin: %d",
+				bw_fixed_to_int(data->dram_speed_change_margin));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] min_vblank_dram_speed_change_margin: %d",
+				bw_fixed_to_int(data->min_vblank_dram_speed_change_margin));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] min_stutter_refresh_duration: %d",
+				bw_fixed_to_int(data->min_stutter_refresh_duration));
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] total_stutter_dmif_buffer_size: %d", data->total_stutter_dmif_buffer_size);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] total_bytes_requested: %d", data->total_bytes_requested);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] min_stutter_dmif_buffer_size: %d", data->min_stutter_dmif_buffer_size);
+	DC_LOG_BANDWIDTH_CALCS("	[uint32_t] num_stutter_bursts: %d", data->num_stutter_bursts);
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] v_blank_nbp_state_dram_speed_change_latency_supported: %d",
+				bw_fixed_to_int(data->v_blank_nbp_state_dram_speed_change_latency_supported));
+	DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] nbp_state_dram_speed_change_latency_supported: %d",
+				bw_fixed_to_int(data->nbp_state_dram_speed_change_latency_supported));
+
+	for (i = 0; i < maximum_number_of_surfaces; i++) {
+		DC_LOG_BANDWIDTH_CALCS("	[bool] fbc_en[%d]:%d\n", i, data->fbc_en[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] lpt_en[%d]:%d", i, data->lpt_en[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] displays_match_flag[%d]:%d", i, data->displays_match_flag[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] use_alpha[%d]:%d", i, data->use_alpha[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] orthogonal_rotation[%d]:%d", i, data->orthogonal_rotation[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] enable[%d]:%d", i, data->enable[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] access_one_channel_only[%d]:%d", i, data->access_one_channel_only[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] scatter_gather_enable_for_pipe[%d]:%d",
+					i, data->scatter_gather_enable_for_pipe[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] interlace_mode[%d]:%d",
+					i, data->interlace_mode[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] display_pstate_change_enable[%d]:%d",
+					i, data->display_pstate_change_enable[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bool] line_buffer_prefetch[%d]:%d", i, data->line_buffer_prefetch[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[uint32_t] bytes_per_pixel[%d]:%d", i, data->bytes_per_pixel[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[uint32_t] max_chunks_non_fbc_mode[%d]:%d",
+					i, data->max_chunks_non_fbc_mode[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[uint32_t] lb_bpc[%d]:%d", i, data->lb_bpc[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[uint32_t] output_bpphdmi[%d]:%d", i, data->output_bpphdmi[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[uint32_t] output_bppdp4_lane_hbr[%d]:%d", i, data->output_bppdp4_lane_hbr[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[uint32_t] output_bppdp4_lane_hbr2[%d]:%d",
+					i, data->output_bppdp4_lane_hbr2[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[uint32_t] output_bppdp4_lane_hbr3[%d]:%d",
+					i, data->output_bppdp4_lane_hbr3[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[enum] bw_defines stereo_mode[%d]:%d", i, data->stereo_mode[i]);
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmif_buffer_transfer_time[%d]:%d",
+					i, bw_fixed_to_int(data->dmif_buffer_transfer_time[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] displays_with_same_mode[%d]:%d",
+					i, bw_fixed_to_int(data->displays_with_same_mode[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_dmif_buffer_size[%d]:%d",
+					i, bw_fixed_to_int(data->stutter_dmif_buffer_size[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_refresh_duration[%d]:%d",
+					i, bw_fixed_to_int(data->stutter_refresh_duration[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_exit_watermark[%d]:%d",
+					i, bw_fixed_to_int(data->stutter_exit_watermark[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_entry_watermark[%d]:%d",
+					i, bw_fixed_to_int(data->stutter_entry_watermark[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] h_total[%d]:%d", i, bw_fixed_to_int(data->h_total[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] v_total[%d]:%d", i, bw_fixed_to_int(data->v_total[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] pixel_rate[%d]:%d", i, bw_fixed_to_int(data->pixel_rate[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_width[%d]:%d", i, bw_fixed_to_int(data->src_width[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] pitch_in_pixels[%d]:%d",
+					i, bw_fixed_to_int(data->pitch_in_pixels[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] pitch_in_pixels_after_surface_type[%d]:%d",
+					i, bw_fixed_to_int(data->pitch_in_pixels_after_surface_type[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_height[%d]:%d", i, bw_fixed_to_int(data->src_height[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scale_ratio[%d]:%d", i, bw_fixed_to_int(data->scale_ratio[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] h_taps[%d]:%d", i, bw_fixed_to_int(data->h_taps[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] v_taps[%d]:%d", i, bw_fixed_to_int(data->v_taps[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] h_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->h_scale_ratio[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] v_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->v_scale_ratio[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] rotation_angle[%d]:%d",
+					i, bw_fixed_to_int(data->rotation_angle[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] compression_rate[%d]:%d",
+					i, bw_fixed_to_int(data->compression_rate[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] vsr[%d]:%d", i, bw_fixed_to_int(data->vsr[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] source_width_rounded_up_to_chunks[%d]:%d",
+					i, bw_fixed_to_int(data->source_width_rounded_up_to_chunks[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] source_width_pixels[%d]:%d",
+					i, bw_fixed_to_int(data->source_width_pixels[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] source_height_rounded_up_to_chunks[%d]:%d",
+					i, bw_fixed_to_int(data->source_height_rounded_up_to_chunks[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] display_bandwidth[%d]:%d",
+					i, bw_fixed_to_int(data->display_bandwidth[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] request_bandwidth[%d]:%d",
+					i, bw_fixed_to_int(data->request_bandwidth[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] bytes_per_request[%d]:%d",
+					i, bw_fixed_to_int(data->bytes_per_request[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] useful_bytes_per_request[%d]:%d",
+					i, bw_fixed_to_int(data->useful_bytes_per_request[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lines_interleaved_in_mem_access[%d]:%d",
+					i, bw_fixed_to_int(data->lines_interleaved_in_mem_access[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] latency_hiding_lines[%d]:%d",
+					i, bw_fixed_to_int(data->latency_hiding_lines[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lb_partitions[%d]:%d",
+					i, bw_fixed_to_int(data->lb_partitions[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lb_partitions_max[%d]:%d",
+					i, bw_fixed_to_int(data->lb_partitions_max[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk_required_with_ramping[%d]:%d",
+					i, bw_fixed_to_int(data->dispclk_required_with_ramping[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk_required_without_ramping[%d]:%d",
+					i, bw_fixed_to_int(data->dispclk_required_without_ramping[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] data_buffer_size[%d]:%d",
+					i, bw_fixed_to_int(data->data_buffer_size[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] outstanding_chunk_request_limit[%d]:%d",
+					i, bw_fixed_to_int(data->outstanding_chunk_request_limit[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] urgent_watermark[%d]:%d",
+					i, bw_fixed_to_int(data->urgent_watermark[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] nbp_state_change_watermark[%d]:%d",
+					i, bw_fixed_to_int(data->nbp_state_change_watermark[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] v_filter_init[%d]:%d", i, bw_fixed_to_int(data->v_filter_init[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] stutter_cycle_duration[%d]:%d",
+					i, bw_fixed_to_int(data->stutter_cycle_duration[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] average_bandwidth[%d]:%d",
+					i, bw_fixed_to_int(data->average_bandwidth[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] average_bandwidth_no_compression[%d]:%d",
+					i, bw_fixed_to_int(data->average_bandwidth_no_compression[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_pte_request_limit[%d]:%d",
+					i, bw_fixed_to_int(data->scatter_gather_pte_request_limit[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lb_size_per_component[%d]:%d",
+					i, bw_fixed_to_int(data->lb_size_per_component[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] memory_chunk_size_in_bytes[%d]:%d",
+					i, bw_fixed_to_int(data->memory_chunk_size_in_bytes[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] pipe_chunk_size_in_bytes[%d]:%d",
+					i, bw_fixed_to_int(data->pipe_chunk_size_in_bytes[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] number_of_trips_to_memory_for_getting_apte_row[%d]:%d",
+					i, bw_fixed_to_int(data->number_of_trips_to_memory_for_getting_apte_row[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] adjusted_data_buffer_size[%d]:%d",
+					i, bw_fixed_to_int(data->adjusted_data_buffer_size[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] adjusted_data_buffer_size_in_memory[%d]:%d",
+					i, bw_fixed_to_int(data->adjusted_data_buffer_size_in_memory[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] pixels_per_data_fifo_entry[%d]:%d",
+					i, bw_fixed_to_int(data->pixels_per_data_fifo_entry[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_pte_requests_in_row[%d]:%d",
+					i, bw_fixed_to_int(data->scatter_gather_pte_requests_in_row[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] pte_request_per_chunk[%d]:%d",
+					i, bw_fixed_to_int(data->pte_request_per_chunk[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_page_width[%d]:%d",
+					i, bw_fixed_to_int(data->scatter_gather_page_width[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] scatter_gather_page_height[%d]:%d",
+					i, bw_fixed_to_int(data->scatter_gather_page_height[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lb_lines_in_per_line_out_in_beginning_of_frame[%d]:%d",
+					i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_beginning_of_frame[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] lb_lines_in_per_line_out_in_middle_of_frame[%d]:%d",
+					i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_middle_of_frame[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] cursor_width_pixels[%d]:%d",
+					i, bw_fixed_to_int(data->cursor_width_pixels[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] minimum_latency_hiding[%d]:%d",
+					i, bw_fixed_to_int(data->minimum_latency_hiding[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] maximum_latency_hiding[%d]:%d",
+					i, bw_fixed_to_int(data->maximum_latency_hiding[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] minimum_latency_hiding_with_cursor[%d]:%d",
+					i, bw_fixed_to_int(data->minimum_latency_hiding_with_cursor[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] maximum_latency_hiding_with_cursor[%d]:%d",
+					i, bw_fixed_to_int(data->maximum_latency_hiding_with_cursor[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_pixels_for_first_output_pixel[%d]:%d",
+					i, bw_fixed_to_int(data->src_pixels_for_first_output_pixel[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_pixels_for_last_output_pixel[%d]:%d",
+					i, bw_fixed_to_int(data->src_pixels_for_last_output_pixel[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_data_for_first_output_pixel[%d]:%d",
+					i, bw_fixed_to_int(data->src_data_for_first_output_pixel[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] src_data_for_last_output_pixel[%d]:%d",
+					i, bw_fixed_to_int(data->src_data_for_last_output_pixel[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] active_time[%d]:%d", i, bw_fixed_to_int(data->active_time[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] horizontal_blank_and_chunk_granularity_factor[%d]:%d",
+					i, bw_fixed_to_int(data->horizontal_blank_and_chunk_granularity_factor[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] cursor_latency_hiding[%d]:%d",
+					i, bw_fixed_to_int(data->cursor_latency_hiding[i]));
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] v_blank_dram_speed_change_margin[%d]:%d",
+					i, bw_fixed_to_int(data->v_blank_dram_speed_change_margin[i]));
+		}
+
+	for (i = 0; i < maximum_number_of_surfaces; i++) {
+		for (j = 0; j < 3; j++) {
+			for (k = 0; k < 8; k++) {
+
+				DC_LOG_BANDWIDTH_CALCS("\n	[bw_fixed] line_source_transfer_time[%d][%d][%d]:%d",
+					i, j, k, bw_fixed_to_int(data->line_source_transfer_time[i][j][k]));
+				DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dram_speed_change_line_source_transfer_time[%d][%d][%d]:%d",
+					i, j, k,
+					bw_fixed_to_int(data->dram_speed_change_line_source_transfer_time[i][j][k]));
+			}
+		}
+	}
+
+	for (i = 0; i < 3; i++) {
+		for (j = 0; j < 8; j++) {
+
+			DC_LOG_BANDWIDTH_CALCS("\n	[uint32_t] num_displays_with_margin[%d][%d]:%d",
+					i, j, data->num_displays_with_margin[i][j]);
+			DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmif_burst_time[%d][%d]:%d",
+					i, j, bw_fixed_to_int(data->dmif_burst_time[i][j]));
+			DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] mcifwr_burst_time[%d][%d]:%d",
+					i, j, bw_fixed_to_int(data->mcifwr_burst_time[i][j]));
+			DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] min_dram_speed_change_margin[%d][%d]:%d",
+					i, j, bw_fixed_to_int(data->min_dram_speed_change_margin[i][j]));
+			DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk_required_for_dram_speed_change[%d][%d]:%d",
+					i, j, bw_fixed_to_int(data->dispclk_required_for_dram_speed_change[i][j]));
+			DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] blackout_duration_margin[%d][%d]:%d",
+					i, j, bw_fixed_to_int(data->blackout_duration_margin[i][j]));
+			DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk_required_for_blackout_duration[%d][%d]:%d",
+					i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_duration[i][j]));
+			DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dispclk_required_for_blackout_recovery[%d][%d]:%d",
+					i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_recovery[i][j]));
+		}
+	}
+
+	for (i = 0; i < 6; i++) {
+		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] dmif_required_sclk_for_urgent_latency[%d]:%d",
+					i, bw_fixed_to_int(data->dmif_required_sclk_for_urgent_latency[i]));
+	}
+}
+
+#endif /* _CALCS_CALCS_LOGGER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 821502b1acba..59acb0885039 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -28,6 +28,7 @@
 #include "dc.h"
 #include "core_types.h"
 #include "dal_asic_id.h"
+#include "calcs_logger.h"
 
 /*
  * NOTE:
@@ -2990,6 +2991,11 @@ bool bw_calcs(struct dc_context *ctx,
 		struct bw_fixed mid_yclk = vbios->mid_yclk;
 		struct bw_fixed low_yclk = vbios->low_yclk;
 
+		if (ctx->dc->debug.bandwidth_calcs_trace) {
+			print_bw_calcs_dceip(ctx->logger, dceip);
+			print_bw_calcs_vbios(ctx->logger, vbios);
+			print_bw_calcs_data(ctx->logger, data);
+		}
 		calculate_bandwidth(dceip, vbios, data);
 
 		yclk_lvl = data->y_clk_level;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4d9da9d9c731..bdc3cef002d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -202,6 +202,7 @@ struct dc_debug {
 	bool timing_trace;
 	bool clock_trace;
 	bool validation_trace;
+	bool bandwidth_calcs_trace;
 
 	/* stutter efficiency related */
 	bool disable_stutter;

From c1f8d3fa4627ec7ec31cc538b471488980738631 Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Wed, 7 Mar 2018 16:59:43 -0500
Subject: [PATCH 0291/1286] drm/amd/display: Don't call
 amdgpu_dm_display_resume as it doesn't exist

amdgpu_dm_display_resume was merged into dm_resume, so there is no
need to call it separately.

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e42a28e3adc5..bad9f09c588b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1629,7 +1629,6 @@ static ssize_t s3_debug_store(struct device *device,
 	if (ret == 0) {
 		if (s3_state) {
 			dm_resume(adev);
-			amdgpu_dm_display_resume(adev);
 			drm_kms_helper_hotplug_event(adev->ddev);
 		} else
 			dm_suspend(adev);

From b361521f59764139067ada4ea9d6c213d583678f Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Mon, 12 Mar 2018 15:53:47 -0400
Subject: [PATCH 0292/1286] drm/amd/display: Adding stutter entry wm to dce bw
 struct

Add the stutter_entry_wm object to the dce_bw_output struct and
populate it with bandwidth calculation data.
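
The entries follow the same conversion pattern as the existing stutter
exit watermarks: multiply the bw_fixed value by 1000 and truncate to an
integer number of nanoseconds. A minimal sketch of that pattern, using
a hypothetical bw_to_ns() helper that is not part of this patch:

	/* Hypothetical helper, not in this patch: bw_fixed value -> int ns */
	static inline int bw_to_ns(struct bw_fixed wm)
	{
		return bw_fixed_to_int(bw_mul(wm, bw_int_to_fixed(1000)));
	}

	/* equivalent to what the patch open-codes, e.g.:
	 * calcs_output->stutter_entry_wm_ns[0].a_mark =
	 *	bw_to_ns(data->stutter_entry_watermark[4]);
	 */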

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/calcs/dce_calcs.c  | 111 +++++++++++++++++-
 .../gpu/drm/amd/display/dc/inc/core_types.h   |   1 +
 2 files changed, 110 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 59acb0885039..4b719328afd6 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3091,7 +3091,33 @@ bool bw_calcs(struct dc_context *ctx,
 			bw_fixed_to_int(bw_mul(data->
 				stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
-
+		calcs_output->stutter_entry_wm_ns[0].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[1].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[2].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->stutter_entry_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->stutter_entry_wm_ns[3].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[4].a_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->stutter_entry_wm_ns[5].a_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[9], bw_int_to_fixed(1000)));
 
 		calcs_output->urgent_wm_ns[0].a_mark =
 			bw_fixed_to_int(bw_mul(data->
@@ -3186,7 +3212,33 @@ bool bw_calcs(struct dc_context *ctx,
 				bw_fixed_to_int(bw_mul(data->
 					stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
-
+		calcs_output->stutter_entry_wm_ns[0].b_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[1].b_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[2].b_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->stutter_entry_wm_ns[3].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[4].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->stutter_entry_wm_ns[3].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[4].b_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->stutter_entry_wm_ns[5].b_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[9], bw_int_to_fixed(1000)));
 
 			calcs_output->urgent_wm_ns[0].b_mark =
 				bw_fixed_to_int(bw_mul(data->
@@ -3279,6 +3331,34 @@ bool bw_calcs(struct dc_context *ctx,
 				bw_fixed_to_int(bw_mul(data->
 					stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
+		calcs_output->stutter_entry_wm_ns[0].c_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[1].c_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[2].c_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->stutter_entry_wm_ns[3].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[4].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->stutter_entry_wm_ns[3].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[4].c_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->stutter_entry_wm_ns[5].c_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[9], bw_int_to_fixed(1000)));
+
 			calcs_output->urgent_wm_ns[0].c_mark =
 				bw_fixed_to_int(bw_mul(data->
 					urgent_watermark[4], bw_int_to_fixed(1000)));
@@ -3383,6 +3463,33 @@ bool bw_calcs(struct dc_context *ctx,
 			bw_fixed_to_int(bw_mul(data->
 				stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
+		calcs_output->stutter_entry_wm_ns[0].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[1].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+		calcs_output->stutter_entry_wm_ns[2].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+		if (ctx->dc->caps.max_slave_planes) {
+			calcs_output->stutter_entry_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+		} else {
+			calcs_output->stutter_entry_wm_ns[3].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[4].d_mark =
+				bw_fixed_to_int(bw_mul(data->
+					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+		}
+		calcs_output->stutter_entry_wm_ns[5].d_mark =
+			bw_fixed_to_int(bw_mul(data->
+				stutter_entry_watermark[9], bw_int_to_fixed(1000)));
 
 		calcs_output->urgent_wm_ns[0].d_mark =
 			bw_fixed_to_int(bw_mul(data->
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8c51ad70cace..55f56bf7d5b6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -250,6 +250,7 @@ struct dce_bw_output {
 	bool all_displays_in_sync;
 	struct dce_watermarks urgent_wm_ns[MAX_PIPES];
 	struct dce_watermarks stutter_exit_wm_ns[MAX_PIPES];
+	struct dce_watermarks stutter_entry_wm_ns[MAX_PIPES];
 	struct dce_watermarks nbp_state_change_wm_ns[MAX_PIPES];
 	int sclk_khz;
 	int sclk_deep_sleep_khz;

From f8931ea730ffa6c84e98c970c173935cfd38c0aa Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Mon, 12 Mar 2018 17:07:24 -0400
Subject: [PATCH 0293/1286] drm/amd/display: Change wb_h/vratio to double

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 09affa16cc43..e296de6ca502 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -215,8 +215,8 @@ struct writeback_st {
 	int wb_vtaps_luma;
 	int wb_htaps_chroma;
 	int wb_vtaps_chroma;
-	int wb_hratio;
-	int wb_vratio;
+	double wb_hratio;
+	double wb_vratio;
 };
 
 struct	_vcs_dpi_display_output_params_st	{

From 6133470c8e2ffdc6a5d67a1d79a9a0c1c0a94a10 Mon Sep 17 00:00:00 2001
From: Julian Parkin <jparkin@amd.com>
Date: Tue, 13 Mar 2018 15:53:13 -0400
Subject: [PATCH 0294/1286] drm/amd/display: drop dc_validate_guaranteed

Block FP16 scaling in validate_resources codepath.

Signed-off-by: Julian Parkin <jparkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 32 ------------
 drivers/gpu/drm/amd/display/dc/dc_stream.h    |  8 ---
 .../amd/display/dc/dce100/dce100_resource.c   | 33 -------------
 .../amd/display/dc/dce110/dce110_resource.c   | 33 -------------
 .../amd/display/dc/dce112/dce112_resource.c   | 33 -------------
 .../amd/display/dc/dce112/dce112_resource.h   |  5 --
 .../amd/display/dc/dce120/dce120_resource.c   |  1 -
 .../drm/amd/display/dc/dce80/dce80_resource.c | 49 -------------------
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c  |  7 +++
 .../drm/amd/display/dc/dcn10/dcn10_resource.c | 31 ------------
 .../gpu/drm/amd/display/dc/inc/core_types.h   |  5 --
 drivers/gpu/drm/amd/display/dc/inc/resource.h |  4 --
 12 files changed, 7 insertions(+), 234 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ba3487e97361..cae78ee9a6fc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1599,18 +1599,6 @@ enum dc_status dc_remove_stream_from_ctx(
 	return DC_OK;
 }
 
-static void copy_pipe_ctx(
-	const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
-{
-	struct dc_plane_state *plane_state = to_pipe_ctx->plane_state;
-	struct dc_stream_state *stream = to_pipe_ctx->stream;
-
-	*to_pipe_ctx = *from_pipe_ctx;
-	to_pipe_ctx->stream = stream;
-	if (plane_state != NULL)
-		to_pipe_ctx->plane_state = plane_state;
-}
-
 static struct dc_stream_state *find_pll_sharable_stream(
 		struct dc_stream_state *stream_needs_pll,
 		struct dc_state *context)
@@ -1752,26 +1740,6 @@ enum dc_status resource_map_pool_resources(
 	return DC_ERROR_UNEXPECTED;
 }
 
-/* first stream in the context is used to populate the rest */
-void validate_guaranteed_copy_streams(
-		struct dc_state *context,
-		int max_streams)
-{
-	int i;
-
-	for (i = 1; i < max_streams; i++) {
-		context->streams[i] = context->streams[0];
-
-		copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
-			      &context->res_ctx.pipe_ctx[i]);
-		context->res_ctx.pipe_ctx[i].stream =
-				context->res_ctx.pipe_ctx[0].stream;
-
-		dc_stream_retain(context->streams[i]);
-		context->stream_count++;
-	}
-}
-
 void dc_resource_state_copy_construct_current(
 		const struct dc *dc,
 		struct dc_state *dst_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index d017df56b2ba..3a7093ede569 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -208,14 +208,6 @@ bool dc_add_all_planes_for_stream(
 
 enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
 
-/*
- * This function takes a stream and checks if it is guaranteed to be supported.
- * Guaranteed means that MAX_COFUNC similar streams are supported.
- *
- * After this call:
- *   No hardware is programmed for call.  Only validation is done.
- */
-
 /*
  * Set up streams and links associated to drive sinks
  * The streams parameter is an absolute set of all active streams.
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3092f76bdb75..38ec0d609297 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -733,38 +733,6 @@ enum dc_status dce100_add_stream_to_ctx(
 	return result;
 }
 
-enum dc_status dce100_validate_guaranteed(
-		struct dc  *dc,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *context)
-{
-	enum dc_status result = DC_ERROR_UNEXPECTED;
-
-	context->streams[0] = dc_stream;
-	dc_stream_retain(context->streams[0]);
-	context->stream_count++;
-
-	result = resource_map_pool_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = resource_map_clock_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = build_mapped_resource(dc, context, dc_stream);
-
-	if (result == DC_OK) {
-		validate_guaranteed_copy_streams(
-				context, dc->caps.max_streams);
-		result = resource_build_scaling_params_for_context(dc, context);
-	}
-
-	if (result == DC_OK)
-		if (!dce100_validate_bandwidth(dc, context))
-			result = DC_FAIL_BANDWIDTH_VALIDATE;
-
-	return result;
-}
-
 static void dce100_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -786,7 +754,6 @@ enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, s
 static const struct resource_funcs dce100_res_pool_funcs = {
 	.destroy = dce100_destroy_resource_pool,
 	.link_enc_create = dce100_link_encoder_create,
-	.validate_guaranteed = dce100_validate_guaranteed,
 	.validate_bandwidth = dce100_validate_bandwidth,
 	.validate_plane = dce100_validate_plane,
 	.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index b1f14be20fdf..ee33786bdef6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -930,38 +930,6 @@ static enum dc_status dce110_add_stream_to_ctx(
 	return result;
 }
 
-static enum dc_status dce110_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *context)
-{
-	enum dc_status result = DC_ERROR_UNEXPECTED;
-
-	context->streams[0] = dc_stream;
-	dc_stream_retain(context->streams[0]);
-	context->stream_count++;
-
-	result = resource_map_pool_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = resource_map_clock_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = build_mapped_resource(dc, context, dc_stream);
-
-	if (result == DC_OK) {
-		validate_guaranteed_copy_streams(
-				context, dc->caps.max_streams);
-		result = resource_build_scaling_params_for_context(dc, context);
-	}
-
-	if (result == DC_OK)
-		if (!dce110_validate_bandwidth(dc, context))
-			result = DC_FAIL_BANDWIDTH_VALIDATE;
-
-	return result;
-}
-
 static struct pipe_ctx *dce110_acquire_underlay(
 		struct dc_state *context,
 		const struct resource_pool *pool,
@@ -1036,7 +1004,6 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce110_res_pool_funcs = {
 	.destroy = dce110_destroy_resource_pool,
 	.link_enc_create = dce110_link_encoder_create,
-	.validate_guaranteed = dce110_validate_guaranteed,
 	.validate_bandwidth = dce110_validate_bandwidth,
 	.validate_plane = dce110_validate_plane,
 	.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index cd1e3f72c44e..0a476636c5c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -867,38 +867,6 @@ enum dc_status dce112_add_stream_to_ctx(
 	return result;
 }
 
-enum dc_status dce112_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_state *context)
-{
-	enum dc_status result = DC_ERROR_UNEXPECTED;
-
-	context->streams[0] = stream;
-	dc_stream_retain(context->streams[0]);
-	context->stream_count++;
-
-	result = resource_map_pool_resources(dc, context, stream);
-
-	if (result == DC_OK)
-		result = resource_map_phy_clock_resources(dc, context, stream);
-
-	if (result == DC_OK)
-		result = build_mapped_resource(dc, context, stream);
-
-	if (result == DC_OK) {
-		validate_guaranteed_copy_streams(
-				context, dc->caps.max_streams);
-		result = resource_build_scaling_params_for_context(dc, context);
-	}
-
-	if (result == DC_OK)
-		if (!dce112_validate_bandwidth(dc, context))
-			result = DC_FAIL_BANDWIDTH_VALIDATE;
-
-	return result;
-}
-
 enum dc_status dce112_validate_global(
 		struct dc *dc,
 		struct dc_state *context)
@@ -921,7 +889,6 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce112_res_pool_funcs = {
 	.destroy = dce112_destroy_resource_pool,
 	.link_enc_create = dce112_link_encoder_create,
-	.validate_guaranteed = dce112_validate_guaranteed,
 	.validate_bandwidth = dce112_validate_bandwidth,
 	.validate_plane = dce100_validate_plane,
 	.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
index d5c19d34eb0a..95a403396219 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -42,11 +42,6 @@ enum dc_status dce112_validate_with_context(
 		struct dc_state *context,
 		struct dc_state *old_context);
 
-enum dc_status dce112_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *context);
-
 bool dce112_validate_bandwidth(
 	struct dc *dc,
 	struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 4659a4bfabaa..567e6b487877 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -684,7 +684,6 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce120_res_pool_funcs = {
 	.destroy = dce120_destroy_resource_pool,
 	.link_enc_create = dce120_link_encoder_create,
-	.validate_guaranteed = dce112_validate_guaranteed,
 	.validate_bandwidth = dce112_validate_bandwidth,
 	.validate_plane = dce100_validate_plane,
 	.add_stream_to_ctx = dce112_add_stream_to_ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 5d854a37a978..48a068964722 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -691,23 +691,6 @@ static void destruct(struct dce110_resource_pool *pool)
 	}
 }
 
-static enum dc_status build_mapped_resource(
-		const struct dc *dc,
-		struct dc_state *context,
-		struct dc_stream_state *stream)
-{
-	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
-
-	if (!pipe_ctx)
-		return DC_ERROR_UNEXPECTED;
-
-	dce110_resource_build_pipe_hw_param(pipe_ctx);
-
-	resource_build_info_frame(pipe_ctx);
-
-	return DC_OK;
-}
-
 bool dce80_validate_bandwidth(
 	struct dc *dc,
 	struct dc_state *context)
@@ -749,37 +732,6 @@ enum dc_status dce80_validate_global(
 	return DC_OK;
 }
 
-enum dc_status dce80_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *context)
-{
-	enum dc_status result = DC_ERROR_UNEXPECTED;
-
-	context->streams[0] = dc_stream;
-	dc_stream_retain(context->streams[0]);
-	context->stream_count++;
-
-	result = resource_map_pool_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = resource_map_clock_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = build_mapped_resource(dc, context, dc_stream);
-
-	if (result == DC_OK) {
-		validate_guaranteed_copy_streams(
-				context, dc->caps.max_streams);
-		result = resource_build_scaling_params_for_context(dc, context);
-	}
-
-	if (result == DC_OK)
-		result = dce80_validate_bandwidth(dc, context);
-
-	return result;
-}
-
 static void dce80_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -792,7 +744,6 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
 static const struct resource_funcs dce80_res_pool_funcs = {
 	.destroy = dce80_destroy_resource_pool,
 	.link_enc_create = dce80_link_encoder_create,
-	.validate_guaranteed = dce80_validate_guaranteed,
 	.validate_bandwidth = dce80_validate_bandwidth,
 	.validate_plane = dce100_validate_plane,
 	.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 3356125a6117..5f40a7374c02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -121,6 +121,13 @@ bool dpp_get_optimal_number_of_taps(
 	else
 		pixel_width = scl_data->viewport.width;
 
+	/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
+	if (scl_data->viewport.width  != scl_data->h_active &&
+		scl_data->viewport.height != scl_data->v_active &&
+		dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+		scl_data->format == PIXEL_FORMAT_FP16)
+		return false;
+
 	/* TODO: add lb check */
 
 	/* No support for programming ratio of 4, drop to 3.99999.. */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 02bd664aed3e..a3fe343b4a85 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -918,36 +918,6 @@ enum dc_status dcn10_add_stream_to_ctx(
 	return result;
 }
 
-enum dc_status dcn10_validate_guaranteed(
-		struct dc *dc,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *context)
-{
-	enum dc_status result = DC_ERROR_UNEXPECTED;
-
-	context->streams[0] = dc_stream;
-	dc_stream_retain(context->streams[0]);
-	context->stream_count++;
-
-	result = resource_map_pool_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = resource_map_phy_clock_resources(dc, context, dc_stream);
-
-	if (result == DC_OK)
-		result = build_mapped_resource(dc, context, dc_stream);
-
-	if (result == DC_OK) {
-		validate_guaranteed_copy_streams(
-				context, dc->caps.max_streams);
-		result = resource_build_scaling_params_for_context(dc, context);
-	}
-	if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
-		return DC_FAIL_BANDWIDTH_VALIDATE;
-
-	return result;
-}
-
 static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
 		struct dc_state *context,
 		const struct resource_pool *pool,
@@ -1233,7 +1203,6 @@ static struct dc_cap_funcs cap_funcs = {
 static struct resource_funcs dcn10_res_pool_funcs = {
 	.destroy = dcn10_destroy_resource_pool,
 	.link_enc_create = dcn10_link_encoder_create,
-	.validate_guaranteed = dcn10_validate_guaranteed,
 	.validate_bandwidth = dcn_validate_bandwidth,
 	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
 	.validate_plane = dcn10_validate_plane,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 55f56bf7d5b6..a94942d4e66b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -95,11 +95,6 @@ struct resource_funcs {
 	struct link_encoder *(*link_enc_create)(
 			const struct encoder_init_data *init);
 
-	enum dc_status (*validate_guaranteed)(
-					struct dc *dc,
-					struct dc_stream_state *stream,
-					struct dc_state *context);
-
 	bool (*validate_bandwidth)(
 					struct dc *dc,
 					struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5467332faf7b..640a647f4611 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -139,10 +139,6 @@ bool resource_validate_attach_surfaces(
 		struct dc_state *context,
 		const struct resource_pool *pool);
 
-void validate_guaranteed_copy_streams(
-		struct dc_state *context,
-		int max_streams);
-
 void resource_validate_ctx_update_pointer_after_copy(
 		const struct dc_state *src_ctx,
 		struct dc_state *dst_ctx);

From 3722c794641f91e0b960dd901d6c5d2f3cc24080 Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Wed, 21 Feb 2018 16:57:10 -0500
Subject: [PATCH 0295/1286] drm/amd/display: Implementing new bandwidth
 registers for DCE120

Registers are added, defined and programmed to default values.
The stutter-level watermark register is set to the calculated value,
and the urgent-level registers are programmed to the same values as
the urgency watermarks. Programming these registers is not expected
to make any functional difference in performance.
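
With this change the per-pipe stutter programming takes both the exit
watermark and the new entry watermark, and the DCE12 urgency helper
mirrors the urgency values into the new urgent-level fields. A rough
sketch of the call pattern from the diff (values are illustrative):

	/* set a: program both the stutter exit and the new entry mark */
	program_stutter_watermark(dce_mi, 0,
				  stutter_exit.a_mark,
				  stutter_entry.a_mark);

	/* DCE12: urgent-level fields follow the plain urgency watermarks */
	REG_SET_2(DPG_PIPE_URGENT_LEVEL_CONTROL, 0,
		URGENT_LEVEL_LOW_WATERMARK, urgency_low_wm,
		URGENT_LEVEL_HIGH_WATERMARK, urgency_high_wm);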

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/dce/dce_mem_input.c    | 58 +++++++++++++------
 .../drm/amd/display/dc/dce/dce_mem_input.h    |  9 +++
 .../display/dc/dce110/dce110_hw_sequencer.c   |  3 +
 .../display/dc/dce110/dce110_mem_input_v.c    |  1 +
 .../gpu/drm/amd/display/dc/inc/hw/mem_input.h |  1 +
 5 files changed, 55 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 0790f25c7b3b..04fc86bb95a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -174,6 +174,25 @@ static void program_urgency_watermark(
 		URGENCY_HIGH_WATERMARK, urgency_high_wm);
 }
 
+static void dce120_program_urgency_watermark(
+	struct dce_mem_input *dce_mi,
+	uint32_t wm_select,
+	uint32_t urgency_low_wm,
+	uint32_t urgency_high_wm)
+{
+	REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+		URGENCY_WATERMARK_MASK, wm_select);
+
+	REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
+		URGENCY_LOW_WATERMARK, urgency_low_wm,
+		URGENCY_HIGH_WATERMARK, urgency_high_wm);
+
+	REG_SET_2(DPG_PIPE_URGENT_LEVEL_CONTROL, 0,
+		URGENT_LEVEL_LOW_WATERMARK, urgency_low_wm,
+		URGENT_LEVEL_HIGH_WATERMARK, urgency_high_wm);
+
+}
+
 static void program_nbp_watermark(
 	struct dce_mem_input *dce_mi,
 	uint32_t wm_select,
@@ -209,23 +228,27 @@ static void program_nbp_watermark(
 static void program_stutter_watermark(
 	struct dce_mem_input *dce_mi,
 	uint32_t wm_select,
-	uint32_t stutter_mark)
+	uint32_t stutter_mark,
+	uint32_t stutter_entry)
 {
 	REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
 		STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
 
 	if (REG(DPG_PIPE_STUTTER_CONTROL2))
-		REG_UPDATE(DPG_PIPE_STUTTER_CONTROL2,
-				STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+		REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL2,
+				STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark,
+				STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
 	else
-		REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
-				STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+		REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+				STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark,
+				STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
 }
 
 static void dce_mi_program_display_marks(
 	struct mem_input *mi,
 	struct dce_watermarks nbp,
-	struct dce_watermarks stutter,
+	struct dce_watermarks stutter_exit,
+	struct dce_watermarks stutter_enter,
 	struct dce_watermarks urgent,
 	uint32_t total_dest_line_time_ns)
 {
@@ -243,26 +266,27 @@ static void dce_mi_program_display_marks(
 	program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */
 	program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */
 
-	program_stutter_watermark(dce_mi, 2, stutter.a_mark); /* set a */
-	program_stutter_watermark(dce_mi, 1, stutter.d_mark); /* set d */
+	program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark, stutter_enter.a_mark); /* set a */
+	program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark, stutter_enter.d_mark); /* set d */
 }
 
 static void dce120_mi_program_display_marks(struct mem_input *mi,
 	struct dce_watermarks nbp,
-	struct dce_watermarks stutter,
+	struct dce_watermarks stutter_exit,
+	struct dce_watermarks stutter_entry,
 	struct dce_watermarks urgent,
 	uint32_t total_dest_line_time_ns)
 {
 	struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
 	uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
 
-	program_urgency_watermark(dce_mi, 0, /* set a */
+	dce120_program_urgency_watermark(dce_mi, 0, /* set a */
 			urgent.a_mark, total_dest_line_time_ns);
-	program_urgency_watermark(dce_mi, 1, /* set b */
+	dce120_program_urgency_watermark(dce_mi, 1, /* set b */
 			urgent.b_mark, total_dest_line_time_ns);
-	program_urgency_watermark(dce_mi, 2, /* set c */
+	dce120_program_urgency_watermark(dce_mi, 2, /* set c */
 			urgent.c_mark, total_dest_line_time_ns);
-	program_urgency_watermark(dce_mi, 3, /* set d */
+	dce120_program_urgency_watermark(dce_mi, 3, /* set d */
 			urgent.d_mark, total_dest_line_time_ns);
 
 	REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
@@ -273,10 +297,10 @@ static void dce120_mi_program_display_marks(struct mem_input *mi,
 	program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
 	program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
 
-	program_stutter_watermark(dce_mi, 0, stutter.a_mark); /* set a */
-	program_stutter_watermark(dce_mi, 1, stutter.b_mark); /* set b */
-	program_stutter_watermark(dce_mi, 2, stutter.c_mark); /* set c */
-	program_stutter_watermark(dce_mi, 3, stutter.d_mark); /* set d */
+	program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */
+	program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */
+	program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */
+	program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */
 }
 
 static void program_tiling(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index 05d39c0cbe87..e877e7329e8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -106,6 +106,7 @@ struct dce_mem_input_registers {
 	uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
 	uint32_t DPG_WATERMARK_MASK_CONTROL;
 	uint32_t DPG_PIPE_URGENCY_CONTROL;
+	uint32_t DPG_PIPE_URGENT_LEVEL_CONTROL;
 	uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
 	uint32_t DPG_PIPE_LOW_POWER_CONTROL;
 	uint32_t DPG_PIPE_STUTTER_CONTROL;
@@ -213,6 +214,11 @@ struct dce_mem_input_registers {
 
 #define MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
 	SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+	SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_ENTER_SELF_REFRESH_WATERMARK, mask_sh),\
+	SFB(blk, DPG_PIPE_URGENT_LEVEL_CONTROL, URGENT_LEVEL_LOW_WATERMARK, mask_sh),\
+	SFB(blk, DPG_PIPE_URGENT_LEVEL_CONTROL, URGENT_LEVEL_HIGH_WATERMARK, mask_sh),\
+	SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
+	SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
 	SFB(blk, DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
 	SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, mask_sh),\
 	SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
@@ -286,6 +292,8 @@ struct dce_mem_input_registers {
 	type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \
 	type URGENCY_LOW_WATERMARK; \
 	type URGENCY_HIGH_WATERMARK; \
+	type URGENT_LEVEL_LOW_WATERMARK;\
+	type URGENT_LEVEL_HIGH_WATERMARK;\
 	type NB_PSTATE_CHANGE_ENABLE; \
 	type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \
 	type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
@@ -297,6 +305,7 @@ struct dce_mem_input_registers {
 	type STUTTER_ENABLE; \
 	type STUTTER_IGNORE_FBC; \
 	type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \
+	type STUTTER_ENTER_SELF_REFRESH_WATERMARK; \
 	type DMIF_BUFFERS_ALLOCATED; \
 	type DMIF_BUFFERS_ALLOCATION_COMPLETED; \
 	type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 30dd62f0f5fa..daa4673675f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1539,6 +1539,7 @@ static void dce110_set_displaymarks(
 			pipe_ctx->plane_res.mi,
 			context->bw.dce.nbp_state_change_wm_ns[num_pipes],
 			context->bw.dce.stutter_exit_wm_ns[num_pipes],
+			context->bw.dce.stutter_entry_wm_ns[num_pipes],
 			context->bw.dce.urgent_wm_ns[num_pipes],
 			total_dest_line_time_ns);
 		if (i == underlay_idx) {
@@ -1564,6 +1565,7 @@ static void set_safe_displaymarks(
 		MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK };
 	struct dce_watermarks nbp_marks = {
 		SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK };
+	struct dce_watermarks min_marks = { 0, 0, 0, 0};
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL)
@@ -1573,6 +1575,7 @@ static void set_safe_displaymarks(
 				res_ctx->pipe_ctx[i].plane_res.mi,
 				nbp_marks,
 				max_marks,
+				min_marks,
 				max_marks,
 				MAX_WATERMARK);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 7bab8c6d2a73..0564c8e31252 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -923,6 +923,7 @@ void dce_mem_input_v_program_display_marks(
 	struct mem_input *mem_input,
 	struct dce_watermarks nbp,
 	struct dce_watermarks stutter,
+	struct dce_watermarks stutter_enter,
 	struct dce_watermarks urgent,
 	uint32_t total_dest_line_time_ns)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 3e1e7e6a8792..47f1dc5a43b7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -104,6 +104,7 @@ struct mem_input_funcs {
 		struct mem_input *mem_input,
 		struct dce_watermarks nbp,
 		struct dce_watermarks stutter,
+		struct dce_watermarks stutter_enter,
 		struct dce_watermarks urgent,
 		uint32_t total_dest_line_time_ns);
 

From 8a6095e08e4ae279de570b175e5aee525ae1251e Mon Sep 17 00:00:00 2001
From: Tony Cheng <tony.cheng@amd.com>
Date: Wed, 21 Feb 2018 16:42:05 -0500
Subject: [PATCH 0296/1286] drm/amd/display: dal 3.1.40

Signed-off-by: Tony Cheng <tony.cheng@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index bdc3cef002d6..63817ed56c11 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.39"
+#define DC_VER "3.1.40"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6

From a052a516de4c3e46f2e442ec118c391dbf9932e3 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Tue, 13 Mar 2018 15:00:20 -0400
Subject: [PATCH 0297/1286] drm/amd/display: align dtn logs and add mpc idle
 bit print

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 53 ++++++++-----------
 .../gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c  |  4 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h   |  2 +
 3 files changed, 28 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 999190aa8a08..675a81a87099 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -56,16 +56,17 @@
 #define FN(reg_name, field_name) \
 	hws->shifts->field_name, hws->masks->field_name
 
+/* the printed field is 17 characters wide; the first two characters are spaces */
 #define DTN_INFO_MICRO_SEC(ref_cycle) \
 	print_microsec(dc_ctx, ref_cycle)
 
 void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
 {
-	static const uint32_t ref_clk_mhz = 48;
-	static const unsigned int frac = 10;
+	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+	static const unsigned int frac = 1000;
 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
 
-	DTN_INFO("%d.%d \t ",
+	DTN_INFO("  %11d.%03d",
 			us_x10 / frac,
 			us_x10 % frac);
 }
@@ -92,14 +93,14 @@ void dcn10_log_hubbub_state(struct dc *dc)
 
 	hubbub1_wm_read_state(dc->res_pool->hubbub, &wm);
 
-	DTN_INFO("HUBBUB WM: \t data_urgent \t pte_meta_urgent \t "
-			"sr_enter \t sr_exit \t dram_clk_change \n");
+	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
+			"         sr_enter          sr_exit  dram_clk_change\n");
 
 	for (i = 0; i < 4; i++) {
 		struct dcn_hubbub_wm_set *s;
 
 		s = &wm.sets[i];
-		DTN_INFO("WM_Set[%d]:\t ", s->wm_set);
+		DTN_INFO("WM_Set[%d]:", s->wm_set);
 		DTN_INFO_MICRO_SEC(s->data_urgent);
 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
 		DTN_INFO_MICRO_SEC(s->sr_enter);
@@ -121,19 +122,17 @@ void dcn10_log_hw_state(struct dc *dc)
 
 	dcn10_log_hubbub_state(dc);
 
-	DTN_INFO("HUBP:  format  addr_hi  width  height  "
-			"rotation  mirror  sw_mode  "
-			"dcc_en  blank_en  ttu_dis  underflow  "
-			"min_ttu_vblank  qos_low_wm  qos_high_wm\n");
+	DTN_INFO("HUBP:  format  addr_hi  width  height"
+			"  rot  mir  sw_mode  dcc_en  blank_en  ttu_dis  underflow"
+			"   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
 	for (i = 0; i < pool->pipe_count; i++) {
 		struct hubp *hubp = pool->hubps[i];
 		struct dcn_hubp_state s;
 
 		hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
 
-		DTN_INFO("[%-2d]:  %5xh  %6xh  %5d  %6d  "
-				"%7xh  %5xh  %6xh  "
-				"%6d  %8d  %7d  %8xh \t",
+		DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh"
+				"  %6d  %8d  %7d  %8xh",
 				hubp->inst,
 				s.pixel_format,
 				s.inuse_addr_hi,
@@ -152,25 +151,21 @@ void dcn10_log_hw_state(struct dc *dc)
 		DTN_INFO("\n");
 	}
 	DTN_INFO("\n");
+
+	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
 	for (i = 0; i < pool->pipe_count; i++) {
-		struct output_pixel_processor *opp = pool->opps[i];
-		struct mpcc *mpcc = opp->mpc_tree_params.opp_list;
 		struct mpcc_state s = {0};
 
-		while (mpcc) {
-			ASSERT(opp->mpc_tree_params.opp_id == opp->inst);
-			pool->mpc->funcs->read_mpcc_state(pool->mpc, mpcc->mpcc_id, &s);
-			DTN_INFO("[OPP%d - MPCC%d]: DPP%d MPCCBOT%x MODE:%d ALPHA_MODE:%d PREMULT:%d OVERLAP_ONLY:%d\n",
-				s.opp_id, mpcc->mpcc_id, s.dpp_id, s.bot_mpcc_id,
-				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only);
-			mpcc = mpcc->mpcc_bot;
-			ASSERT(!mpcc || mpcc->mpcc_id == s.bot_mpcc_id);
-		}
+		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+		DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
+			i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+			s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+			s.idle);
 	}
 	DTN_INFO("\n");
 
-	DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t "
-			"h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n");
+	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin"
+			"  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow\n");
 
 	for (i = 0; i < pool->timing_generator_count; i++) {
 		struct timing_generator *tg = pool->timing_generators[i];
@@ -182,9 +177,8 @@ void dcn10_log_hw_state(struct dc *dc)
 		if ((s.otg_enabled & 1) == 0)
 			continue;
 
-		DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t "
-				"%d \t %d \t %d \t %d \t %d \t %d \t "
-				"%d \t %d \t %d \t %d \t %d \t ",
+		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %5d %5d %5d"
+				" %5d %5d %5d %5d  %9d\n",
 				tg->inst,
 				s.v_blank_start,
 				s.v_blank_end,
@@ -201,7 +195,6 @@ void dcn10_log_hw_state(struct dc *dc)
 				s.h_total,
 				s.v_total,
 				s.underflow_occurred_status);
-		DTN_INFO("\n");
 	}
 	DTN_INFO("\n");
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 29e15a93a7d0..6f7016a2a11e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -422,7 +422,9 @@ void mpc1_read_mpcc_state(
 	REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
 			MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
 			MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
-			MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->pre_multiplied_alpha);
+			MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+	REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+			MPCC_BUSY, &s->busy);
 }
 
 const struct mpc_funcs dcn10_mpc_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 5caacab216b5..caf74e3c836f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -113,6 +113,8 @@ struct mpcc_state {
 	uint32_t alpha_mode;
 	uint32_t pre_multiplied_alpha;
 	uint32_t overlap_only;
+	uint32_t idle;
+	uint32_t busy;
 };
 
 struct mpc_funcs {

From 7a84077304e84d0254d505a76cc40971cb74c2de Mon Sep 17 00:00:00 2001
From: Roman Li <Roman.Li@amd.com>
Date: Wed, 14 Mar 2018 18:02:07 -0400
Subject: [PATCH 0298/1286] drm/amd/display: add assert in enable FBC

Add an assert to guard against a possible NULL dereference.
This can only happen under abnormal circumstances.
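
The assert documents the invariant without changing control flow. A
minimal sketch of the pattern; the explicit NULL check below is
hypothetical extra hardening and not part of this patch:

	/* Pipe context should be found; catch the abnormal case early. */
	ASSERT(pipe_ctx);
	if (!pipe_ctx)
		return false;	/* hypothetical extra check, not in this patch */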

Signed-off-by: Roman Li <Roman.Li@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index daa4673675f1..075ab291cdc7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1801,6 +1801,9 @@ static bool should_enable_fbc(struct dc *dc,
 		}
 	}
 
+	/* Pipe context should be found */
+	ASSERT(pipe_ctx);
+
 	/* Only supports eDP */
 	if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
 		return false;

From d72eb20379022a948c219e1fc451b6b0200cc7c5 Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Wed, 14 Mar 2018 14:42:25 -0400
Subject: [PATCH 0299/1286] drm/amd/display: Separate mem input constructors for
 dce 112 and 120

Override the memory input functions for dce120 so that the new
registers are not programmed on dce112.
This fixes warnings thrown on Polaris ASICs.
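
Both constructors share the common dce_mem_input_construct() and then
override only the display-marks hook, roughly:

	/* sketch of the override pattern used by the new dce120 constructor */
	dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
	dce_mi->base.funcs->mem_input_program_display_marks =
			dce120_mi_program_display_marks;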

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/dce/dce_mem_input.c    | 75 +++++++++++++++++--
 .../drm/amd/display/dc/dce/dce_mem_input.h    |  8 ++
 .../amd/display/dc/dce120/dce120_resource.c   |  2 +-
 3 files changed, 77 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 04fc86bb95a1..b235a75355b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -225,7 +225,7 @@ static void program_nbp_watermark(
 	}
 }
 
-static void program_stutter_watermark(
+static void dce120_program_stutter_watermark(
 	struct dce_mem_input *dce_mi,
 	uint32_t wm_select,
 	uint32_t stutter_mark,
@@ -244,6 +244,22 @@ static void program_stutter_watermark(
 				STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
 }
 
+static void program_stutter_watermark(
+	struct dce_mem_input *dce_mi,
+	uint32_t wm_select,
+	uint32_t stutter_mark)
+{
+	REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+		STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
+
+	if (REG(DPG_PIPE_STUTTER_CONTROL2))
+		REG_UPDATE(DPG_PIPE_STUTTER_CONTROL2,
+				STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+	else
+		REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
+				STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+}
+
 static void dce_mi_program_display_marks(
 	struct mem_input *mi,
 	struct dce_watermarks nbp,
@@ -266,8 +282,41 @@ static void dce_mi_program_display_marks(
 	program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */
 	program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */
 
-	program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark, stutter_enter.a_mark); /* set a */
-	program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark, stutter_enter.d_mark); /* set d */
+	program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark); /* set a */
+	program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */
+}
+
+static void dce112_mi_program_display_marks(struct mem_input *mi,
+	struct dce_watermarks nbp,
+	struct dce_watermarks stutter_exit,
+	struct dce_watermarks stutter_entry,
+	struct dce_watermarks urgent,
+	uint32_t total_dest_line_time_ns)
+{
+	struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+	uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+	program_urgency_watermark(dce_mi, 0, /* set a */
+			urgent.a_mark, total_dest_line_time_ns);
+	program_urgency_watermark(dce_mi, 1, /* set b */
+			urgent.b_mark, total_dest_line_time_ns);
+	program_urgency_watermark(dce_mi, 2, /* set c */
+			urgent.c_mark, total_dest_line_time_ns);
+	program_urgency_watermark(dce_mi, 3, /* set d */
+			urgent.d_mark, total_dest_line_time_ns);
+
+	REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+		STUTTER_ENABLE, stutter_en,
+		STUTTER_IGNORE_FBC, 1);
+	program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */
+	program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */
+	program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
+	program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
+
+	program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark); /* set a */
+	program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark); /* set b */
+	program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark); /* set c */
+	program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark); /* set d */
 }
 
 static void dce120_mi_program_display_marks(struct mem_input *mi,
@@ -297,10 +346,10 @@ static void dce120_mi_program_display_marks(struct mem_input *mi,
 	program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
 	program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
 
-	program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */
-	program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */
-	program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */
-	program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */
+	dce120_program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */
+	dce120_program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */
+	dce120_program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */
+	dce120_program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */
 }
 
 static void program_tiling(
@@ -718,6 +767,18 @@ void dce112_mem_input_construct(
 	const struct dce_mem_input_registers *regs,
 	const struct dce_mem_input_shift *mi_shift,
 	const struct dce_mem_input_mask *mi_mask)
+{
+	dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
+	dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+}
+
+void dce120_mem_input_construct(
+	struct dce_mem_input *dce_mi,
+	struct dc_context *ctx,
+	int inst,
+	const struct dce_mem_input_registers *regs,
+	const struct dce_mem_input_shift *mi_shift,
+	const struct dce_mem_input_mask *mi_mask)
 {
 	dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
 	dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index e877e7329e8f..d15b0d7f47fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -353,4 +353,12 @@ void dce112_mem_input_construct(
 	const struct dce_mem_input_shift *mi_shift,
 	const struct dce_mem_input_mask *mi_mask);
 
+void dce120_mem_input_construct(
+	struct dce_mem_input *dce_mi,
+	struct dc_context *ctx,
+	int inst,
+	const struct dce_mem_input_registers *regs,
+	const struct dce_mem_input_shift *mi_shift,
+	const struct dce_mem_input_mask *mi_mask);
+
 #endif /*__DCE_MEM_INPUT_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 567e6b487877..fda01574d1ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -652,7 +652,7 @@ static struct mem_input *dce120_mem_input_create(
 		return NULL;
 	}
 
-	dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+	dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
 	return &dce_mi->base;
 }
 

From 4173c0bdd7b79ef46161037f8845654416dbaca9 Mon Sep 17 00:00:00 2001
From: Eric Yang <Eric.Yang2@amd.com>
Date: Wed, 14 Mar 2018 17:41:57 -0400
Subject: [PATCH 0300/1286] drm/amd/display: Only update mpc blend config if
 not full update

The current MPCC insert/remove logic does not support updating only
a single MPCC, so when per-pixel alpha changes without a full update
we can mistakenly shuffle the MPCC layering order. With this change
we only insert/remove an MPCC when there is a full update.
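
The fast path added by this change leaves the MPC tree untouched and
only refreshes the blend configuration of the MPCC already bound to
the pipe, roughly:

	/* no full update: keep the tree, just reprogram blending */
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		return;
	}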

Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c    | 9 +++++++++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c         | 5 +++--
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 675a81a87099..27ae88e3a373 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1631,6 +1631,8 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	struct mpc *mpc = dc->res_pool->mpc;
 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
+
+
 	/* TODO: proper fix once fpga works */
 
 	if (dc->debug.surface_visual_confirm)
@@ -1657,6 +1659,7 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 			pipe_ctx->stream->output_color_space)
 					&& per_pixel_alpha;
 
+
 	/*
 	 * TODO: remove hack
 	 * Note: currently there is a bug in init_hw such that
@@ -1667,6 +1670,12 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	 */
 	mpcc_id = hubp->inst;
 
+	/* If there is no full update, don't need to touch MPC tree*/
+	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
+		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+		return;
+	}
+
 	/* check if this MPCC is already being used */
 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
 	/* remove MPCC if being used */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 6f7016a2a11e..9ca51ae46de7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -65,6 +65,7 @@ static void mpc1_update_blending(
 	int mpcc_id)
 {
 	struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+	struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id);
 
 	REG_UPDATE_5(MPCC_CONTROL[mpcc_id],
 			MPCC_ALPHA_BLND_MODE,		blnd_cfg->alpha_mode,
@@ -74,6 +75,7 @@ static void mpc1_update_blending(
 			MPCC_GLOBAL_GAIN,		blnd_cfg->global_gain);
 
 	mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
+	mpcc->blnd_cfg = *blnd_cfg;
 }
 
 void mpc1_update_stereo_mix(
@@ -235,8 +237,7 @@ struct mpcc *mpc1_insert_plane(
 	}
 
 	/* update the blending configuration */
-	new_mpcc->blnd_cfg = *blnd_cfg;
-	mpc->funcs->update_blending(mpc, &new_mpcc->blnd_cfg, mpcc_id);
+	mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
 
 	/* update the stereo mix settings, if provided */
 	if (sm_cfg != NULL) {

From 1ba2faf207b47e23b1d756e7be25e980724214a7 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Thu, 15 Mar 2018 10:25:43 -0400
Subject: [PATCH 0301/1286] drm/amd/display: hide inconsistent mpcc programming
 from dtn log

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c    | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 27ae88e3a373..e21458169d15 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -157,10 +157,11 @@ void dcn10_log_hw_state(struct dc *dc)
 		struct mpcc_state s = {0};
 
 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
-		DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
-			i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
-			s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
-			s.idle);
+		if (s.opp_id != 0xf)
+			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
+				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+				s.idle);
 	}
 	DTN_INFO("\n");
 

From 24238ee6549bcbebca2f6fc49c225c4f377df8c0 Mon Sep 17 00:00:00 2001
From: Vitaly Prosyak <vitaly.prosyak@amd.com>
Date: Tue, 13 Mar 2018 15:18:34 -0500
Subject: [PATCH 0302/1286] drm/amd/display: Add dc_lut_mode enum

Signed-off-by: Vitaly Prosyak <vitaly.prosyak@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index b22158190262..015e209e58bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -191,4 +191,9 @@ enum controller_dp_test_pattern {
 	CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
 };
 
+enum dc_lut_mode {
+	LUT_BYPASS,
+	LUT_RAM_A,
+	LUT_RAM_B
+};
 #endif /* __DAL_HW_SHARED_H__ */

From 3032deb52a6bf706657c39d6335c81ce3265974d Mon Sep 17 00:00:00 2001
From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Date: Wed, 14 Mar 2018 11:19:15 -0400
Subject: [PATCH 0303/1286] drm/amd/display: Correct print types in DC_LOGS

Correct the format specifiers used for printing in logs. This is needed
for adding dynamic printing (LINUX); otherwise we get compiler warnings.
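
As a generic C illustration (plain printf outside the driver, not the DC
logging macros), these are the kinds of mismatches the compiler warns
about, together with the corrected forms:

	#include <stdio.h>

	int main(void)
	{
		double latency = 9.0;	/* hypothetical value */
		void *enc = (void *)0;	/* hypothetical pointer */

		/* -Wformat warnings: %d with a double, %x with a pointer,
		 * and a lone '%' that should be written as '%%'.
		 */
		printf("latency: %d ns, enc: 0x%x, margin: %d %\n", latency, enc, 3);

		/* Matching specifiers, as in this patch: %f, %p and %%. */
		printf("latency: %f ns, enc: %p, margin: %d %%\n", latency, enc, 3);
		return 0;
	}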

Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/calcs/dcn_calcs.c  | 74 +++++++++----------
 .../gpu/drm/amd/display/dc/core/dc_debug.c    | 14 ++--
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  8 +-
 .../display/dc/dce110/dce110_hw_sequencer.c   |  4 +-
 .../dc/i2caux/dce110/i2c_hw_engine_dce110.c   |  2 +-
 5 files changed, 52 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 4bb43a371292..a102c192328d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1459,39 +1459,39 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 {
 	kernel_fpu_begin();
-	DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %d ns\n"
-			"sr_enter_plus_exit_time: %d ns\n"
-			"urgent_latency: %d ns\n"
-			"write_back_latency: %d ns\n"
-			"percent_of_ideal_drambw_received_after_urg_latency: %d %\n"
+	DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
+			"sr_enter_plus_exit_time: %f ns\n"
+			"urgent_latency: %f ns\n"
+			"write_back_latency: %f ns\n"
+			"percent_of_ideal_drambw_received_after_urg_latency: %f %%\n"
 			"max_request_size: %d bytes\n"
-			"dcfclkv_max0p9: %d kHz\n"
-			"dcfclkv_nom0p8: %d kHz\n"
-			"dcfclkv_mid0p72: %d kHz\n"
-			"dcfclkv_min0p65: %d kHz\n"
-			"max_dispclk_vmax0p9: %d kHz\n"
-			"max_dispclk_vnom0p8: %d kHz\n"
-			"max_dispclk_vmid0p72: %d kHz\n"
-			"max_dispclk_vmin0p65: %d kHz\n"
-			"max_dppclk_vmax0p9: %d kHz\n"
-			"max_dppclk_vnom0p8: %d kHz\n"
-			"max_dppclk_vmid0p72: %d kHz\n"
-			"max_dppclk_vmin0p65: %d kHz\n"
-			"socclk: %d kHz\n"
-			"fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
-			"fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
-			"fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
-			"fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
-			"phyclkv_max0p9: %d kHz\n"
-			"phyclkv_nom0p8: %d kHz\n"
-			"phyclkv_mid0p72: %d kHz\n"
-			"phyclkv_min0p65: %d kHz\n"
-			"downspreading: %d %\n"
+			"dcfclkv_max0p9: %f kHz\n"
+			"dcfclkv_nom0p8: %f kHz\n"
+			"dcfclkv_mid0p72: %f kHz\n"
+			"dcfclkv_min0p65: %f kHz\n"
+			"max_dispclk_vmax0p9: %f kHz\n"
+			"max_dispclk_vnom0p8: %f kHz\n"
+			"max_dispclk_vmid0p72: %f kHz\n"
+			"max_dispclk_vmin0p65: %f kHz\n"
+			"max_dppclk_vmax0p9: %f kHz\n"
+			"max_dppclk_vnom0p8: %f kHz\n"
+			"max_dppclk_vmid0p72: %f kHz\n"
+			"max_dppclk_vmin0p65: %f kHz\n"
+			"socclk: %f kHz\n"
+			"fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n"
+			"fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n"
+			"fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n"
+			"fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n"
+			"phyclkv_max0p9: %f kHz\n"
+			"phyclkv_nom0p8: %f kHz\n"
+			"phyclkv_mid0p72: %f kHz\n"
+			"phyclkv_min0p65: %f kHz\n"
+			"downspreading: %f %%\n"
 			"round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
 			"urgent_out_of_order_return_per_channel: %d Bytes\n"
 			"number_of_channels: %d\n"
 			"vmm_page_size: %d Bytes\n"
-			"dram_clock_change_latency: %d ns\n"
+			"dram_clock_change_latency: %f ns\n"
 			"return_bus_width: %d Bytes\n",
 			dc->dcn_soc->sr_exit_time * 1000,
 			dc->dcn_soc->sr_enter_plus_exit_time * 1000,
@@ -1527,11 +1527,11 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 			dc->dcn_soc->vmm_page_size,
 			dc->dcn_soc->dram_clock_change_latency * 1000,
 			dc->dcn_soc->return_bus_width);
-	DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %d\n"
-			"det_buffer_size_in_kbyte: %d\n"
-			"dpp_output_buffer_pixels: %d\n"
-			"opp_output_buffer_lines: %d\n"
-			"pixel_chunk_size_in_kbyte: %d\n"
+	DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n"
+			"det_buffer_size_in_kbyte: %f\n"
+			"dpp_output_buffer_pixels: %f\n"
+			"opp_output_buffer_lines: %f\n"
+			"pixel_chunk_size_in_kbyte: %f\n"
 			"pte_enable: %d\n"
 			"pte_chunk_size: %d kbytes\n"
 			"meta_chunk_size: %d kbytes\n"
@@ -1550,13 +1550,13 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 			"max_pscl_tolb_throughput: %d pixels/dppclk\n"
 			"max_lb_tovscl_throughput: %d pixels/dppclk\n"
 			"max_vscl_tohscl_throughput: %d pixels/dppclk\n"
-			"max_hscl_ratio: %d\n"
-			"max_vscl_ratio: %d\n"
+			"max_hscl_ratio: %f\n"
+			"max_vscl_ratio: %f\n"
 			"max_hscl_taps: %d\n"
 			"max_vscl_taps: %d\n"
 			"pte_buffer_size_in_requests: %d\n"
-			"dispclk_ramping_margin: %d %\n"
-			"under_scan_factor: %d %\n"
+			"dispclk_ramping_margin: %f %%\n"
+			"under_scan_factor: %f %%\n"
 			"max_inter_dcn_tile_repeaters: %d\n"
 			"can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
 			"bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 5a552cb3f8a7..71cc60fcff5e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -72,8 +72,8 @@ void pre_surface_trace(
 				"plane_state->visible = %d;\n"
 				"plane_state->flip_immediate = %d;\n"
 				"plane_state->address.type = %d;\n"
-				"plane_state->address.grph.addr.quad_part = 0x%X;\n"
-				"plane_state->address.grph.meta_addr.quad_part = 0x%X;\n"
+				"plane_state->address.grph.addr.quad_part = 0x%llX;\n"
+				"plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n"
 				"plane_state->scaling_quality.h_taps = %d;\n"
 				"plane_state->scaling_quality.v_taps = %d;\n"
 				"plane_state->scaling_quality.h_taps_c = %d;\n"
@@ -192,8 +192,8 @@ void update_surface_trace(
 		SURFACE_TRACE("Update %d\n", i);
 		if (update->flip_addr) {
 			SURFACE_TRACE("flip_addr->address.type = %d;\n"
-					"flip_addr->address.grph.addr.quad_part = 0x%X;\n"
-					"flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n"
+					"flip_addr->address.grph.addr.quad_part = 0x%llX;\n"
+					"flip_addr->address.grph.meta_addr.quad_part = 0x%llX;\n"
 					"flip_addr->flip_immediate = %d;\n",
 					update->flip_addr->address.type,
 					update->flip_addr->address.grph.addr.quad_part,
@@ -211,7 +211,8 @@ void update_surface_trace(
 					"plane_info->plane_size.grph.surface_size.width = %d;\n"
 					"plane_info->plane_size.grph.surface_size.x = %d;\n"
 					"plane_info->plane_size.grph.surface_size.y = %d;\n"
-					"plane_info->rotation = %d;\n",
+					"plane_info->rotation = %d;\n"
+					"plane_info->stereo_format = %d;\n",
 					update->plane_info->color_space,
 					update->plane_info->input_tf,
 					update->plane_info->format,
@@ -371,6 +372,7 @@ void context_clock_trace(
 			context->bw.dcn.calc_clk.dppclk_khz,
 			context->bw.dcn.calc_clk.dcfclk_khz,
 			context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
-			context->bw.dcn.calc_clk.fclk_khz);
+			context->bw.dcn.calc_clk.fclk_khz,
+			context->bw.dcn.calc_clk.socclk_khz);
 #endif
 }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index eeb04471b2f5..82ee9de23115 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2165,11 +2165,11 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
 			link->mst_stream_alloc_table.stream_count);
 
 	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
-		DC_LOG_MST("stream_enc[%d]: 0x%x      "
+		DC_LOG_MST("stream_enc[%d]: %p      "
 		"stream[%d].vcp_id: %d      "
 		"stream[%d].slot_count: %d\n",
 		i,
-		link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
 		i,
 		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
 		i,
@@ -2255,11 +2255,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
 			link->mst_stream_alloc_table.stream_count);
 
 	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
-		DC_LOG_MST("stream_enc[%d]: 0x%x      "
+		DC_LOG_MST("stream_enc[%d]: %p      "
 		"stream[%d].vcp_id: %d      "
 		"stream[%d].slot_count: %d\n",
 		i,
-		link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
 		i,
 		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
 		i,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 075ab291cdc7..c6212301712b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2777,13 +2777,13 @@ static void dce110_program_front_end_for_pipe(
 		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
 
 	DC_LOG_SURFACE(
-			"Pipe:%d 0x%x: addr hi:0x%x, "
+			"Pipe:%d %p: addr hi:0x%x, "
 			"addr low:0x%x, "
 			"src: %d, %d, %d,"
 			" %d; dst: %d, %d, %d, %d;"
 			"clip: %d, %d, %d, %d\n",
 			pipe_ctx->pipe_idx,
-			pipe_ctx->plane_state,
+			(void *) pipe_ctx->plane_state,
 			pipe_ctx->plane_state->address.grph.addr.high_part,
 			pipe_ctx->plane_state->address.grph.addr.low_part,
 			pipe_ctx->plane_state->src_rect.x,
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index abd0095ced30..b7256f595052 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -527,7 +527,7 @@ static void construct(
 	REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
 
 	if (xtal_ref_div == 0) {
-		DC_LOG_WARNING("Invalid base timer divider\n",
+		DC_LOG_WARNING("Invalid base timer divider [%s]\n",
 				__func__);
 		xtal_ref_div = 2;
 	}

From 8d815b4635382dddd58bf03bbcfac9f4e5201151 Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Tue, 13 Mar 2018 16:40:51 -0400
Subject: [PATCH 0304/1286] drm/amd/display: Add num_active_wb to DML

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index e296de6ca502..ce750edc1e5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -224,6 +224,7 @@ struct	_vcs_dpi_display_output_params_st	{
 	int	output_bpp;
 	int	dsc_enable;
 	int	wb_enable;
+	int	num_active_wb;
 	int	opp_input_bpc;
 	int	output_type;
 	int	output_format;

From 7608f8569d8fee1372d4a3409aea5cca0b13b194 Mon Sep 17 00:00:00 2001
From: Xingyue Tao <xingyue.tao@amd.com>
Date: Wed, 14 Mar 2018 17:57:42 -0400
Subject: [PATCH 0305/1286] drm/amd/display: Add double buffer mechanism to
 ICSC

- Video playback shows tearing when adjusting
brightness through Radeon custom settings.
- Add a double buffer mechanism that alternates the
input CSC between the ICSC and COMA register buffers.
- Improve tab alignment.
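
A simplified sketch of the ping-pong selection (the helper names below
are hypothetical; the real implementation reads the CM debug status
register and programs CM_ICSC_CONTROL, as in dpp1_program_input_csc()
in the diff):

	enum dcn10_input_csc_select cur, next;

	/* 1. Ask the hardware which matrix set is currently active. */
	cur = read_current_icsc_select(dpp);		/* hypothetical helper */

	/* 2. Program the *other* set while the active one keeps feeding
	 * the pipe, so a partially written matrix is never used.
	 */
	next = (cur == INPUT_CSC_SELECT_ICSC) ? INPUT_CSC_SELECT_COMA
					      : INPUT_CSC_SELECT_ICSC;
	program_csc_matrix(dpp, next, regval);		/* hypothetical helper */

	/* 3. Flip the selection; the switch takes effect on the next
	 * frame boundary, avoiding tearing during brightness changes.
	 */
	set_icsc_mode(dpp, next);			/* hypothetical helper */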

Signed-off-by: Xingyue Tao <xingyue.tao@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  | 11 ++++-
 .../drm/amd/display/dc/dcn10/dcn10_dpp_cm.c   | 44 ++++++++++++-------
 .../drm/amd/display/dc/dcn10/dcn10_resource.c | 31 ++++++++++++-
 3 files changed, 67 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 17b062a8f88a..b81b2aa3c49f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -108,6 +108,8 @@
 	SRI(CM_DGAM_LUT_DATA, CM, id), \
 	SRI(CM_CONTROL, CM, id), \
 	SRI(CM_DGAM_CONTROL, CM, id), \
+	SRI(CM_TEST_DEBUG_INDEX, CM, id), \
+	SRI(CM_TEST_DEBUG_DATA, CM, id), \
 	SRI(FORMAT_CONTROL, CNVC_CFG, id), \
 	SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
 	SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
@@ -300,6 +302,7 @@
 	TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \
 	TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \
 	TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \
+	TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \
 	TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
 	TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
 	TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
@@ -1010,6 +1013,8 @@
 	type CUR0_EXPANSION_MODE; \
 	type CUR0_ENABLE; \
 	type CM_BYPASS; \
+	type CM_TEST_DEBUG_INDEX; \
+	type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \
 	type FORMAT_CONTROL__ALPHA_EN; \
 	type CUR0_COLOR0; \
 	type CUR0_COLOR1; \
@@ -1255,6 +1260,8 @@ struct dcn_dpp_mask {
 	uint32_t CM_IGAM_LUT_RW_CONTROL; \
 	uint32_t CM_IGAM_LUT_RW_INDEX; \
 	uint32_t CM_IGAM_LUT_SEQ_COLOR; \
+	uint32_t CM_TEST_DEBUG_INDEX; \
+	uint32_t CM_TEST_DEBUG_DATA; \
 	uint32_t FORMAT_CONTROL; \
 	uint32_t CNVC_SURFACE_PIXEL_FORMAT; \
 	uint32_t CURSOR_CONTROL; \
@@ -1289,8 +1296,8 @@ struct dcn10_dpp {
 
 enum dcn10_input_csc_select {
 	INPUT_CSC_SELECT_BYPASS = 0,
-	INPUT_CSC_SELECT_ICSC,
-	INPUT_CSC_SELECT_COMA
+	INPUT_CSC_SELECT_ICSC = 1,
+	INPUT_CSC_SELECT_COMA = 2
 };
 
 void dpp1_set_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index fb32975e4b67..cc511415caee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -267,6 +267,7 @@ void dpp1_cm_set_output_csc_default(
 		BREAK_TO_DEBUGGER();
 		return;
 	}
+
 	dpp1_cm_program_color_matrix(dpp, regval);
 	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
 }
@@ -330,6 +331,7 @@ void dpp1_cm_set_output_csc_adjustment(
 {
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
 	uint32_t ocsc_mode = 4;
+
 	dpp1_cm_program_color_matrix(dpp, regval);
 	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
 }
@@ -437,17 +439,18 @@ void dpp1_cm_program_regamma_lutb_settings(
 void dpp1_program_input_csc(
 		struct dpp *dpp_base,
 		enum dc_color_space color_space,
-		enum dcn10_input_csc_select select,
+		enum dcn10_input_csc_select input_select,
 		const struct out_csc_color_matrix *tbl_entry)
 {
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
 	int i;
 	int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix);
 	const uint16_t *regval = NULL;
-	uint32_t selection = 1;
+	uint32_t cur_select = 0;
+	enum dcn10_input_csc_select select;
 	struct color_matrices_reg gam_regs;
 
-	if (select == INPUT_CSC_SELECT_BYPASS) {
+	if (input_select == INPUT_CSC_SELECT_BYPASS) {
 		REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
 		return;
 	}
@@ -467,36 +470,45 @@ void dpp1_program_input_csc(
 		regval = tbl_entry->regval;
 	}
 
-	if (select == INPUT_CSC_SELECT_COMA)
-		selection = 2;
-	REG_SET(CM_ICSC_CONTROL, 0,
-			CM_ICSC_MODE, selection);
+	/* determine which CSC matrix (icsc or coma) we are using
+	 * currently.  select the alternate set to double buffer
+	 * the CSC update so CSC is updated on frame boundary
+	 */
+	REG_SET(CM_TEST_DEBUG_INDEX, 0,
+			CM_TEST_DEBUG_INDEX, 9);
+
+	REG_GET(CM_TEST_DEBUG_DATA,
+			CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select);
+
+	if (cur_select != INPUT_CSC_SELECT_ICSC)
+		select = INPUT_CSC_SELECT_ICSC;
+	else
+		select = INPUT_CSC_SELECT_COMA;
 
 	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
 	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_ICSC_C11;
 	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
 	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
 
-
 	if (select == INPUT_CSC_SELECT_ICSC) {
 
 		gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
 		gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
 
-		cm_helper_program_color_matrices(
-				dpp->base.ctx,
-				regval,
-				&gam_regs);
 	} else {
 
 		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
 		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
 
-		cm_helper_program_color_matrices(
-				dpp->base.ctx,
-				regval,
-				&gam_regs);
 	}
+
+	cm_helper_program_color_matrices(
+			dpp->base.ctx,
+			regval,
+			&gam_regs);
+
+	REG_SET(CM_ICSC_CONTROL, 0,
+				CM_ICSC_MODE, select);
 }
 
 //keep here for now, decide multi dce support later
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index a3fe343b4a85..d321da97217c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -319,12 +319,41 @@ static const struct dcn_dpp_registers tf_regs[] = {
 	tf_regs(3),
 };
 
+/*
+ *
+	DCN1 CM debug status register definition
+
+	register :ID9_CM_STATUS do
+	implement_ref :cm
+	map to:  :cmdebugind, at: j
+	width 32
+	disclosure   NEVER
+
+		field :ID9_VUPDATE_CFG, [0], R
+		field :ID9_IGAM_LUT_MODE, [2..1], R
+		field :ID9_BNS_BYPASS, [3], R
+		field :ID9_ICSC_MODE, [5..4], R
+		field :ID9_DGAM_LUT_MODE, [8..6], R
+		field :ID9_HDR_BYPASS, [9], R
+		field :ID9_GAMUT_REMAP_MODE, [11..10], R
+		field :ID9_RGAM_LUT_MODE, [14..12], R
+		#1 free bit
+		field :ID9_OCSC_MODE, [18..16], R
+		field :ID9_DENORM_MODE, [21..19], R
+		field :ID9_ROUND_TRUNC_MODE, [25..22], R
+		field :ID9_DITHER_EN, [26], R
+		field :ID9_DITHER_MODE, [28..27], R
+	end
+*/
+
 static const struct dcn_dpp_shift tf_shift = {
-	TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
+	TF_REG_LIST_SH_MASK_DCN10(__SHIFT),
+	.CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x4
 };
 
 static const struct dcn_dpp_mask tf_mask = {
 	TF_REG_LIST_SH_MASK_DCN10(_MASK),
+	.CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30
 };
 
 static const struct dcn_mpc_registers mpc_regs = {

From f412e8307d0ac6cbffd1240fb655557c126a0f2c Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 15 Mar 2018 13:31:14 -0400
Subject: [PATCH 0306/1286] drm/amd/display: Couple bug fixes in stats module

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/modules/stats/stats.c | 39 +++++++++++++------
 1 file changed, 28 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 041f87b73d5f..ed5f6809a64e 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -187,7 +187,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 
 		for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
 			dm_logger_write(logger, LOG_PROFILING,
-					"%u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u\n",
+					"%u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u",
 					time[i].render_time_in_us,
 					time[i].avg_render_time_in_us_last_ten,
 					time[i].min_window,
@@ -227,7 +227,7 @@ void mod_stats_reset_data(struct mod_stats *mod_stats)
 	memset(core_stats->time, 0,
 		sizeof(struct stats_time_cache) * core_stats->entries);
 
-	core_stats->index = 0;
+	core_stats->index = 1;
 }
 
 void mod_stats_update_flip(struct mod_stats *mod_stats,
@@ -250,7 +250,7 @@ void mod_stats_update_flip(struct mod_stats *mod_stats,
 
 	time[index].flip_timestamp_in_ns = timestamp_in_ns;
 	time[index].render_time_in_us =
-		timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
+		(timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
 
 	if (index >= 10) {
 		for (unsigned int i = 0; i < 10; i++)
@@ -261,10 +261,12 @@ void mod_stats_update_flip(struct mod_stats *mod_stats,
 
 	if (time[index].num_vsync_between_flips > 0)
 		time[index].vsync_to_flip_time_in_us =
-			timestamp_in_ns - time[index].vupdate_timestamp_in_ns;
+			(timestamp_in_ns -
+				time[index].vupdate_timestamp_in_ns) / 1000;
 	else
 		time[index].vsync_to_flip_time_in_us =
-			timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
+			(timestamp_in_ns -
+				time[index - 1].vupdate_timestamp_in_ns) / 1000;
 
 	core_stats->index++;
 }
@@ -275,6 +277,8 @@ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
 	struct core_stats *core_stats = NULL;
 	struct stats_time_cache *time = NULL;
 	unsigned int index = 0;
+	unsigned int num_vsyncs = 0;
+	unsigned int prev_vsync_in_ns = 0;
 
 	if (mod_stats == NULL)
 		return;
@@ -286,14 +290,27 @@ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
 
 	time = core_stats->time;
 	index = core_stats->index;
+	num_vsyncs = time[index].num_vsync_between_flips;
+
+	if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
+		if (num_vsyncs == 0) {
+			prev_vsync_in_ns =
+				time[index - 1].vupdate_timestamp_in_ns;
+
+			time[index].flip_to_vsync_time_in_us =
+				(timestamp_in_ns -
+					time[index - 1].flip_timestamp_in_ns) /
+					1000;
+		} else {
+			prev_vsync_in_ns =
+				time[index].vupdate_timestamp_in_ns;
+		}
+
+		time[index].v_sync_time_in_us[num_vsyncs] =
+			(timestamp_in_ns - prev_vsync_in_ns) / 1000;
+	}
 
 	time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
-	if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS)
-		time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] =
-			timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
-	time[index].flip_to_vsync_time_in_us =
-		timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
-
 	time[index].num_vsync_between_flips++;
 }
 

From e09b6473c605119a5f7c451a93a9e812e216a824 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 15 Mar 2018 14:18:18 -0400
Subject: [PATCH 0307/1286] drm/amd/display: Rename encoder_info_packet to
 dc_info_packet

Move this out of the HW includes to dc_types.h

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 99 +++++++++----------
 drivers/gpu/drm/amd/display/dc/dc_types.h     |  9 ++
 .../amd/display/dc/dce/dce_stream_encoder.c   |  4 +-
 .../amd/display/dc/inc/hw/stream_encoder.h    | 21 ++--
 4 files changed, 65 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index cae78ee9a6fc..379b05536321 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1811,7 +1811,7 @@ enum dc_status dc_validate_global_state(
 }
 
 static void patch_gamut_packet_checksum(
-		struct encoder_info_packet *gamut_packet)
+		struct dc_info_packet *gamut_packet)
 {
 	/* For gamut we recalc checksum */
 	if (gamut_packet->valid) {
@@ -1830,12 +1830,11 @@ static void patch_gamut_packet_checksum(
 }
 
 static void set_avi_info_frame(
-		struct encoder_info_packet *info_packet,
+		struct dc_info_packet *info_packet,
 		struct pipe_ctx *pipe_ctx)
 {
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
-	struct info_frame info_frame = { {0} };
 	uint32_t pixel_encoding = 0;
 	enum scanning_type scan_type = SCANNING_TYPE_NODATA;
 	enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
@@ -1845,7 +1844,7 @@ static void set_avi_info_frame(
 	unsigned int cn0_cn1_value = 0;
 	uint8_t *check_sum = NULL;
 	uint8_t byte_index = 0;
-	union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi;
+	union hdmi_info_packet hdmi_info = {0};
 	union display_content_support support = {0};
 	unsigned int vic = pipe_ctx->stream->timing.vic;
 	enum dc_timing_3d_format format;
@@ -1856,11 +1855,11 @@ static void set_avi_info_frame(
 			COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709;
 
 	/* Initialize header */
-	hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
+	hdmi_info.bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
 	/* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
 	* not be used in HDMI 2.0 (Section 10.1) */
-	hdmi_info->bits.header.version = 2;
-	hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
+	hdmi_info.bits.header.version = 2;
+	hdmi_info.bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
 
 	/*
 	 * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
@@ -1886,39 +1885,39 @@ static void set_avi_info_frame(
 
 	/* Y0_Y1_Y2 : The pixel encoding */
 	/* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
-	hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding;
+	hdmi_info.bits.Y0_Y1_Y2 = pixel_encoding;
 
 	/* A0 = 1 Active Format Information valid */
-	hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID;
+	hdmi_info.bits.A0 = ACTIVE_FORMAT_VALID;
 
 	/* B0, B1 = 3; Bar info data is valid */
-	hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID;
+	hdmi_info.bits.B0_B1 = BAR_INFO_BOTH_VALID;
 
-	hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
+	hdmi_info.bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
 
 	/* S0, S1 : Underscan / Overscan */
 	/* TODO: un-hardcode scan type */
 	scan_type = SCANNING_TYPE_UNDERSCAN;
-	hdmi_info->bits.S0_S1 = scan_type;
+	hdmi_info.bits.S0_S1 = scan_type;
 
 	/* C0, C1 : Colorimetry */
 	if (color_space == COLOR_SPACE_YCBCR709 ||
 			color_space == COLOR_SPACE_YCBCR709_LIMITED)
-		hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709;
+		hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
 	else if (color_space == COLOR_SPACE_YCBCR601 ||
 			color_space == COLOR_SPACE_YCBCR601_LIMITED)
-		hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601;
+		hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601;
 	else {
-		hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA;
+		hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA;
 	}
 	if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
 			color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
 			color_space == COLOR_SPACE_2020_YCBCR) {
-		hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
-		hdmi_info->bits.C0_C1   = COLORIMETRY_EXTENDED;
+		hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
+		hdmi_info.bits.C0_C1   = COLORIMETRY_EXTENDED;
 	} else if (color_space == COLOR_SPACE_ADOBERGB) {
-		hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
-		hdmi_info->bits.C0_C1   = COLORIMETRY_EXTENDED;
+		hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
+		hdmi_info.bits.C0_C1   = COLORIMETRY_EXTENDED;
 	}
 
 	/* TODO: un-hardcode aspect ratio */
@@ -1927,18 +1926,18 @@ static void set_avi_info_frame(
 	switch (aspect) {
 	case ASPECT_RATIO_4_3:
 	case ASPECT_RATIO_16_9:
-		hdmi_info->bits.M0_M1 = aspect;
+		hdmi_info.bits.M0_M1 = aspect;
 		break;
 
 	case ASPECT_RATIO_NO_DATA:
 	case ASPECT_RATIO_64_27:
 	case ASPECT_RATIO_256_135:
 	default:
-		hdmi_info->bits.M0_M1 = 0;
+		hdmi_info.bits.M0_M1 = 0;
 	}
 
 	/* Active Format Aspect ratio - same as Picture Aspect Ratio. */
-	hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
+	hdmi_info.bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
 
 	/* TODO: un-hardcode cn0_cn1 and itc */
 
@@ -1981,8 +1980,8 @@ static void set_avi_info_frame(
 				}
 			}
 		}
-		hdmi_info->bits.CN0_CN1 = cn0_cn1_value;
-		hdmi_info->bits.ITC = itc_value;
+		hdmi_info.bits.CN0_CN1 = cn0_cn1_value;
+		hdmi_info.bits.ITC = itc_value;
 	}
 
 	/* TODO : We should handle YCC quantization */
@@ -1991,19 +1990,19 @@ static void set_avi_info_frame(
 			stream->sink->edid_caps.qy_bit == 1) {
 		if (color_space == COLOR_SPACE_SRGB ||
 			color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
-			hdmi_info->bits.Q0_Q1   = RGB_QUANTIZATION_FULL_RANGE;
-			hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+			hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_FULL_RANGE;
+			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
 		} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
 					color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
-			hdmi_info->bits.Q0_Q1   = RGB_QUANTIZATION_LIMITED_RANGE;
-			hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+			hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_LIMITED_RANGE;
+			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
 		} else {
-			hdmi_info->bits.Q0_Q1   = RGB_QUANTIZATION_DEFAULT_RANGE;
-			hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+			hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_DEFAULT_RANGE;
+			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
 		}
 	} else {
-		hdmi_info->bits.Q0_Q1   = RGB_QUANTIZATION_DEFAULT_RANGE;
-		hdmi_info->bits.YQ0_YQ1   = YYC_QUANTIZATION_LIMITED_RANGE;
+		hdmi_info.bits.Q0_Q1   = RGB_QUANTIZATION_DEFAULT_RANGE;
+		hdmi_info.bits.YQ0_YQ1   = YYC_QUANTIZATION_LIMITED_RANGE;
 	}
 
 	///VIC
@@ -2028,51 +2027,49 @@ static void set_avi_info_frame(
 			break;
 		}
 	}
-	hdmi_info->bits.VIC0_VIC7 = vic;
+	hdmi_info.bits.VIC0_VIC7 = vic;
 
 	/* pixel repetition
 	 * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
 	 * repetition start from 1 */
-	hdmi_info->bits.PR0_PR3 = 0;
+	hdmi_info.bits.PR0_PR3 = 0;
 
 	/* Bar Info
 	 * barTop:    Line Number of End of Top Bar.
 	 * barBottom: Line Number of Start of Bottom Bar.
 	 * barLeft:   Pixel Number of End of Left Bar.
 	 * barRight:  Pixel Number of Start of Right Bar. */
-	hdmi_info->bits.bar_top = stream->timing.v_border_top;
-	hdmi_info->bits.bar_bottom = (stream->timing.v_total
+	hdmi_info.bits.bar_top = stream->timing.v_border_top;
+	hdmi_info.bits.bar_bottom = (stream->timing.v_total
 			- stream->timing.v_border_bottom + 1);
-	hdmi_info->bits.bar_left  = stream->timing.h_border_left;
-	hdmi_info->bits.bar_right = (stream->timing.h_total
+	hdmi_info.bits.bar_left  = stream->timing.h_border_left;
+	hdmi_info.bits.bar_right = (stream->timing.h_total
 			- stream->timing.h_border_right + 1);
 
 	/* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
-	check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
+	check_sum = &hdmi_info.packet_raw_data.sb[0];
 
 	*check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
 
 	for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
-		*check_sum += hdmi_info->packet_raw_data.sb[byte_index];
+		*check_sum += hdmi_info.packet_raw_data.sb[byte_index];
 
 	/* one byte complement */
 	*check_sum = (uint8_t) (0x100 - *check_sum);
 
 	/* Store in hw_path_mode */
-	info_packet->hb0 = hdmi_info->packet_raw_data.hb0;
-	info_packet->hb1 = hdmi_info->packet_raw_data.hb1;
-	info_packet->hb2 = hdmi_info->packet_raw_data.hb2;
+	info_packet->hb0 = hdmi_info.packet_raw_data.hb0;
+	info_packet->hb1 = hdmi_info.packet_raw_data.hb1;
+	info_packet->hb2 = hdmi_info.packet_raw_data.hb2;
 
-	for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet.
-				info_packet_hdmi.packet_raw_data.sb); byte_index++)
-		info_packet->sb[byte_index] = info_frame.avi_info_packet.
-				info_packet_hdmi.packet_raw_data.sb[byte_index];
+	for (byte_index = 0; byte_index < sizeof(hdmi_info.packet_raw_data.sb); byte_index++)
+		info_packet->sb[byte_index] = hdmi_info.packet_raw_data.sb[byte_index];
 
 	info_packet->valid = true;
 }
 
 static void set_vendor_info_packet(
-		struct encoder_info_packet *info_packet,
+		struct dc_info_packet *info_packet,
 		struct dc_stream_state *stream)
 {
 	uint32_t length = 0;
@@ -2185,7 +2182,7 @@ static void set_vendor_info_packet(
 }
 
 static void set_spd_info_packet(
-		struct encoder_info_packet *info_packet,
+		struct dc_info_packet *info_packet,
 		struct dc_stream_state *stream)
 {
 	/* SPD info packet for FreeSync */
@@ -2306,7 +2303,7 @@ static void set_spd_info_packet(
 }
 
 static void set_hdr_static_info_packet(
-		struct encoder_info_packet *info_packet,
+		struct dc_info_packet *info_packet,
 		struct dc_stream_state *stream)
 {
 	uint16_t i = 0;
@@ -2403,7 +2400,7 @@ static void set_hdr_static_info_packet(
 }
 
 static void set_vsc_info_packet(
-		struct encoder_info_packet *info_packet,
+		struct dc_info_packet *info_packet,
 		struct dc_stream_state *stream)
 {
 	unsigned int vscPacketRevision = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9441305d3ab5..cd324bcc45e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -530,6 +530,15 @@ struct vrr_params {
 	uint32_t frame_counter;
 };
 
+struct dc_info_packet {
+	bool valid;
+	uint8_t hb0;
+	uint8_t hb1;
+	uint8_t hb2;
+	uint8_t hb3;
+	uint8_t sb[32];
+};
+
 #define DC_PLANE_UPDATE_TIMES_MAX 10
 
 struct dc_plane_flip_time {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 444558ca6533..b85fda5f38e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -80,7 +80,7 @@ enum {
 static void dce110_update_generic_info_packet(
 	struct dce110_stream_encoder *enc110,
 	uint32_t packet_index,
-	const struct encoder_info_packet *info_packet)
+	const struct dc_info_packet *info_packet)
 {
 	uint32_t regval;
 	/* TODOFPGA Figure out a proper number for max_retries polling for lock
@@ -196,7 +196,7 @@ static void dce110_update_generic_info_packet(
 static void dce110_update_hdmi_info_packet(
 	struct dce110_stream_encoder *enc110,
 	uint32_t packet_index,
-	const struct encoder_info_packet *info_packet)
+	const struct dc_info_packet *info_packet)
 {
 	uint32_t cont, send, line;
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index b5db1692393c..5c21336cae4c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -34,26 +34,17 @@ struct dc_bios;
 struct dc_context;
 struct dc_crtc_timing;
 
-struct encoder_info_packet {
-	bool valid;
-	uint8_t hb0;
-	uint8_t hb1;
-	uint8_t hb2;
-	uint8_t hb3;
-	uint8_t sb[32];
-};
-
 struct encoder_info_frame {
 	/* auxiliary video information */
-	struct encoder_info_packet avi;
-	struct encoder_info_packet gamut;
-	struct encoder_info_packet vendor;
+	struct dc_info_packet avi;
+	struct dc_info_packet gamut;
+	struct dc_info_packet vendor;
 	/* source product description */
-	struct encoder_info_packet spd;
+	struct dc_info_packet spd;
 	/* video stream configuration */
-	struct encoder_info_packet vsc;
+	struct dc_info_packet vsc;
 	/* HDR Static MetaData */
-	struct encoder_info_packet hdrsmd;
+	struct dc_info_packet hdrsmd;
 };
 
 struct encoder_unblank_param {

From 6e5b3587dbf6aaf7f9eef4956a3fce12bf7e9ffa Mon Sep 17 00:00:00 2001
From: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
Date: Wed, 14 Mar 2018 09:15:24 -0400
Subject: [PATCH 0308/1286] drm/amd/display: Add vline IRQ programming for DCN

Signed-off-by: SivapiriyanKumarasamy <sivapiriyan.kumarasamy@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc.c      |  6 ++
 drivers/gpu/drm/amd/display/dc/dc_stream.h    |  4 +
 .../gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 76 +++++++++++++++++++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 14 ++++
 .../amd/display/dc/inc/hw/timing_generator.h  |  3 +
 drivers/gpu/drm/amd/display/dc/irq_types.h    |  7 ++
 6 files changed, 110 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 63a3d468939a..554cf975be05 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1375,6 +1375,12 @@ static void commit_planes_for_stream(struct dc *dc,
 					pipe_ctx->stream_res.abm->funcs->set_abm_level(
 							pipe_ctx->stream_res.abm, stream->abm_level);
 			}
+
+			if (stream_update && stream_update->periodic_fn_vsync_delta &&
+					pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
+				pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
+						pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
+						pipe_ctx->stream->periodic_fn_vsync_delta);
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 3a7093ede569..8d5161060b60 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -70,6 +70,9 @@ struct dc_stream_state {
 	enum color_transfer_func output_tf;
 
 	bool ignore_msa_timing_param;
+
+	unsigned long long periodic_fn_vsync_delta;
+
 	/* TODO: custom INFO packets */
 	/* TODO: ABM info (DMCU) */
 	/* PSR info */
@@ -113,6 +116,7 @@ struct dc_stream_update {
 	struct dc_hdr_static_metadata *hdr_static_metadata;
 	enum color_transfer_func color_output_tf;
 	unsigned int *abm_level;
+	unsigned long long *periodic_fn_vsync_delta;
 };
 
 bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 4bf64d1b2c60..f56eac0e4dd2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -93,6 +93,81 @@ static void optc1_disable_stereo(struct timing_generator *optc)
 		OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
 }
 
+static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
+{
+	struct dc_crtc_timing patched_crtc_timing;
+	uint32_t vesa_sync_start;
+	uint32_t asic_blank_end;
+	uint32_t interlace_factor;
+	uint32_t vertical_line_start;
+
+	patched_crtc_timing = *dc_crtc_timing;
+	optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
+
+	vesa_sync_start = patched_crtc_timing.h_addressable +
+			patched_crtc_timing.h_border_right +
+			patched_crtc_timing.h_front_porch;
+
+	asic_blank_end = patched_crtc_timing.h_total -
+			vesa_sync_start -
+			patched_crtc_timing.h_border_left;
+
+	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
+
+	vesa_sync_start = patched_crtc_timing.v_addressable +
+			patched_crtc_timing.v_border_bottom +
+			patched_crtc_timing.v_front_porch;
+
+	asic_blank_end = (patched_crtc_timing.v_total -
+			vesa_sync_start -
+			patched_crtc_timing.v_border_top)
+			* interlace_factor;
+
+	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
+	if (vertical_line_start < 0) {
+		ASSERT(0);
+		vertical_line_start = 0;
+	}
+
+	return vertical_line_start;
+}
+
+void optc1_program_vline_interrupt(
+		struct timing_generator *optc,
+		const struct dc_crtc_timing *dc_crtc_timing,
+		unsigned long long vsync_delta)
+{
+
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
+	unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_khz + 99), 100);
+	uint32_t req_delta_lines = (uint32_t) div64_u64(
+			(req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
+								dc_crtc_timing->h_total);
+
+	uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
+	uint32_t start_line = 0;
+	uint32_t endLine = 0;
+
+	if (req_delta_lines != 0)
+		req_delta_lines--;
+
+	if (req_delta_lines > vsync_line)
+		start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
+	else
+		start_line = vsync_line - req_delta_lines;
+
+	endLine = start_line + 2;
+
+	if (endLine >= dc_crtc_timing->v_total)
+		endLine = 2;
+
+	REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
+			OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
+			OTG_VERTICAL_INTERRUPT0_LINE_END, endLine);
+}
+
 /**
  * program_timing_generator   used by mode timing set
  * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
@@ -1215,6 +1290,7 @@ static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
 static const struct timing_generator_funcs dcn10_tg_funcs = {
 		.validate_timing = optc1_validate_timing,
 		.program_timing = optc1_program_timing,
+		.program_vline_interrupt = optc1_program_vline_interrupt,
 		.program_global_sync = optc1_program_global_sync,
 		.enable_crtc = optc1_enable_crtc,
 		.disable_crtc = optc1_disable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index d25e7bf0d0d7..5a9a73d69fd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -65,6 +65,8 @@
 	SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
 	SRI(OTG_BLACK_COLOR, OTG, inst),\
 	SRI(OTG_CLOCK_CONTROL, OTG, inst),\
+	SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
+	SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
 	SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
 	SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
 	SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
@@ -124,6 +126,8 @@ struct dcn_optc_registers {
 	uint32_t OTG_TEST_PATTERN_CONTROL;
 	uint32_t OTG_TEST_PATTERN_COLOR;
 	uint32_t OTG_CLOCK_CONTROL;
+	uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL;
+	uint32_t OTG_VERTICAL_INTERRUPT0_POSITION;
 	uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
 	uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
 	uint32_t OPTC_INPUT_CLOCK_CONTROL;
@@ -206,6 +210,9 @@ struct dcn_optc_registers {
 	SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
 	SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
 	SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
+	SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\
+	SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\
+	SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\
 	SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
 	SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
 	SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
@@ -323,6 +330,9 @@ struct dcn_optc_registers {
 	type OTG_CLOCK_EN;\
 	type OTG_CLOCK_ON;\
 	type OTG_CLOCK_GATE_DIS;\
+	type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\
+	type OTG_VERTICAL_INTERRUPT0_LINE_START;\
+	type OTG_VERTICAL_INTERRUPT0_LINE_END;\
 	type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\
 	type OTG_VERTICAL_INTERRUPT2_LINE_START;\
 	type OPTC_INPUT_CLK_EN;\
@@ -420,6 +430,10 @@ void optc1_program_timing(
 	const struct dc_crtc_timing *dc_crtc_timing,
 	bool use_vbios);
 
+void optc1_program_vline_interrupt(struct timing_generator *optc,
+		const struct dc_crtc_timing *dc_crtc_timing,
+		unsigned long long vsync_delta);
+
 void optc1_program_global_sync(
 		struct timing_generator *optc);
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 3217b5bf6c7a..69cb0a105300 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -140,6 +140,9 @@ struct timing_generator_funcs {
 	void (*program_timing)(struct timing_generator *tg,
 							const struct dc_crtc_timing *timing,
 							bool use_vbios);
+	void (*program_vline_interrupt)(struct timing_generator *optc,
+			const struct dc_crtc_timing *dc_crtc_timing,
+			unsigned long long vsync_delta);
 	bool (*enable_crtc)(struct timing_generator *tg);
 	bool (*disable_crtc)(struct timing_generator *tg);
 	bool (*is_counter_moving)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index a506c2e939f5..cc3b1bc6cedd 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -135,6 +135,13 @@ enum dc_irq_source {
 	DC_IRQ_SOURCE_VBLANK5,
 	DC_IRQ_SOURCE_VBLANK6,
 
+	DC_IRQ_SOURCE_DC1_VLINE0,
+	DC_IRQ_SOURCE_DC2_VLINE0,
+	DC_IRQ_SOURCE_DC3_VLINE0,
+	DC_IRQ_SOURCE_DC4_VLINE0,
+	DC_IRQ_SOURCE_DC5_VLINE0,
+	DC_IRQ_SOURCE_DC6_VLINE0,
+
 	DAL_IRQ_SOURCES_NUMBER
 };
 

From 5813dd1c0c4e06b3321142cd2da99909a1f41707 Mon Sep 17 00:00:00 2001
From: Xingyue Tao <xingyue.tao@amd.com>
Date: Fri, 16 Mar 2018 15:20:48 -0400
Subject: [PATCH 0309/1286] drm/amd/display: Add double buffer mechanism to
 OCSC

- Add a double buffer mechanism to the output CSC
so that there is no tearing when adjusting brightness
from Radeon settings.

Signed-off-by: Xingyue Tao <xingyue.tao@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  | 36 +++++++++++++++
 .../drm/amd/display/dc/dcn10/dcn10_dpp_cm.c   | 46 +++++++++++--------
 .../drm/amd/display/dc/dcn10/dcn10_resource.c | 32 ++-----------
 3 files changed, 67 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index b81b2aa3c49f..9b5ff76a8027 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -420,6 +420,41 @@
 	TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
 	TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh)
 
+/*
+ *
+	DCN1 CM debug status register definition
+
+	register :ID9_CM_STATUS do
+	implement_ref :cm
+	map to:  :cmdebugind, at: j
+	width 32
+	disclosure   NEVER
+
+		field :ID9_VUPDATE_CFG, [0], R
+		field :ID9_IGAM_LUT_MODE, [2..1], R
+		field :ID9_BNS_BYPASS, [3], R
+		field :ID9_ICSC_MODE, [5..4], R
+		field :ID9_DGAM_LUT_MODE, [8..6], R
+		field :ID9_HDR_BYPASS, [9], R
+		field :ID9_GAMUT_REMAP_MODE, [11..10], R
+		field :ID9_RGAM_LUT_MODE, [14..12], R
+		#1 free bit
+		field :ID9_OCSC_MODE, [18..16], R
+		field :ID9_DENORM_MODE, [21..19], R
+		field :ID9_ROUND_TRUNC_MODE, [25..22], R
+		field :ID9_DITHER_EN, [26], R
+		field :ID9_DITHER_MODE, [28..27], R
+	end
+*/
+
+#define TF_DEBUG_REG_LIST_SH_DCN10 \
+	.CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 4, \
+	.CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 16
+
+#define TF_DEBUG_REG_LIST_MASK_DCN10 \
+	.CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30, \
+	.CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 0x70000
+
 #define TF_REG_FIELD_LIST(type) \
 	type EXT_OVERSCAN_LEFT; \
 	type EXT_OVERSCAN_RIGHT; \
@@ -1015,6 +1050,7 @@
 	type CM_BYPASS; \
 	type CM_TEST_DEBUG_INDEX; \
 	type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \
+	type CM_TEST_DEBUG_DATA_ID9_OCSC_MODE;\
 	type FORMAT_CONTROL__ALPHA_EN; \
 	type CUR0_COLOR0; \
 	type CUR0_COLOR1; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index cc511415caee..4f373c97804f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -216,41 +216,55 @@ static void dpp1_cm_program_color_matrix(
 		struct dcn10_dpp *dpp,
 		const uint16_t *regval)
 {
-	uint32_t mode;
+	uint32_t ocsc_mode;
+	uint32_t cur_mode;
 	struct color_matrices_reg gam_regs;
 
-	REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
-
 	if (regval == NULL) {
 		BREAK_TO_DEBUGGER();
 		return;
 	}
-	mode = 4;
+
+	/* determine which CSC matrix (ocsc or comb) we are using
+	 * currently.  select the alternate set to double buffer
+	 * the CSC update so CSC is updated on frame boundary
+	 */
+	REG_SET(CM_TEST_DEBUG_INDEX, 0,
+			CM_TEST_DEBUG_INDEX, 9);
+
+	REG_GET(CM_TEST_DEBUG_DATA,
+			CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);
+
+	if (cur_mode != 4)
+		ocsc_mode = 4;
+	else
+		ocsc_mode = 5;
+
+
 	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
 	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_OCSC_C11;
 	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
 	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
 
-	if (mode == 4) {
+	if (ocsc_mode == 4) {
 
 		gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
 		gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
 
-		cm_helper_program_color_matrices(
-				dpp->base.ctx,
-				regval,
-				&gam_regs);
-
 	} else {
 
 		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
 		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
 
-		cm_helper_program_color_matrices(
-				dpp->base.ctx,
-				regval,
-				&gam_regs);
 	}
+
+	cm_helper_program_color_matrices(
+			dpp->base.ctx,
+			regval,
+			&gam_regs);
+
+	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+
 }
 
 void dpp1_cm_set_output_csc_default(
@@ -260,7 +274,6 @@ void dpp1_cm_set_output_csc_default(
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
 	const uint16_t *regval = NULL;
 	int arr_size;
-	uint32_t ocsc_mode = 4;
 
 	regval = find_color_matrix(colorspace, &arr_size);
 	if (regval == NULL) {
@@ -269,7 +282,6 @@ void dpp1_cm_set_output_csc_default(
 	}
 
 	dpp1_cm_program_color_matrix(dpp, regval);
-	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
 }
 
 static void dpp1_cm_get_reg_field(
@@ -330,10 +342,8 @@ void dpp1_cm_set_output_csc_adjustment(
 		const uint16_t *regval)
 {
 	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
-	uint32_t ocsc_mode = 4;
 
 	dpp1_cm_program_color_matrix(dpp, regval);
-	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
 }
 
 void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index d321da97217c..7ad290cbc730 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -319,41 +319,15 @@ static const struct dcn_dpp_registers tf_regs[] = {
 	tf_regs(3),
 };
 
-/*
- *
-	DCN1 CM debug status register definition
-
-	register :ID9_CM_STATUS do
-	implement_ref :cm
-	map to:  :cmdebugind, at: j
-	width 32
-	disclosure   NEVER
-
-		field :ID9_VUPDATE_CFG, [0], R
-		field :ID9_IGAM_LUT_MODE, [2..1], R
-		field :ID9_BNS_BYPASS, [3], R
-		field :ID9_ICSC_MODE, [5..4], R
-		field :ID9_DGAM_LUT_MODE, [8..6], R
-		field :ID9_HDR_BYPASS, [9], R
-		field :ID9_GAMUT_REMAP_MODE, [11..10], R
-		field :ID9_RGAM_LUT_MODE, [14..12], R
-		#1 free bit
-		field :ID9_OCSC_MODE, [18..16], R
-		field :ID9_DENORM_MODE, [21..19], R
-		field :ID9_ROUND_TRUNC_MODE, [25..22], R
-		field :ID9_DITHER_EN, [26], R
-		field :ID9_DITHER_MODE, [28..27], R
-	end
-*/
-
 static const struct dcn_dpp_shift tf_shift = {
 	TF_REG_LIST_SH_MASK_DCN10(__SHIFT),
-	.CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x4
+	TF_DEBUG_REG_LIST_SH_DCN10
+
 };
 
 static const struct dcn_dpp_mask tf_mask = {
 	TF_REG_LIST_SH_MASK_DCN10(_MASK),
-	.CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30
+	TF_DEBUG_REG_LIST_MASK_DCN10
 };
 
 static const struct dcn_mpc_registers mpc_regs = {

From 87943159f4093d2dae22abccbe046dac0fbdad4f Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 15 Mar 2018 14:54:30 -0400
Subject: [PATCH 0310/1286] drm/amd/display: Only program MSA_TIMING_PARAM if
 it changed

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 82ee9de23115..c18f24afa698 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1183,16 +1183,21 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct dc_link *link = stream->sink->link;
-	union down_spread_ctrl downspread;
+	union down_spread_ctrl old_downspread;
+	union down_spread_ctrl new_downspread;
 
 	core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
-			&downspread.raw, sizeof(downspread));
+			&old_downspread.raw, sizeof(old_downspread));
 
-	downspread.bits.IGNORE_MSA_TIMING_PARAM =
+	new_downspread.raw = old_downspread.raw;
+
+	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
 			(stream->ignore_msa_timing_param) ? 1 : 0;
 
-	core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
-			&downspread.raw, sizeof(downspread));
+	if (new_downspread.raw != old_downspread.raw) {
+		core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+			&new_downspread.raw, sizeof(new_downspread));
+	}
 }
 
 static enum dc_status enable_link_dp(

From 44d09c6a577c8ed4e0ef50257487c071ae5e0fa2 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Thu, 15 Mar 2018 14:29:24 -0400
Subject: [PATCH 0311/1286] drm/amd/display: Move commit_planes_to_stream to
 amdgpu_dm

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 108 +++++++++++++++++-
 drivers/gpu/drm/amd/display/dc/core/dc.c      |  89 ---------------
 drivers/gpu/drm/amd/display/dc/dc_stream.h    |   7 --
 3 files changed, 103 insertions(+), 101 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bad9f09c588b..3ff3905eee9a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3977,6 +3977,97 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ */
+static bool commit_planes_to_stream(
+		struct dc *dc,
+		struct dc_plane_state **plane_states,
+		uint8_t new_plane_count,
+		struct dm_crtc_state *dm_new_crtc_state,
+		struct dm_crtc_state *dm_old_crtc_state,
+		struct dc_state *state)
+{
+	/* no need to dynamically allocate this. it's pretty small */
+	struct dc_surface_update updates[MAX_SURFACES];
+	struct dc_flip_addrs *flip_addr;
+	struct dc_plane_info *plane_info;
+	struct dc_scaling_info *scaling_info;
+	int i;
+	struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
+	struct dc_stream_update *stream_update =
+			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+
+	if (!stream_update) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
+			    GFP_KERNEL);
+	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
+			     GFP_KERNEL);
+	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
+			       GFP_KERNEL);
+
+	if (!flip_addr || !plane_info || !scaling_info) {
+		kfree(flip_addr);
+		kfree(plane_info);
+		kfree(scaling_info);
+		kfree(stream_update);
+		return false;
+	}
+
+	memset(updates, 0, sizeof(updates));
+
+	stream_update->src = dc_stream->src;
+	stream_update->dst = dc_stream->dst;
+	stream_update->out_transfer_func = dc_stream->out_transfer_func;
+
+	for (i = 0; i < new_plane_count; i++) {
+		updates[i].surface = plane_states[i];
+		updates[i].gamma =
+			(struct dc_gamma *)plane_states[i]->gamma_correction;
+		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
+		flip_addr[i].address = plane_states[i]->address;
+		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
+		plane_info[i].color_space = plane_states[i]->color_space;
+		plane_info[i].input_tf = plane_states[i]->input_tf;
+		plane_info[i].format = plane_states[i]->format;
+		plane_info[i].plane_size = plane_states[i]->plane_size;
+		plane_info[i].rotation = plane_states[i]->rotation;
+		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
+		plane_info[i].stereo_format = plane_states[i]->stereo_format;
+		plane_info[i].tiling_info = plane_states[i]->tiling_info;
+		plane_info[i].visible = plane_states[i]->visible;
+		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
+		plane_info[i].dcc = plane_states[i]->dcc;
+		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
+		scaling_info[i].src_rect = plane_states[i]->src_rect;
+		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
+		scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+
+		updates[i].flip_addr = &flip_addr[i];
+		updates[i].plane_info = &plane_info[i];
+		updates[i].scaling_info = &scaling_info[i];
+	}
+
+	dc_commit_updates_for_stream(
+			dc,
+			updates,
+			new_plane_count,
+			dc_stream, stream_update, plane_states, state);
+
+	kfree(flip_addr);
+	kfree(plane_info);
+	kfree(scaling_info);
+	kfree(stream_update);
+	return true;
+}
+
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct drm_device *dev,
 				    struct amdgpu_display_manager *dm,
@@ -3992,6 +4083,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	struct drm_crtc_state *new_pcrtc_state =
 			drm_atomic_get_new_crtc_state(state, pcrtc);
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+	struct dm_crtc_state *dm_old_crtc_state =
+			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	int planes_count = 0;
 	unsigned long flags;
@@ -4070,10 +4163,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 		}
 
-		if (false == dc_commit_planes_to_stream(dm->dc,
+
+		if (false == commit_planes_to_stream(dm->dc,
 							plane_states_constructed,
 							planes_count,
-							dc_stream_attach,
+							acrtc_state,
+							dm_old_crtc_state,
 							dm_state->context))
 			dm_error("%s: Failed to attach plane!\n", __func__);
 	} else {
@@ -4298,8 +4393,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 		struct dc_stream_status *status = NULL;
 
-		if (acrtc)
+		if (acrtc) {
 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+		}
 
 		/* Skip any modesets/resets */
 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
@@ -4322,11 +4419,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		WARN_ON(!status->plane_count);
 
 		/*TODO How it works with MPO ?*/
-		if (!dc_commit_planes_to_stream(
+		if (!commit_planes_to_stream(
 				dm->dc,
 				status->plane_states,
 				status->plane_count,
-				dm_new_crtc_state->stream,
+				dm_new_crtc_state,
+				to_dm_crtc_state(old_crtc_state),
 				dm_state->context))
 			dm_error("%s: Failed to update stream scaling!\n", __func__);
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 554cf975be05..6f4ad67ffca6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -936,95 +936,6 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 	return true;
 }
 
-/*
- * TODO this whole function needs to go
- *
- * dc_surface_update is needlessly complex. See if we can just replace this
- * with a dc_plane_state and follow the atomic model a bit more closely here.
- */
-bool dc_commit_planes_to_stream(
-		struct dc *dc,
-		struct dc_plane_state **plane_states,
-		uint8_t new_plane_count,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *state)
-{
-	/* no need to dynamically allocate this. it's pretty small */
-	struct dc_surface_update updates[MAX_SURFACES];
-	struct dc_flip_addrs *flip_addr;
-	struct dc_plane_info *plane_info;
-	struct dc_scaling_info *scaling_info;
-	int i;
-	struct dc_stream_update *stream_update =
-			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
-
-	if (!stream_update) {
-		BREAK_TO_DEBUGGER();
-		return false;
-	}
-
-	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
-			    GFP_KERNEL);
-	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
-			     GFP_KERNEL);
-	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
-			       GFP_KERNEL);
-
-	if (!flip_addr || !plane_info || !scaling_info) {
-		kfree(flip_addr);
-		kfree(plane_info);
-		kfree(scaling_info);
-		kfree(stream_update);
-		return false;
-	}
-
-	memset(updates, 0, sizeof(updates));
-
-	stream_update->src = dc_stream->src;
-	stream_update->dst = dc_stream->dst;
-	stream_update->out_transfer_func = dc_stream->out_transfer_func;
-
-	for (i = 0; i < new_plane_count; i++) {
-		updates[i].surface = plane_states[i];
-		updates[i].gamma =
-			(struct dc_gamma *)plane_states[i]->gamma_correction;
-		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
-		flip_addr[i].address = plane_states[i]->address;
-		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
-		plane_info[i].color_space = plane_states[i]->color_space;
-		plane_info[i].input_tf = plane_states[i]->input_tf;
-		plane_info[i].format = plane_states[i]->format;
-		plane_info[i].plane_size = plane_states[i]->plane_size;
-		plane_info[i].rotation = plane_states[i]->rotation;
-		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
-		plane_info[i].stereo_format = plane_states[i]->stereo_format;
-		plane_info[i].tiling_info = plane_states[i]->tiling_info;
-		plane_info[i].visible = plane_states[i]->visible;
-		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
-		plane_info[i].dcc = plane_states[i]->dcc;
-		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
-		scaling_info[i].src_rect = plane_states[i]->src_rect;
-		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
-		scaling_info[i].clip_rect = plane_states[i]->clip_rect;
-
-		updates[i].flip_addr = &flip_addr[i];
-		updates[i].plane_info = &plane_info[i];
-		updates[i].scaling_info = &scaling_info[i];
-	}
-
-	dc_commit_updates_for_stream(
-			dc,
-			updates,
-			new_plane_count,
-			dc_stream, stream_update, plane_states, state);
-
-	kfree(flip_addr);
-	kfree(plane_info);
-	kfree(scaling_info);
-	kfree(stream_update);
-	return true;
-}
-
 struct dc_state *dc_create_state(void)
 {
 	struct dc_state *context = kzalloc(sizeof(struct dc_state),
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 8d5161060b60..2971cd07e093 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -135,13 +135,6 @@ bool dc_is_stream_scaling_unchanged(
  *   This does not trigger a flip.  No surface address is programmed.
  */
 
-bool dc_commit_planes_to_stream(
-		struct dc *dc,
-		struct dc_plane_state **plane_states,
-		uint8_t new_plane_count,
-		struct dc_stream_state *dc_stream,
-		struct dc_state *state);
-
 void dc_commit_updates_for_stream(struct dc *dc,
 		struct dc_surface_update *srf_updates,
 		int surface_count,

From 844de65e9108a03f2018a6bb827cc53bfa71693b Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 23 Mar 2018 10:45:00 +0800
Subject: [PATCH 0312/1286] drm/amd/pp: Remove useless fw load error handler on
 Polaris

Acked-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c  | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 997a777dd35b..fe6854eecf7b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -301,19 +301,11 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
 		smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
 
 		/* Check if SMU is running in protected mode */
-		if (smu_data->protected_mode == 0) {
+		if (smu_data->protected_mode == 0)
 			result = polaris10_start_smu_in_non_protection_mode(hwmgr);
-		} else {
+		else
 			result = polaris10_start_smu_in_protection_mode(hwmgr);
 
-			/* If failed, try with different security Key. */
-			if (result != 0) {
-				smu_data->smu7_data.security_hard_key ^= 1;
-				cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
-				result = polaris10_start_smu_in_protection_mode(hwmgr);
-			}
-		}
-
 		if (result != 0)
 			PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
 

From ba8ab90e6ac9322f39ab8368941b38b5bb12477c Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 22 Mar 2018 14:52:35 +0800
Subject: [PATCH 0313/1286] drm/amd/pp: Add hwmgr_sw_init/fini functions

Clean up pp ip functions

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 368 ++++--------------
 .../drm/amd/powerplay/hwmgr/hardwaremanager.c |  14 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c   |  74 +++-
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h     |   7 +-
 4 files changed, 147 insertions(+), 316 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7e8ad30d98e2..6503bbfdc76e 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -31,24 +31,11 @@
 #include "amdgpu.h"
 #include "hwmgr.h"
 
-#define PP_DPM_DISABLED 0xCCCC
-
 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 		enum amd_pm_state_type *user_state);
 
 static const struct amd_pm_funcs pp_dpm_funcs;
 
-static inline int pp_check(struct pp_hwmgr *hwmgr)
-{
-	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
-		return -EINVAL;
-
-	if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
-		return PP_DPM_DISABLED;
-
-	return 0;
-}
-
 static int amd_powerplay_create(struct amdgpu_device *adev)
 {
 	struct pp_hwmgr *hwmgr;
@@ -73,7 +60,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
 }
 
 
-static int amd_powerplay_destroy(struct amdgpu_device *adev)
+static void amd_powerplay_destroy(struct amdgpu_device *adev)
 {
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 
@@ -82,8 +69,6 @@ static int amd_powerplay_destroy(struct amdgpu_device *adev)
 
 	kfree(hwmgr);
 	hwmgr = NULL;
-
-	return 0;
 }
 
 static int pp_early_init(void *handle)
@@ -109,18 +94,9 @@ static int pp_sw_init(void *handle)
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
+	ret = hwmgr_sw_init(hwmgr);
 
-	if (ret >= 0) {
-		if (hwmgr->smumgr_funcs->smu_init == NULL)
-			return -EINVAL;
-
-		ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
-
-		phm_register_irq_handlers(hwmgr);
-
-		pr_debug("amdgpu: powerplay sw initialized\n");
-	}
+	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
 
 	return ret;
 }
@@ -129,13 +105,8 @@ static int pp_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-	if (ret >= 0) {
-		if (hwmgr->smumgr_funcs->smu_fini != NULL)
-			hwmgr->smumgr_funcs->smu_fini(hwmgr);
-	}
+	hwmgr_sw_fini(hwmgr);
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
 		amdgpu_ucode_fini_bo(adev);
@@ -152,40 +123,20 @@ static int pp_hw_init(void *handle)
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
 		amdgpu_ucode_init_bo(adev);
 
-	ret = pp_check(hwmgr);
+	ret = hwmgr_hw_init(hwmgr);
 
-	if (ret >= 0) {
-		if (hwmgr->smumgr_funcs->start_smu == NULL)
-			return -EINVAL;
+	if (ret)
+		pr_err("powerplay hw init failed\n");
 
-		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
-			pr_err("smc start failed\n");
-			hwmgr->smumgr_funcs->smu_fini(hwmgr);
-			return -EINVAL;
-		}
-		if (ret == PP_DPM_DISABLED)
-			goto exit;
-		ret = hwmgr_hw_init(hwmgr);
-		if (ret)
-			goto exit;
-	}
 	return ret;
-exit:
-	hwmgr->pm_en = 0;
-	cgs_notify_dpm_enabled(hwmgr->device, false);
-	return 0;
-
 }
 
 static int pp_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-	if (ret == 0)
-		hwmgr_hw_fini(hwmgr);
+	hwmgr_hw_fini(hwmgr);
 
 	return 0;
 }
@@ -194,11 +145,8 @@ static int pp_late_init(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret == 0)
+	if (hwmgr && hwmgr->pm_en)
 		pp_dpm_dispatch_tasks(hwmgr,
 					AMD_PP_TASK_COMPLETE_INIT, NULL);
 
@@ -233,12 +181,9 @@ static int pp_set_powergating_state(void *handle,
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
 	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -254,38 +199,16 @@ static int pp_suspend(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-	if (ret == 0)
-		hwmgr_hw_suspend(hwmgr);
-	return 0;
+	return hwmgr_suspend(hwmgr);
 }
 
 static int pp_resume(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret;
 
-	ret = pp_check(hwmgr);
-
-	if (ret < 0)
-		return ret;
-
-	if (hwmgr->smumgr_funcs->start_smu == NULL)
-		return -EINVAL;
-
-	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
-		pr_err("smc start failed\n");
-		hwmgr->smumgr_funcs->smu_fini(hwmgr);
-		return -EINVAL;
-	}
-
-	if (ret == PP_DPM_DISABLED)
-		return 0;
-
-	return hwmgr_hw_resume(hwmgr);
+	return hwmgr_resume(hwmgr);
 }
 
 static int pp_set_clockgating_state(void *handle,
@@ -334,12 +257,9 @@ static int pp_dpm_fw_loading_complete(void *handle)
 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -389,12 +309,9 @@ static int pp_dpm_force_performance_level(void *handle,
 					enum amd_dpm_forced_level level)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (level == hwmgr->dpm_level)
 		return 0;
@@ -412,13 +329,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 								void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 	enum amd_dpm_forced_level level;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
 	level = hwmgr->dpm_level;
@@ -429,13 +343,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 	uint32_t clk = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -450,13 +361,10 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 	uint32_t clk = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -471,11 +379,8 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 static void pp_dpm_powergate_vce(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
+	if (!hwmgr || !hwmgr->pm_en)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
@@ -490,11 +395,8 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
 static void pp_dpm_powergate_uvd(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
+	if (!hwmgr || !hwmgr->pm_en)
 		return;
 
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
@@ -512,10 +414,8 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 	int ret = 0;
 	struct pp_hwmgr *hwmgr = handle;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
 	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
@@ -528,15 +428,9 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 	struct pp_power_state *state;
-	int ret = 0;
 	enum amd_pm_state_type pm_type;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
-
-	if (hwmgr->current_ps == NULL)
+	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -568,11 +462,8 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
+	if (!hwmgr || !hwmgr->pm_en)
 		return;
 
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
@@ -587,13 +478,10 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 	uint32_t mode = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -610,10 +498,8 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -630,10 +516,8 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -651,10 +535,8 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
 		return -EINVAL;
@@ -670,16 +552,10 @@ static int pp_dpm_get_pp_num_states(void *handle,
 {
 	struct pp_hwmgr *hwmgr = handle;
 	int i;
-	int ret = 0;
 
 	memset(data, 0, sizeof(*data));
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
-
-	if (hwmgr->ps == NULL)
+	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -713,15 +589,9 @@ static int pp_dpm_get_pp_num_states(void *handle,
 static int pp_dpm_get_pp_table(void *handle, char **table)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 	int size = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
-
-	if (!hwmgr->soft_pp_table)
+	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -736,10 +606,6 @@ static int amd_powerplay_reset(void *handle)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret;
 
-	ret = pp_check(hwmgr);
-	if (ret)
-		return ret;
-
 	ret = hwmgr_hw_fini(hwmgr);
 	if (ret)
 		return ret;
@@ -756,10 +622,8 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
 	if (!hwmgr->hardcode_pp_table) {
@@ -796,10 +660,8 @@ static int pp_dpm_force_clock_level(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -820,10 +682,8 @@ static int pp_dpm_print_clock_levels(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -840,10 +700,8 @@ static int pp_dpm_get_sclk_od(void *handle)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -860,10 +718,8 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -881,10 +737,8 @@ static int pp_dpm_get_mclk_od(void *handle)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -901,10 +755,8 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -922,11 +774,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-	if (ret)
-		return ret;
-
-	if (value == NULL)
+	if (!hwmgr || !hwmgr->pm_en || !value)
 		return -EINVAL;
 
 	switch (idx) {
@@ -948,14 +796,11 @@ static struct amd_vce_state*
 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
+	if (!hwmgr || !hwmgr->pm_en)
 		return NULL;
 
-	if (hwmgr && idx < hwmgr->num_vce_state_tables)
+	if (idx < hwmgr->num_vce_state_tables)
 		return &hwmgr->vce_states[idx];
 	return NULL;
 }
@@ -964,7 +809,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!buf || pp_check(hwmgr))
+	if (!hwmgr || !hwmgr->pm_en || !buf)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
@@ -980,12 +825,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = -EINVAL;
 
-	if (pp_check(hwmgr))
-		return -EINVAL;
+	if (!hwmgr || !hwmgr->pm_en)
+		return ret;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
-		return -EINVAL;
+		return ret;
 	}
 	mutex_lock(&hwmgr->smu_lock);
 	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
@@ -998,7 +843,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (pp_check(hwmgr))
+	if (!hwmgr || !hwmgr->pm_en)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
@@ -1016,7 +861,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 	long workload;
 	uint32_t index;
 
-	if (pp_check(hwmgr))
+	if (!hwmgr || !hwmgr->pm_en)
 		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
@@ -1058,10 +903,8 @@ static int pp_dpm_notify_smu_memory_info(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -1082,12 +925,9 @@ static int pp_dpm_notify_smu_memory_info(void *handle,
 static int pp_set_power_limit(void *handle, uint32_t limit)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
@@ -1104,20 +944,14 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
 	hwmgr->power_limit = limit;
 	mutex_unlock(&hwmgr->smu_lock);
-	return ret;
+	return 0;
 }
 
 static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
-
-	if (limit == NULL)
+	if (!hwmgr || !hwmgr->pm_en ||!limit)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -1129,19 +963,16 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
 
 	mutex_unlock(&hwmgr->smu_lock);
 
-	return ret;
+	return 0;
 }
 
 static int pp_display_configuration_change(void *handle,
 	const struct amd_pp_display_configuration *display_config)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
 	phm_store_dal_configuration_data(hwmgr, display_config);
@@ -1155,12 +986,7 @@ static int pp_get_display_power_level(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
-
-	if (output == NULL)
+	if (!hwmgr || !hwmgr->pm_en ||!output)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -1177,10 +1003,8 @@ static int pp_get_current_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
 
@@ -1225,10 +1049,8 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (clocks == NULL)
 		return -EINVAL;
@@ -1246,11 +1068,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-	if (ret)
-		return ret;
-
-	if (!clocks)
+	if (!hwmgr || !hwmgr->pm_en ||!clocks)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -1266,11 +1084,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-	if (ret)
-		return ret;
-
-	if (!clocks)
+	if (!hwmgr || !hwmgr->pm_en ||!clocks)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -1287,11 +1101,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-	if (ret)
-		return ret;
-
-	if (!wm_with_clock_ranges)
+	if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -1308,11 +1118,7 @@ static int pp_display_clock_voltage_request(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-	if (ret)
-		return ret;
-
-	if (!clock)
+	if (!hwmgr || !hwmgr->pm_en ||!clock)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -1328,12 +1134,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 	struct pp_hwmgr *hwmgr = handle;
 	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
-
-	if (clocks == NULL)
+	if (!hwmgr || !hwmgr->pm_en ||!clocks)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -1348,12 +1149,9 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 static int pp_set_mmhub_powergating_by_smu(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
 
-	ret = pp_check(hwmgr);
-
-	if (ret)
-		return ret;
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
 
 	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index ae2e9339dd6b..dcceadb2e172 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -75,8 +75,7 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
 
 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
-	int ret = 1;
-	bool enabled;
+	int ret = -EINVAL;
 	PHM_FUNC_CHECK(hwmgr);
 
 	if (smum_is_dpm_running(hwmgr)) {
@@ -87,17 +86,12 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 	if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
 		ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
 
-	enabled = ret == 0;
-
-	cgs_notify_dpm_enabled(hwmgr->device, enabled);
-
 	return ret;
 }
 
 int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
-	int ret = -1;
-	bool enabled;
+	int ret = -EINVAL;
 
 	PHM_FUNC_CHECK(hwmgr);
 
@@ -109,10 +103,6 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 	if (hwmgr->hwmgr_func->dynamic_state_management_disable)
 		ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
 
-	enabled = ret == 0 ? false : true;
-
-	cgs_notify_dpm_enabled(hwmgr->device, enabled);
-
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 42982055b161..30ff8a9c301b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -76,7 +76,7 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
 
 int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 {
-	if (hwmgr == NULL)
+	if (!hwmgr)
 		return -EINVAL;
 
 	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
@@ -170,17 +170,51 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
+{
+	if (!hwmgr|| !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
+		return -EINVAL;
+
+	phm_register_irq_handlers(hwmgr);
+
+	return hwmgr->smumgr_funcs->smu_init(hwmgr);
+}
+
+
+int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
+		hwmgr->smumgr_funcs->smu_fini(hwmgr);
+
+	return 0;
+}
+
 int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (hwmgr == NULL)
+	if (!hwmgr || !hwmgr->smumgr_funcs)
 		return -EINVAL;
 
-	if (hwmgr->pptable_func == NULL ||
-	    hwmgr->pptable_func->pptable_init == NULL ||
-	    hwmgr->hwmgr_func->backend_init == NULL)
-		return -EINVAL;
+	if (hwmgr->smumgr_funcs->start_smu) {
+		ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
+		if (ret) {
+			pr_err("smc start failed\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!hwmgr->pm_en)
+		return 0;
+
+	if (!hwmgr->pptable_func ||
+	    !hwmgr->pptable_func->pptable_init ||
+	    !hwmgr->hwmgr_func->backend_init) {
+		hwmgr->pm_en = false;
+		((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = false;
+		pr_info("dpm not supported \n");
+		return 0;
+	}
 
 	ret = hwmgr->pptable_func->pptable_init(hwmgr);
 	if (ret)
@@ -214,14 +248,13 @@ err1:
 	if (hwmgr->pptable_func->pptable_fini)
 		hwmgr->pptable_func->pptable_fini(hwmgr);
 err:
-	pr_err("amdgpu: powerplay initialization failed\n");
 	return ret;
 }
 
 int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
 {
-	if (hwmgr == NULL)
-		return -EINVAL;
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
 	phm_stop_thermal_controller(hwmgr);
 	psm_set_boot_states(hwmgr);
@@ -236,12 +269,12 @@ int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
 	return psm_fini_power_state_table(hwmgr);
 }
 
-int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
+int hwmgr_suspend(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (hwmgr == NULL)
-		return -EINVAL;
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
 	phm_disable_smc_firmware_ctf(hwmgr);
 	ret = psm_set_boot_states(hwmgr);
@@ -255,13 +288,23 @@ int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
 	return ret;
 }
 
-int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
+int hwmgr_resume(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (hwmgr == NULL)
+	if (!hwmgr)
 		return -EINVAL;
 
+	if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
+		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
+			pr_err("smc start failed\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!hwmgr->pm_en)
+		return 0;
+
 	ret = phm_setup_asic(hwmgr);
 	if (ret)
 		return ret;
@@ -270,9 +313,6 @@ int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
 	if (ret)
 		return ret;
 	ret = phm_start_thermal_controller(hwmgr);
-	if (ret)
-		return ret;
-
 	ret |= psm_set_performance_states(hwmgr);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 17f811d181c8..d6c9a3bac0a9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -782,10 +782,13 @@ struct pp_hwmgr {
 };
 
 int hwmgr_early_init(struct pp_hwmgr *hwmgr);
+int hwmgr_sw_init(struct pp_hwmgr *hwmgr);
+int hwmgr_sw_fini(struct pp_hwmgr *hwmgr);
 int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
 int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
-int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
-int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
+int hwmgr_suspend(struct pp_hwmgr *hwmgr);
+int hwmgr_resume(struct pp_hwmgr *hwmgr);
+
 int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
 				enum amd_pp_task task_id,
 				enum amd_pm_state_type *user_state);

From b61e54cb1881c7cb74787da6a5d39d8d48dcc075 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 22 Mar 2018 15:12:59 +0800
Subject: [PATCH 0314/1286] drm/amd/pp: Lock pm_funcs when setting pp table

Do not unlock the mutex until the pp table has been set completely,
to avoid conflicts if other pp functions are called simultaneously.

Use hwmgr_handle_task instead of pp_dpm_dispatch_tasks; it does not
make sense to call pp functions from ip functions.
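
As a minimal sketch of the locking pattern applied below (hypothetical
helper name, surrounding steps trimmed): the lock is taken once, held
across the whole table update, and every failure path funnels through
a single unlock label.

	/* sketch only; the real change is in pp_dpm_set_pp_table() */
	static int set_pp_table_locked(struct pp_hwmgr *hwmgr,
				       const char *buf, size_t size)
	{
		int ret = -ENOMEM;

		mutex_lock(&hwmgr->smu_lock);

		if (!hwmgr->hardcode_pp_table) {
			hwmgr->hardcode_pp_table =
				kmemdup(hwmgr->soft_pp_table,
					hwmgr->soft_pp_table_size, GFP_KERNEL);
			if (!hwmgr->hardcode_pp_table)
				goto err;	/* still holding the lock */
		}

		memcpy(hwmgr->hardcode_pp_table, buf, size);
		hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

		/* ... reset and re-init while the lock is still held ... */

		mutex_unlock(&hwmgr->smu_lock);
		return 0;
	err:
		mutex_unlock(&hwmgr->smu_lock);
		return ret;
	}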

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 6503bbfdc76e..9ada102e253c 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -31,8 +31,6 @@
 #include "amdgpu.h"
 #include "hwmgr.h"
 
-static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
-		enum amd_pm_state_type *user_state);
 
 static const struct amd_pm_funcs pp_dpm_funcs;
 
@@ -146,10 +144,12 @@ static int pp_late_init(void *handle)
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 
-	if (hwmgr && hwmgr->pm_en)
-		pp_dpm_dispatch_tasks(hwmgr,
+	if (hwmgr && hwmgr->pm_en) {
+		mutex_lock(&hwmgr->smu_lock);
+		hwmgr_handle_task(hwmgr,
 					AMD_PP_TASK_COMPLETE_INIT, NULL);
-
+		mutex_unlock(&hwmgr->smu_lock);
+	}
 	return 0;
 }
 
@@ -620,7 +620,7 @@ static int amd_powerplay_reset(void *handle)
 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 {
 	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	if (!hwmgr || !hwmgr->pm_en)
 		return -EINVAL;
@@ -630,28 +630,28 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
 						   hwmgr->soft_pp_table_size,
 						   GFP_KERNEL);
-		if (!hwmgr->hardcode_pp_table) {
-			mutex_unlock(&hwmgr->smu_lock);
-			return -ENOMEM;
-		}
+		if (!hwmgr->hardcode_pp_table)
+			goto err;
 	}
 
 	memcpy(hwmgr->hardcode_pp_table, buf, size);
 
 	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
-	mutex_unlock(&hwmgr->smu_lock);
 
 	ret = amd_powerplay_reset(handle);
 	if (ret)
-		return ret;
+		goto err;
 
 	if (hwmgr->hwmgr_func->avfs_control) {
 		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
 		if (ret)
-			return ret;
+			goto err;
 	}
-
+	mutex_unlock(&hwmgr->smu_lock);
 	return 0;
+err:
+	mutex_unlock(&hwmgr->smu_lock);
+	return ret;
 }
 
 static int pp_dpm_force_clock_level(void *handle,

From 8bb575a2d83af097980641d864401b303286755c Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 22 Mar 2018 15:46:47 +0800
Subject: [PATCH 0315/1286] drm/amd/pp: Save vf state in pp context

Store vf state in pp_context so we can
deprecate the cgs interface.
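
Roughly, the flag is captured once when the powerplay instance is
created and then tested directly in the SMU managers instead of
round-tripping through cgs (a sketch distilled from the hunks below):

	/* at amd_powerplay_create() time */
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);

	/* later, e.g. when deciding whether to start the SMC */
	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
		/* bare metal and SMC RAM not running: start the SMC */
	}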

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c          |  3 ++-
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h              |  1 +
 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c     |  7 +++----
 .../gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c    |  3 +--
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c     | 10 +++++-----
 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c    |  3 +--
 6 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 9ada102e253c..337af789d258 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -46,7 +46,8 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
 		return -ENOMEM;
 
 	hwmgr->adev = adev;
-	hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
+	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
 	hwmgr->device = amdgpu_cgs_create_device(adev);
 	mutex_init(&hwmgr->smu_lock);
 	hwmgr->chip_family = adev->family;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index d6c9a3bac0a9..d5cadc61c9b3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -718,6 +718,7 @@ struct pp_hwmgr {
 	uint32_t chip_family;
 	uint32_t chip_id;
 	uint32_t smu_version;
+	bool not_vf;
 	bool pm_en;
 	struct mutex smu_lock;
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index faef78321446..35b947e5292c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -288,8 +288,7 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
 	struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend);
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!(smu7_is_smc_ram_running(hwmgr)
-		|| cgs_is_virtualization_enabled(hwmgr->device))) {
+	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
 		/* Check if SMU is running in protected mode */
 		if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
 				CGS_IND_REG__SMC,
@@ -335,8 +334,8 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
 	uint32_t efuse = 0;
 	uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
 
-	if (cgs_is_virtualization_enabled(hwmgr->device))
-		return 0;
+	if (!hwmgr->not_vf)
+		return false;
 
 	if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
 			mask, &efuse)) {
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index fe6854eecf7b..05e60e8fee0b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -295,8 +295,7 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!(smu7_is_smc_ram_running(hwmgr)
-		|| cgs_is_virtualization_enabled(hwmgr->device))) {
+	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
 		smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
 		smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 0399c10d2be0..3684822b75b2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -375,7 +375,7 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
 		entry->meta_data_addr_low = 0;
 
 		/* digest need be excluded out */
-		if (cgs_is_virtualization_enabled(hwmgr->device))
+		if (!hwmgr->not_vf)
 			info.image_size -= 20;
 		entry->data_size_byte = info.image_size;
 		entry->num_register_entries = 0;
@@ -409,7 +409,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 					0x0);
 
 	if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
-		if (!cgs_is_virtualization_enabled(hwmgr->device)) {
+		if (hwmgr->not_vf) {
 			smu7_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SMU_DRAM_ADDR_HI,
 						upper_32_bits(smu_data->smu_buffer.mc_addr));
@@ -467,7 +467,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
 				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
 				"Failed to Get Firmware Entry.", return -EINVAL);
-	if (cgs_is_virtualization_enabled(hwmgr->device))
+	if (!hwmgr->not_vf)
 		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
 				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
 				"Failed to Get Firmware Entry.", return -EINVAL);
@@ -608,7 +608,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
 	smu_data->header = smu_data->header_buffer.kaddr;
 	smu_data->header_buffer.mc_addr = mc_addr;
 
-	if (cgs_is_virtualization_enabled(hwmgr->device))
+	if (!hwmgr->not_vf)
 		return 0;
 
 	smu_data->smu_buffer.data_size = 200*4096;
@@ -643,7 +643,7 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
 					&smu_data->header_buffer.mc_addr,
 					&smu_data->header_buffer.kaddr);
 
-	if (!cgs_is_virtualization_enabled(hwmgr->device))
+	if (hwmgr->not_vf)
 		amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
 					&smu_data->smu_buffer.mc_addr,
 					&smu_data->smu_buffer.kaddr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index b51d7468c3e7..2ba05d2b4302 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -199,8 +199,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
 	int result;
 
 	/* Only start SMC if SMC RAM is not running */
-	if (!(smu7_is_smc_ram_running(hwmgr) ||
-		cgs_is_virtualization_enabled(hwmgr->device))) {
+	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
 		/*Check if SMU is running in protected mode*/
 		if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 					SMU_FIRMWARE, SMU_MODE)) {

From 64f6db77fcb81493988061587fa478e6612dc45b Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 22 Mar 2018 19:32:45 +0800
Subject: [PATCH 0316/1286] drm/amd/pp: Use release_firmware directly in
 powerplay

Use the kernel API directly so we can deprecate the cgs interface.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c      | 6 +++++-
 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c   | 1 -
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 1 -
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 337af789d258..b91ef113a490 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -25,6 +25,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/firmware.h>
 #include "amd_shared.h"
 #include "amd_powerplay.h"
 #include "power_state.h"
@@ -107,8 +108,11 @@ static int pp_sw_fini(void *handle)
 
 	hwmgr_sw_fini(hwmgr);
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
+		release_firmware(adev->pm.fw);
+		adev->pm.fw = NULL;
 		amdgpu_ucode_fini_bo(adev);
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 08d000140eca..e30a2eea1fba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2784,7 +2784,6 @@ static int ci_smu_fini(struct pp_hwmgr *hwmgr)
 {
 	kfree(hwmgr->smu_backend);
 	hwmgr->smu_backend = NULL;
-	cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 3684822b75b2..41fab2df994e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -650,6 +650,5 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
 
 	kfree(hwmgr->smu_backend);
 	hwmgr->smu_backend = NULL;
-	cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
 	return 0;
 }

From b13aa1091fb2002a6854e0401df5fc6231fbca58 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Mon, 26 Mar 2018 16:18:34 +0800
Subject: [PATCH 0317/1286] drm/amdgpu: Use dpm_enabled as dpm state flag

The driver will set dpm_enabled to true only when the module
parameter amdgpu_dpm is not equal to 0 and SMU hardware
initialization succeeds.
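
A rough sketch of the intended usage (not literal code from this
patch): IP blocks gate on the runtime flag rather than on the module
parameter, and powerplay raises the flag only after hardware init has
succeeded.

	/* in an IP block's late_init/query path */
	if (!adev->pm.dpm_enabled)
		return 0;

	/* at the end of hwmgr_hw_init(), once everything succeeded */
	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;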

Reviewed-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c     | 2 +-
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c         | 2 +-
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c         | 2 +-
 drivers/gpu/drm/amd/amdgpu/si_dpm.c         | 2 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c       | 4 ++--
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 3 ++-
 7 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 448d69fe3756..c98e59721444 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -428,7 +428,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	if (size & 3 || *pos & 0x3)
 		return -EINVAL;
 
-	if (amdgpu_dpm == 0)
+	if (!adev->pm.dpm_enabled)
 		return -EINVAL;
 
 	/* convert offset to sensor number */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 4b7824d30e73..bd9e723dbb2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -704,7 +704,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		struct pp_gpu_power query = {0};
 		int query_size = sizeof(query);
 
-		if (amdgpu_dpm == 0)
+		if (!adev->pm.dpm_enabled)
 			return -ENOENT;
 
 		switch (info->sensor_info.type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 47ef3e6e7178..be6b19951e6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6255,7 +6255,7 @@ static int ci_dpm_late_init(void *handle)
 	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!amdgpu_dpm)
+	if (!adev->pm.dpm_enabled)
 		return 0;
 
 	/* init the sysfs and debugfs files late */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 26ba984ab2b7..bc1720ea4959 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2974,7 +2974,7 @@ static int kv_dpm_late_init(void *handle)
 	/* powerdown unused blocks for now */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!amdgpu_dpm)
+	if (!adev->pm.dpm_enabled)
 		return 0;
 
 	kv_dpm_powergate_acp(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 797d505bf9ee..b12d7c9d42a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7580,7 +7580,7 @@ static int si_dpm_late_init(void *handle)
 	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!amdgpu_dpm)
+	if (!adev->pm.dpm_enabled)
 		return 0;
 
 	ret = si_set_temperature_range(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 948bb9437757..87cbb142dd0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -688,7 +688,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v4_2_stop(adev);
-		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
 			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
 				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
 				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
@@ -699,7 +699,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 		}
 		return 0;
 	} else {
-		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
 			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
 				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
 				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK   |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 30ff8a9c301b..bca67df29c8c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -211,7 +211,6 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 	    !hwmgr->pptable_func->pptable_init ||
 	    !hwmgr->hwmgr_func->backend_init) {
 		hwmgr->pm_en = false;
-		((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = false;
 		pr_info("dpm not supported \n");
 		return 0;
 	}
@@ -240,6 +239,8 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 	if (ret)
 		goto err2;
 
+	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;
+
 	return 0;
 err2:
 	if (hwmgr->hwmgr_func->backend_fini)

From 986567e4ed81a21a66e841b9e87e708c435328d8 Mon Sep 17 00:00:00 2001
From: Colin Ian King <colin.king@canonical.com>
Date: Tue, 27 Mar 2018 09:32:57 +0100
Subject: [PATCH 0318/1286] drm/amd/pp: Fix spelling mistake: "suppported" ->
 "supported"

Trivial fix to a spelling mistake in the pr_warn warning message text.

Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 0f2851b5b368..308bff2b5d1d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -46,7 +46,7 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
 					  sizeof(struct pp_power_state);
 
 	if (table_entries == 0 || size == 0) {
-		pr_warn("Please check whether power state management is suppported on this asic\n");
+		pr_warn("Please check whether power state management is supported on this asic\n");
 		return 0;
 	}
 

From 62fd51275e4d43e300f95f2148a41e5bf738ac29 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 23 Mar 2018 18:18:23 +0800
Subject: [PATCH 0319/1286] drm/amd/pp: Use gfx rlc funcs directly in powerplay
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Call the gfx RLC safe-mode helpers and take the grbm_idx mutex directly,
in order to remove the cgs interfaces:
cgs_enter_safe_mode
cgs_lock_grbm_idx

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
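A rough sketch of the replacement pattern (the function name below is made up
for illustration; the field accesses match the hunks that follow):

    static void didt_critical_section_sketch(struct pp_hwmgr *hwmgr)
    {
            struct amdgpu_device *adev = hwmgr->adev;

            /* was: cgs_enter_safe_mode(hwmgr->device, true); */
            adev->gfx.rlc.funcs->enter_safe_mode(adev);
            /* was: cgs_lock_grbm_idx(hwmgr->device, true); */
            mutex_lock(&adev->grbm_idx_mutex);

            /* ... program the per-SE DIDT/EDC registers here ... */

            mutex_unlock(&adev->grbm_idx_mutex);
            adev->gfx.rlc.funcs->exit_safe_mode(adev);
    }
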
 .../drm/amd/powerplay/hwmgr/smu7_powertune.c  | 13 ++--
 .../amd/powerplay/hwmgr/vega10_powertune.c    | 71 ++++++++++---------
 2 files changed, 46 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 03bc7453f3b1..a55ee166ce9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -740,8 +740,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 	    PP_CAP(PHM_PlatformCaps_TDRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-		cgs_enter_safe_mode(hwmgr->device, true);
-		cgs_lock_grbm_idx(hwmgr->device, true);
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		mutex_lock(&adev->grbm_idx_mutex);
 		value = 0;
 		value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
 		for (count = 0; count < num_se; count++) {
@@ -781,8 +781,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 			PP_ASSERT_WITH_CODE((0 == result),
 					"Failed to enable DPM DIDT.", return result);
 		}
-		cgs_lock_grbm_idx(hwmgr->device, false);
-		cgs_enter_safe_mode(hwmgr->device, false);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	}
 
 	return 0;
@@ -791,13 +791,14 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 {
 	int result;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
 	    PP_CAP(PHM_PlatformCaps_DBRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TDRamping) ||
 	    PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 
-		cgs_enter_safe_mode(hwmgr->device, true);
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 		result = smu7_enable_didt(hwmgr, false);
 		PP_ASSERT_WITH_CODE((result == 0),
@@ -809,7 +810,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 			PP_ASSERT_WITH_CODE((0 == result),
 					"Failed to disable DPM DIDT.", return result);
 		}
-		cgs_enter_safe_mode(hwmgr->device, false);
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index ba63faefc61f..203a6918395b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -930,16 +930,16 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
 
 static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	uint32_t num_se = 0, count, data;
-	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	cgs_enter_safe_mode(hwmgr->device, true);
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
-	cgs_lock_grbm_idx(hwmgr->device, true);
+	mutex_lock(&adev->grbm_idx_mutex);
 	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
@@ -959,38 +959,40 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 			break;
 	}
 	cgs_write_register(hwmgr->device, reg, 0xE0000000);
-	cgs_lock_grbm_idx(hwmgr->device, false);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	return 0;
 }
 
 static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 {
-	cgs_enter_safe_mode(hwmgr->device, true);
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	return 0;
 }
 
 static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	uint32_t num_se = 0, count, data;
-	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	cgs_enter_safe_mode(hwmgr->device, true);
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
-	cgs_lock_grbm_idx(hwmgr->device, true);
+	mutex_lock(&adev->grbm_idx_mutex);
 	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
@@ -1004,11 +1006,11 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 			break;
 	}
 	cgs_write_register(hwmgr->device, reg, 0xE0000000);
-	cgs_lock_grbm_idx(hwmgr->device, false);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
 	if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1022,13 +1024,14 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
 static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t data;
 
-	cgs_enter_safe_mode(hwmgr->device, true);
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
 		data = 0x00000000;
@@ -1043,16 +1046,16 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 
 static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	uint32_t num_se = 0, count, data;
-	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	cgs_enter_safe_mode(hwmgr->device, true);
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
-	cgs_lock_grbm_idx(hwmgr->device, true);
+	mutex_lock(&adev->grbm_idx_mutex);
 	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
@@ -1068,41 +1071,43 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 			break;
 	}
 	cgs_write_register(hwmgr->device, reg, 0xE0000000);
-	cgs_lock_grbm_idx(hwmgr->device, false);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	return 0;
 }
 
 static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 {
-	cgs_enter_safe_mode(hwmgr->device, true);
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	return 0;
 }
 
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	uint32_t num_se = 0;
 	uint32_t count, data;
-	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 
 	num_se = adev->gfx.config.max_shader_engines;
 
-	cgs_enter_safe_mode(hwmgr->device, true);
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
 
-	cgs_lock_grbm_idx(hwmgr->device, true);
+	mutex_lock(&adev->grbm_idx_mutex);
 	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
@@ -1116,11 +1121,11 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 			break;
 	}
 	cgs_write_register(hwmgr->device, reg, 0xE0000000);
-	cgs_lock_grbm_idx(hwmgr->device, false);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	vega10_didt_set_mask(hwmgr, true);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
 
@@ -1137,13 +1142,14 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
 static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t data;
 
-	cgs_enter_safe_mode(hwmgr->device, true);
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
 		data = 0x00000000;
@@ -1158,15 +1164,16 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 
 static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 	int result;
 
-	cgs_enter_safe_mode(hwmgr->device, true);
+	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
-	cgs_lock_grbm_idx(hwmgr->device, true);
+	mutex_lock(&adev->grbm_idx_mutex);
 	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	cgs_write_register(hwmgr->device, reg, 0xE0000000);
-	cgs_lock_grbm_idx(hwmgr->device, false);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
 	result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1175,7 +1182,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 
 	vega10_didt_set_mask(hwmgr, false);
 
-	cgs_enter_safe_mode(hwmgr->device, false);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
 
 	return 0;
 }

From d32d661770a455802afc8d2f9efed617cc8073ed Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 23 Mar 2018 18:36:51 +0800
Subject: [PATCH 0320/1286] drm/amdgpu: Get pci resource directly through adev
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Read the MMIO base directly from adev, in order to remove the cgs wrapper
function cgs_get_pci_resource.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
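A condensed view of the new flow, assuming adev->rmmio_base / adev->rmmio_size
describe the MMIO BAR the driver has already mapped (0x5289 is the same offset
previously requested through cgs; note the error handling changes from
"return 0 on -ENODEV" to "-EINVAL when the BAR is too small"):

    /* was: cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
     *                           0x5289, 0, &acp_base);
     */
    if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
            return -EINVAL;
    acp_base = adev->rmmio_base;
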
 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index a29362f9ef41..03ee36739efe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -290,12 +290,11 @@ static int acp_hw_init(void *handle)
 	else if (r)
 		return r;
 
-	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
-			0x5289, 0, &acp_base);
-	if (r == -ENODEV)
-		return 0;
-	else if (r)
-		return r;
+	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
+		return -EINVAL;
+
+	acp_base = adev->rmmio_base;
+
 	if (adev->asic_type != CHIP_STONEY) {
 		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
 		if (adev->acp.acp_genpd == NULL)

From e8ee21d2a46d8d5cc85766fb49251c2c21871b30 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Mon, 26 Mar 2018 18:13:28 +0800
Subject: [PATCH 0321/1286] drm/amd/dc: Use atombios api directly in DC

Call the atombios API directly in DC, in order to remove the cgs wrapper
functions for it.

Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
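Roughly, DC now reaches the atom interpreter through the amdgpu_device stored
in ctx->driver_context instead of through cgs. amdgpu_atom_execute_table()
returns 0 on success (hence the '== 0' in EXEC_BIOS_CMD_TABLE), while
amdgpu_atom_parse_cmd_header() returns true on success (hence the '== false'
checks). A sketch of the call pattern, with index, params, frev and crev
standing in for the macro arguments:

    struct amdgpu_device *adev = bp->base.ctx->driver_context;

    /* command execution: 0 means success */
    if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
                                  index, (uint32_t *)&params) == 0)
            /* table executed */;

    /* revision lookup: returns true when the header was parsed */
    if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
                                     index, &frev, &crev))
            /* frev/crev are valid */;
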
 .../drm/amd/display/dc/bios/command_table.c   | 22 +++++++++-------
 .../drm/amd/display/dc/bios/command_table2.c  | 26 ++++++++++++-------
 2 files changed, 28 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 4b5fdd577848..651e1fd4622f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -24,7 +24,7 @@
  */
 
 #include "dm_services.h"
-
+#include "amdgpu.h"
 #include "atom.h"
 
 #include "include/bios_parser_interface.h"
@@ -35,16 +35,16 @@
 #include "bios_parser_types_internal.h"
 
 #define EXEC_BIOS_CMD_TABLE(command, params)\
-	(cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+	(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
 		GetIndexIntoMasterTable(COMMAND, command), \
-		&params) == 0)
+		(uint32_t *)&params) == 0)
 
 #define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
-	cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+	amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
 		GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
 
 #define BIOS_CMD_TABLE_PARA_REVISION(command)\
-	bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+	bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
 		GetIndexIntoMasterTable(COMMAND, command))
 
 static void init_dig_encoder_control(struct bios_parser *bp);
@@ -82,16 +82,18 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
 	init_set_dce_clock(bp);
 }
 
-static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+static uint32_t bios_cmd_table_para_revision(void *dev,
 					     uint32_t index)
 {
+	struct amdgpu_device *adev = dev;
 	uint8_t frev, crev;
 
-	if (cgs_atom_get_cmd_table_revs(cgs_device,
+	if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
 					index,
-					&frev, &crev) != 0)
+					&frev, &crev))
+		return crev;
+	else
 		return 0;
-	return crev;
 }
 
 /*******************************************************************************
@@ -368,7 +370,7 @@ static void init_transmitter_control(struct bios_parser *bp)
 	uint8_t crev;
 
 	if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
-			frev, crev) != 0)
+			frev, crev) == false)
 		BREAK_TO_DEBUGGER();
 	switch (crev) {
 	case 2:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 3f63f712c8a4..752b08a42d3e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -26,14 +26,18 @@
 #include "dm_services.h"
 
 #include "ObjectID.h"
-#include "atomfirmware.h"
 
+#include "atomfirmware.h"
+#include "atom.h"
 #include "include/bios_parser_interface.h"
 
 #include "command_table2.h"
 #include "command_table_helper2.h"
 #include "bios_parser_helper.h"
 #include "bios_parser_types_internal2.h"
+#include "amdgpu.h"
+
+
 #define DC_LOGGER \
 	bp->base.ctx->logger
 
@@ -43,16 +47,16 @@
 		->FieldName)-(char *)0)/sizeof(uint16_t))
 
 #define EXEC_BIOS_CMD_TABLE(fname, params)\
-	(cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+	(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
 		GET_INDEX_INTO_MASTER_TABLE(command, fname), \
-		&params) == 0)
+		(uint32_t *)&params) == 0)
 
 #define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
-	cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+	amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
 		GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)
 
 #define BIOS_CMD_TABLE_PARA_REVISION(fname)\
-	bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+	bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
 			GET_INDEX_INTO_MASTER_TABLE(command, fname))
 
 static void init_dig_encoder_control(struct bios_parser *bp);
@@ -86,16 +90,18 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
 	init_get_smu_clock_info(bp);
 }
 
-static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+static uint32_t bios_cmd_table_para_revision(void *dev,
 					     uint32_t index)
 {
+	struct amdgpu_device *adev = dev;
 	uint8_t frev, crev;
 
-	if (cgs_atom_get_cmd_table_revs(cgs_device,
+	if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
 					index,
-					&frev, &crev) != 0)
+					&frev, &crev))
+		return crev;
+	else
 		return 0;
-	return crev;
 }
 
 /******************************************************************************
@@ -201,7 +207,7 @@ static void init_transmitter_control(struct bios_parser *bp)
 	uint8_t frev;
 	uint8_t crev;
 
-	if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0)
+	if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
 		BREAK_TO_DEBUGGER();
 	switch (crev) {
 	case 6:

From b3892e2bb519fe18225d0628f0dd255761f16502 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Mon, 26 Mar 2018 18:49:35 +0800
Subject: [PATCH 0322/1286] drm/amd/pp: Use atombios api directly in powerplay
 (v2)

Call the atombios API directly in powerplay, in order to remove the cgs
wrapper functions for it.

v2: squash in whitespace cleanup (Alex)

Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
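The conversion pattern, in brief (the data-table half goes through the new
smu_atom_get_data_table() helper added in smu_helper.c; the variable names
below are taken from the hunks that follow):

    struct amdgpu_device *adev = hwmgr->adev;

    /* data tables: cgs_atom_get_data_table() -> smu_atom_get_data_table() */
    fw_info = (ATOM_FIRMWARE_INFO *)
            smu_atom_get_data_table(hwmgr->adev,
                    GetIndexIntoMasterTable(DATA, FirmwareInfo),
                    &size, &frev, &crev);

    /* command tables: cgs_atom_exec_cmd_table() -> amdgpu_atom_execute_table() */
    result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                    GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
                    (uint32_t *)&efuse_param);
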
 .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c  | 145 +++++++++---------
 .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h  |   2 +-
 .../drm/amd/powerplay/hwmgr/ppatomfwctrl.c    |  88 +++++------
 .../powerplay/hwmgr/process_pptables_v1_0.c   |   2 +-
 .../drm/amd/powerplay/hwmgr/processpptables.c |   4 +-
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  |   3 +-
 .../gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c  |   3 +-
 .../gpu/drm/amd/powerplay/hwmgr/smu_helper.c  |  16 ++
 .../gpu/drm/amd/powerplay/hwmgr/smu_helper.h  |   3 +
 .../powerplay/hwmgr/vega10_processpptables.c  |   2 +-
 .../powerplay/hwmgr/vega12_processpptables.c  |   2 +-
 .../drm/amd/powerplay/smumgr/fiji_smumgr.c    |   2 +-
 12 files changed, 149 insertions(+), 123 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index c6febbf0bf69..971fb5dfb620 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -23,7 +23,7 @@
 #include "pp_debug.h"
 #include <linux/module.h>
 #include <linux/slab.h>
-
+#include "atom.h"
 #include "ppatomctrl.h"
 #include "atombios.h"
 #include "cgs_common.h"
@@ -128,7 +128,6 @@ static int atomctrl_set_mc_reg_address_table(
 	return 0;
 }
 
-
 int atomctrl_initialize_mc_reg_table(
 		struct pp_hwmgr *hwmgr,
 		uint8_t module_index,
@@ -141,7 +140,7 @@ int atomctrl_initialize_mc_reg_table(
 	u16 size;
 
 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
 
 	if (module_index >= vram_info->ucNumOfVRAMModule) {
@@ -174,6 +173,8 @@ int atomctrl_set_engine_dram_timings_rv770(
 		uint32_t engine_clock,
 		uint32_t memory_clock)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
+
 	SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
 
 	/* They are both in 10KHz Units. */
@@ -184,9 +185,10 @@ int atomctrl_set_engine_dram_timings_rv770(
 	/* in 10 khz units.*/
 	engine_clock_parameters.sReserved.ulClock =
 		cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
-	return cgs_atom_exec_cmd_table(hwmgr->device,
+
+	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
-			&engine_clock_parameters);
+			(uint32_t *)&engine_clock_parameters);
 }
 
 /**
@@ -203,7 +205,7 @@ static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
 	union voltage_object_info *voltage_info;
 
 	voltage_info = (union voltage_object_info *)
-		cgs_atom_get_data_table(device, index,
+		smu_atom_get_data_table(device, index,
 			&size, &frev, &crev);
 
 	if (voltage_info != NULL)
@@ -247,16 +249,16 @@ int atomctrl_get_memory_pll_dividers_si(
 		pp_atomctrl_memory_clock_param *mpll_param,
 		bool strobe_mode)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
 	int result;
 
 	mpll_parameters.ulClock = cpu_to_le32(clock_value);
 	mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
 
-	result = cgs_atom_exec_cmd_table
-		(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
-		 &mpll_parameters);
+		(uint32_t *)&mpll_parameters);
 
 	if (0 == result) {
 		mpll_param->mpll_fb_divider.clk_frac =
@@ -295,14 +297,15 @@ int atomctrl_get_memory_pll_dividers_si(
 int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
 		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
 	int result;
 
 	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
-			&mpll_parameters);
+			(uint32_t *)&mpll_parameters);
 
 	if (!result)
 		mpll_param->mpll_post_divider =
@@ -315,15 +318,15 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
 					  uint32_t clock_value,
 					  pp_atomctrl_clock_dividers_kong *dividers)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
 	int result;
 
 	pll_parameters.ulClock = cpu_to_le32(clock_value);
 
-	result = cgs_atom_exec_cmd_table
-		(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-		 &pll_parameters);
+		(uint32_t *)&pll_parameters);
 
 	if (0 == result) {
 		dividers->pll_post_divider = pll_parameters.ucPostDiv;
@@ -338,16 +341,16 @@ int atomctrl_get_engine_pll_dividers_vi(
 		uint32_t clock_value,
 		pp_atomctrl_clock_dividers_vi *dividers)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
 	int result;
 
 	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
 	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
 
-	result = cgs_atom_exec_cmd_table
-		(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-		 &pll_patameters);
+		(uint32_t *)&pll_patameters);
 
 	if (0 == result) {
 		dividers->pll_post_divider =
@@ -375,16 +378,16 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
 		uint32_t clock_value,
 		pp_atomctrl_clock_dividers_ai *dividers)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
 	int result;
 
 	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
 	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
 
-	result = cgs_atom_exec_cmd_table
-		(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-		 &pll_patameters);
+		(uint32_t *)&pll_patameters);
 
 	if (0 == result) {
 		dividers->usSclk_fcw_frac     = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
@@ -407,6 +410,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
 		uint32_t clock_value,
 		pp_atomctrl_clock_dividers_vi *dividers)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
 	int result;
 
@@ -414,10 +418,9 @@ int atomctrl_get_dfs_pll_dividers_vi(
 	pll_patameters.ulClock.ucPostDiv =
 		COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
 
-	result = cgs_atom_exec_cmd_table
-		(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-		 &pll_patameters);
+		(uint32_t *)&pll_patameters);
 
 	if (0 == result) {
 		dividers->pll_post_divider =
@@ -452,7 +455,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
 	uint32_t clock;
 
 	fw_info = (ATOM_FIRMWARE_INFO *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 			GetIndexIntoMasterTable(DATA, FirmwareInfo),
 			&size, &frev, &crev);
 
@@ -476,7 +479,7 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3(
 		uint8_t voltage_mode)
 {
 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
-		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
+		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
 	bool ret;
 
 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
@@ -495,7 +498,7 @@ int atomctrl_get_voltage_table_v3(
 		pp_atomctrl_voltage_table *voltage_table)
 {
 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
-		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
+		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
 	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
 	unsigned int i;
 
@@ -572,7 +575,7 @@ static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
 	void *table_address;
 
 	table_address = (ATOM_GPIO_PIN_LUT *)
-		cgs_atom_get_data_table(device,
+		smu_atom_get_data_table(device,
 				GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
 				&size, &frev, &crev);
 
@@ -592,7 +595,7 @@ bool atomctrl_get_pp_assign_pin(
 {
 	bool bRet = false;
 	ATOM_GPIO_PIN_LUT *gpio_lookup_table =
-		get_gpio_lookup_table(hwmgr->device);
+		get_gpio_lookup_table(hwmgr->adev);
 
 	PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
 			"Could not find GPIO lookup Table in BIOS.", return false);
@@ -613,7 +616,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 		bool debug)
 {
 	ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
-
+	struct amdgpu_device *adev = hwmgr->adev;
 	EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
 	EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
 	EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
@@ -640,7 +643,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 	int result;
 
 	getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
-			cgs_atom_get_data_table(hwmgr->device,
+			smu_atom_get_data_table(hwmgr->adev,
 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
 					NULL, NULL, NULL);
 
@@ -706,9 +709,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues);
 
 	if (result)
 		return result;
@@ -727,9 +730,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues);
 
 	if (result)
 		return result;
@@ -747,9 +750,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 	sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues);
 
 	if (result)
 		return result;
@@ -768,9 +771,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues);
 
 	if (result)
 		return result;
@@ -790,9 +793,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues);
 	if (result)
 		return result;
 
@@ -811,9 +814,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 	sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues);
 
 	if (result)
 		return result;
@@ -842,9 +845,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
 	sOutput_FuseValues.sEfuse = sInput_FuseValues;
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			&sOutput_FuseValues);
+			(uint32_t *)&sOutput_FuseValues);
 
 	if (result)
 		return result;
@@ -1053,8 +1056,9 @@ int atomctrl_get_voltage_evv_on_sclk(
 		uint32_t sclk, uint16_t virtual_voltage_Id,
 		uint16_t *voltage)
 {
-	int result;
+	struct amdgpu_device *adev = hwmgr->adev;
 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
+	int result;
 
 	get_voltage_info_param_space.ucVoltageType   =
 		voltage_type;
@@ -1065,9 +1069,9 @@ int atomctrl_get_voltage_evv_on_sclk(
 	get_voltage_info_param_space.ulSCLKFreq      =
 		cpu_to_le32(sclk);
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-			&get_voltage_info_param_space);
+			(uint32_t *)&get_voltage_info_param_space);
 
 	if (0 != result)
 		return result;
@@ -1088,9 +1092,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
 			     uint16_t virtual_voltage_id,
 			     uint16_t *voltage)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
+	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
 	int result;
 	int entry_id;
-	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
 
 	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
 	for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
@@ -1111,9 +1116,9 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
 	get_voltage_info_param_space.ulSCLKFreq =
 		cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-			&get_voltage_info_param_space);
+			(uint32_t *)&get_voltage_info_param_space);
 
 	if (0 != result)
 		return result;
@@ -1135,7 +1140,7 @@ uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
 	u16 size;
 
 	fw_info = (ATOM_COMMON_TABLE_HEADER *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 				GetIndexIntoMasterTable(DATA, FirmwareInfo),
 				&size, &frev, &crev);
 
@@ -1167,7 +1172,7 @@ static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
 	u16 size;
 
 	table = (ATOM_ASIC_INTERNAL_SS_INFO *)
-		cgs_atom_get_data_table(device,
+		smu_atom_get_data_table(device,
 			GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
 			&size, &frev, &crev);
 
@@ -1188,7 +1193,7 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
 
 	memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));
 
-	table = asic_internal_ss_get_ss_table(hwmgr->device);
+	table = asic_internal_ss_get_ss_table(hwmgr->adev);
 
 	if (NULL == table)
 		return -1;
@@ -1260,9 +1265,10 @@ int atomctrl_get_engine_clock_spread_spectrum(
 			ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo);
 }
 
-int atomctrl_read_efuse(void *device, uint16_t start_index,
+int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
 		uint16_t end_index, uint32_t mask, uint32_t *efuse)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	READ_EFUSE_VALUE_PARAMETER efuse_param;
 
@@ -1272,9 +1278,9 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
 	efuse_param.sEfuse.ucBitLength  = (uint8_t)
 			((end_index - start_index) + 1);
 
-	result = cgs_atom_exec_cmd_table(device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-			&efuse_param);
+			(uint32_t *)&efuse_param);
 	if (!result)
 		*efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
 
@@ -1284,6 +1290,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
 int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 			      uint8_t level)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
 	int result;
 
@@ -1293,10 +1300,9 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 		ADJUST_MC_SETTING_PARAM;
 	memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
 
-	result = cgs_atom_exec_cmd_table
-		(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 		 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
-		 &memory_clock_parameters);
+		(uint32_t *)&memory_clock_parameters);
 
 	return result;
 }
@@ -1304,7 +1310,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 				uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
 {
-
+	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
 
@@ -1313,9 +1319,9 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 	get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
 	get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-			&get_voltage_info_param_space);
+			(uint32_t *)&get_voltage_info_param_space);
 
 	if (0 != result)
 		return result;
@@ -1334,7 +1340,7 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
 	u16 size;
 
 	ATOM_SMU_INFO_V2_1 *psmu_info =
-		(ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device,
+		(ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
 			GetIndexIntoMasterTable(DATA, SMU_Info),
 			&size, &frev, &crev);
 
@@ -1362,7 +1368,7 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
 		return -EINVAL;
 
 	profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
-			cgs_atom_get_data_table(hwmgr->device,
+			smu_atom_get_data_table(hwmgr->adev,
 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
 					NULL, NULL, NULL);
 	if (!profile)
@@ -1402,7 +1408,7 @@ int  atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 				uint16_t *load_line)
 {
 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
-		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
+		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
 
 	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
 
@@ -1421,16 +1427,17 @@ int  atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 
 int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
 {
-	int result;
+	struct amdgpu_device *adev = hwmgr->adev;
 	SET_VOLTAGE_PS_ALLOCATION allocation;
 	SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
 			(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
+	int result;
 
 	voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
 
-	result = cgs_atom_exec_cmd_table(hwmgr->device,
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, SetVoltage),
-			voltage_parameters);
+			(uint32_t *)voltage_parameters);
 
 	*virtual_voltage_id = voltage_parameters->usVoltageLevel;
 
@@ -1453,7 +1460,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
 	ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
 
 	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
-			cgs_atom_get_data_table(hwmgr->device,
+			smu_atom_get_data_table(hwmgr->adev,
 					ix,
 					NULL, NULL, NULL);
 	if (!profile)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index c44a92064cf1..c672a5069840 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -298,7 +298,7 @@ extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
 extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
 						 uint32_t clock_value,
 						 pp_atomctrl_clock_dividers_kong *dividers);
-extern int atomctrl_read_efuse(void *device, uint16_t start_index,
+extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
 		uint16_t end_index, uint32_t mask, uint32_t *efuse);
 extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 		uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index ad42caac033e..0adaf36b6d68 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -23,9 +23,9 @@
 
 #include "ppatomfwctrl.h"
 #include "atomfirmware.h"
+#include "atom.h"
 #include "pp_debug.h"
 
-
 static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4(
 		const struct atom_voltage_objects_info_v4_1 *voltage_object_info_table,
 		uint8_t voltage_type, uint8_t voltage_mode)
@@ -38,35 +38,34 @@ static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4(
 
 	while (offset < size) {
 		const union atom_voltage_object_v4 *voltage_object =
-				(const union atom_voltage_object_v4 *)(start + offset);
+			(const union atom_voltage_object_v4 *)(start + offset);
 
-        if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type &&
-            voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode)
-            return voltage_object;
+		if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type &&
+		    voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode)
+			return voltage_object;
 
-        offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size);
+		offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size);
 
-    }
+	}
 
-    return NULL;
+	return NULL;
 }
 
 static struct atom_voltage_objects_info_v4_1 *pp_atomfwctrl_get_voltage_info_table(
 		struct pp_hwmgr *hwmgr)
 {
-    const void *table_address;
-    uint16_t idx;
+	const void *table_address;
+	uint16_t idx;
 
-    idx = GetIndexIntoMasterDataTable(voltageobject_info);
-    table_address =	cgs_atom_get_data_table(hwmgr->device,
-    		idx, NULL, NULL, NULL);
+	idx = GetIndexIntoMasterDataTable(voltageobject_info);
+	table_address = smu_atom_get_data_table(hwmgr->adev,
+						idx, NULL, NULL, NULL);
 
-    PP_ASSERT_WITH_CODE( 
-        table_address,
-        "Error retrieving BIOS Table Address!",
-        return NULL);
+	PP_ASSERT_WITH_CODE(table_address,
+			"Error retrieving BIOS Table Address!",
+			return NULL);
 
-    return (struct atom_voltage_objects_info_v4_1 *)table_address;
+	return (struct atom_voltage_objects_info_v4_1 *)table_address;
 }
 
 /**
@@ -167,7 +166,7 @@ static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table(
 	uint16_t idx;
 
 	idx = GetIndexIntoMasterDataTable(gpio_pin_lut);
-	table_address =	cgs_atom_get_data_table(hwmgr->device,
+	table_address =	smu_atom_get_data_table(hwmgr->adev,
 			idx, NULL, NULL, NULL);
 	PP_ASSERT_WITH_CODE(table_address,
 			"Error retrieving BIOS Table Address!",
@@ -248,28 +247,30 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
 		uint32_t clock_type, uint32_t clock_value,
 		struct pp_atomfwctrl_clock_dividers_soc15 *dividers)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct compute_gpu_clock_input_parameter_v1_8 pll_parameters;
 	struct compute_gpu_clock_output_parameter_v1_8 *pll_output;
-	int result;
 	uint32_t idx;
 
 	pll_parameters.gpuclock_10khz = (uint32_t)clock_value;
 	pll_parameters.gpu_clock_type = clock_type;
 
 	idx = GetIndexIntoMasterCmdTable(computegpuclockparam);
-	result = cgs_atom_exec_cmd_table(hwmgr->device, idx, &pll_parameters);
 
-	if (!result) {
-		pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
-				&pll_parameters;
-		dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz);
-		dividers->ulDid = le32_to_cpu(pll_output->dfs_did);
-		dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult);
-		dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult);
-		dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac);
-		dividers->ucPll_ss_enable = pll_output->pll_ss_enable;
-	}
-	return result;
+	if (amdgpu_atom_execute_table(
+		adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters))
+		return -EINVAL;
+
+	pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
+			&pll_parameters;
+	dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz);
+	dividers->ulDid = le32_to_cpu(pll_output->dfs_did);
+	dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult);
+	dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult);
+	dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac);
+	dividers->ucPll_ss_enable = pll_output->pll_ss_enable;
+
+	return 0;
 }
 
 int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
@@ -283,7 +284,7 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
 
 	idx = GetIndexIntoMasterDataTable(asic_profiling_info);
 	profile = (struct atom_asic_profiling_info_v4_1 *)
-			cgs_atom_get_data_table(hwmgr->device,
+			smu_atom_get_data_table(hwmgr->adev,
 					idx, NULL, NULL, NULL);
 
 	if (!profile)
@@ -467,7 +468,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
 
 	idx = GetIndexIntoMasterDataTable(smu_info);
 	info = (struct atom_smu_info_v3_1 *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 				idx, NULL, NULL, NULL);
 
 	if (!info) {
@@ -489,6 +490,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
 
 int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct atom_get_smu_clock_info_parameters_v3_1   parameters;
 	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
 	uint32_t ix;
@@ -497,13 +499,13 @@ int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLK
 	parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 
 	ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
-	if (!cgs_atom_exec_cmd_table(hwmgr->device, ix, &parameters)) {
-		output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
-		*frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
-	} else {
-		pr_info("Error execute_table getsmuclockinfo!");
-		return -1;
-	}
+
+	if (amdgpu_atom_execute_table(
+		adev->mode_info.atom_context, ix, (uint32_t *)&parameters))
+		return -EINVAL;
+
+	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
+	*frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
 
 	return 0;
 }
@@ -517,7 +519,7 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 
 	ix = GetIndexIntoMasterDataTable(firmwareinfo);
 	info = (struct atom_firmware_info_v3_1 *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 				ix, NULL, NULL, NULL);
 
 	if (!info) {
@@ -553,7 +555,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
 
 	ix = GetIndexIntoMasterDataTable(smc_dpm_info);
 	info = (struct atom_smc_dpm_info_v4_1 *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 				ix, NULL, NULL, NULL);
 	if (!info) {
 		pr_info("Error retrieving BIOS Table Address!");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index c9eecce5683f..8516516eb6cc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -141,7 +141,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
 
 	if (!table_address) {
 		table_address = (ATOM_Tonga_POWERPLAYTABLE *)
-				cgs_atom_get_data_table(hwmgr->device,
+				smu_atom_get_data_table(hwmgr->adev,
 						index, &size, &frev, &crev);
 		hwmgr->soft_pp_table = table_address;	/*Cache the result in RAM.*/
 		hwmgr->soft_pp_table_size = size;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 36ca7c419c90..ce64dfabd34b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -837,7 +837,7 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
 			hwmgr->soft_pp_table = &soft_dummy_pp_table[0];
 			hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table);
 		} else {
-			table_addr = cgs_atom_get_data_table(hwmgr->device,
+			table_addr = smu_atom_get_data_table(hwmgr->adev,
 					GetIndexIntoMasterTable(DATA, PowerPlayInfo),
 					&size, &frev, &crev);
 			hwmgr->soft_pp_table = table_addr;
@@ -1058,7 +1058,7 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
 		return 0;
 
 	/* We assume here that fw_info is unchanged if this call fails.*/
-	fw_info = cgs_atom_get_data_table(hwmgr->device,
+	fw_info = smu_atom_get_data_table(hwmgr->adev,
 			 GetIndexIntoMasterTable(DATA, FirmwareInfo),
 			 &size, &frev, &crev);
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index add90675fd2a..9087ef91b50b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2957,8 +2957,7 @@ static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
 	/* First retrieve the Boot clocks and VDDC from the firmware info table.
 	 * We assume here that fw_info is unchanged if this call fails.
 	 */
-	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
-			hwmgr->device, index,
+	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
 			&size, &frev, &crev);
 	if (!fw_info)
 		/* During a test, there is no firmware info table. */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 7b26607c646a..3ac07fabbe5c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -314,8 +314,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
 	uint8_t frev, crev;
 	uint16_t size;
 
-	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table(
-			hwmgr->device,
+	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
 			GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
 			&size, &frev, &crev);
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 598122854ab5..529be3cd768a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -24,6 +24,7 @@
 #include "pp_debug.h"
 #include "ppatomctrl.h"
 #include "ppsmc.h"
+#include "atom.h"
 
 uint8_t convert_to_vid(uint16_t vddc)
 {
@@ -608,3 +609,18 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
 
 	return 0;
 }
+
+void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
+						uint8_t *frev, uint8_t *crev)
+{
+	struct amdgpu_device *adev = dev;
+	uint16_t data_start;
+
+	if (amdgpu_atom_parse_data_header(
+		    adev->mode_info.atom_context, table, size,
+		    frev, crev, &data_start))
+		return (uint8_t *)adev->mode_info.atom_context->bios +
+			data_start;
+
+	return NULL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index d37d16e4b613..14ee162ac92a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -82,6 +82,9 @@ int phm_irq_process(struct amdgpu_device *adev,
 
 int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);
 
+void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
+						uint8_t *frev, uint8_t *crev);
+
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index c61d0744860d..0768d259c07c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -52,7 +52,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
 
 	if (!table_address) {
 		table_address = (ATOM_Vega10_POWERPLAYTABLE *)
-				cgs_atom_get_data_table(hwmgr->device, index,
+				smu_atom_get_data_table(hwmgr->adev, index,
 						&size, &frev, &crev);
 
 		hwmgr->soft_pp_table = table_address;	/*Cache the result in RAM.*/
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index b34113f45904..7fa1ba89ac54 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -51,7 +51,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
 
 	if (!table_address) {
 		table_address = (ATOM_Vega12_POWERPLAYTABLE *)
-				cgs_atom_get_data_table(hwmgr->device, index,
+				smu_atom_get_data_table(hwmgr->adev, index,
 						&size, &frev, &crev);
 
 		hwmgr->soft_pp_table = table_address;	/*Cache the result in RAM.*/
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 35b947e5292c..1eec527add99 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -337,7 +337,7 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
 	if (!hwmgr->not_vf)
 		return false;
 
-	if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
+	if (!atomctrl_read_efuse(hwmgr, AVFS_EN_LSB, AVFS_EN_MSB,
 			mask, &efuse)) {
 		if (efuse)
 			return true;

From d91ea4969bc5edbbe3bd723a1b3ae7d947f62a5a Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Mon, 26 Mar 2018 22:08:29 +0800
Subject: [PATCH 0323/1286] drm/amdgpu: Set pm_display_cfg in non-dc mode

This display information is needed by powerplay.

Reviewed-by: Huang Rui <ray.huang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 20 ++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  | 31 +++++++++++++------------
 3 files changed, 37 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index e997ebbe43ea..def1010ac05e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -115,6 +115,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 	pr_cont("\n");
 }
 
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+{
+	struct drm_device *ddev = adev->ddev;
+	struct drm_crtc *crtc;
+	struct amdgpu_crtc *amdgpu_crtc;
+
+	adev->pm.dpm.new_active_crtcs = 0;
+	adev->pm.dpm.new_active_crtc_count = 0;
+	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc,
+				    &ddev->mode_config.crtc_list, head) {
+			amdgpu_crtc = to_amdgpu_crtc(crtc);
+			if (amdgpu_crtc->enabled) {
+				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
+				adev->pm.dpm.new_active_crtc_count++;
+			}
+		}
+	}
+}
+
 
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 643d008410c6..b8c5177fa809 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -482,6 +482,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 				struct amdgpu_ps *rps);
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
 bool amdgpu_is_uvd_state(u32 class, u32 class2);
 void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 			      u32 *p, u32 *u);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 361975cf45a9..e6e365852f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1658,9 +1658,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 {
-	struct drm_device *ddev = adev->ddev;
-	struct drm_crtc *crtc;
-	struct amdgpu_crtc *amdgpu_crtc;
 	int i = 0;
 
 	if (!adev->pm.dpm_enabled)
@@ -1675,22 +1672,26 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 			amdgpu_fence_wait_empty(ring);
 	}
 
+	if (!amdgpu_device_has_dc_support(adev)) {
+		mutex_lock(&adev->pm.mutex);
+		amdgpu_dpm_get_active_displays(adev);
+		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+		/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+		if (adev->pm.pm_display_cfg.vrefresh > 120)
+			adev->pm.pm_display_cfg.min_vblank_time = 0;
+		if (adev->powerplay.pp_funcs->display_configuration_change)
+			adev->powerplay.pp_funcs->display_configuration_change(
+							adev->powerplay.pp_handle,
+							&adev->pm.pm_display_cfg);
+		mutex_unlock(&adev->pm.mutex);
+	}
+
 	if (adev->powerplay.pp_funcs->dispatch_tasks) {
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
 	} else {
 		mutex_lock(&adev->pm.mutex);
-		adev->pm.dpm.new_active_crtcs = 0;
-		adev->pm.dpm.new_active_crtc_count = 0;
-		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
-			list_for_each_entry(crtc,
-					    &ddev->mode_config.crtc_list, head) {
-				amdgpu_crtc = to_amdgpu_crtc(crtc);
-				if (amdgpu_crtc->enabled) {
-					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
-					adev->pm.dpm.new_active_crtc_count++;
-				}
-			}
-		}
 		/* update battery/ac status */
 		if (power_supply_is_system_supplied() > 0)
 			adev->pm.dpm.ac_power = true;

From 555fd70c59bc7f7acd8bc429d92bd59a66a7b83b Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Tue, 27 Mar 2018 13:32:02 +0800
Subject: [PATCH 0324/1286] drm/amd/pp: Not call cgs interface to get display
 info

Both the DC and non-DC code paths now update the display configuration
whenever the display state changes, so there is no need to get the
display info through the cgs interface.

Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
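For reference, a minimal sketch of the access-pattern change applied
throughout the hwmgr/smumgr code (taken directly from the hunks that
follow):

	/* before: query display state through the cgs wrapper */
	struct cgs_display_info info = {0};
	cgs_get_active_displays_info(hwmgr->device, &info);
	if (data->display_timing.num_existing_displays != info.display_count)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	/* after: read the configuration the driver core already caches */
	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

hwmgr->display_config now points at adev->pm.pm_display_cfg (wired up in
amd_powerplay_create below), so the powerplay code no longer needs a cgs
callback to learn how many displays are active.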
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c |  1 +
 .../drm/amd/powerplay/hwmgr/hardwaremanager.c |  8 +--
 .../gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c |  2 +-
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  | 56 +++++--------------
 .../gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c  | 14 ++---
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c    | 51 +++++------------
 .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 29 +++-------
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h     |  2 +-
 .../gpu/drm/amd/powerplay/smumgr/ci_smumgr.c  |  4 +-
 .../drm/amd/powerplay/smumgr/fiji_smumgr.c    |  4 +-
 .../drm/amd/powerplay/smumgr/iceland_smumgr.c |  6 +-
 .../amd/powerplay/smumgr/polaris10_smumgr.c   |  8 +--
 .../drm/amd/powerplay/smumgr/tonga_smumgr.c   | 12 ++--
 13 files changed, 61 insertions(+), 136 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index b91ef113a490..1ca6a13be6a3 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -54,6 +54,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
 	hwmgr->chip_family = adev->family;
 	hwmgr->chip_id = adev->asic_type;
 	hwmgr->feature_mask = amdgpu_pp_feature_mask;
+	hwmgr->display_config = &adev->pm.pm_display_cfg;
 	adev->powerplay.pp_handle = hwmgr;
 	adev->powerplay.pp_funcs = &pp_dpm_funcs;
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index dcceadb2e172..e411012b3dcb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -265,13 +265,11 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 	if (display_config == NULL)
 		return -EINVAL;
 
-	hwmgr->display_config = *display_config;
-
 	if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
-		hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk);
+		hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
 
-	for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) {
-		if (hwmgr->display_config.displays[index].controller_id != 0)
+	for (index = 0; index < display_config->num_path_including_non_display; index++) {
+		if (display_config->displays[index].controller_id != 0)
 			number_of_active_display++;
 	}
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 10253b89b3d8..055358b95fdf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -161,7 +161,7 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
 	struct PP_Clocks clocks = {0};
 	struct pp_display_clock_request clock_req;
 
-	clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
+	clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
 	clock_req.clock_type = amd_pp_dcf_clock;
 	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 9087ef91b50b..14332159227e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2777,8 +2777,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	struct PP_Clocks minimum_clocks = {0};
 	bool disable_mclk_switching;
 	bool disable_mclk_switching_for_frame_lock;
-	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info = {0};
 	const struct phm_clock_and_voltage_limits *max_limits;
 	uint32_t i;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2787,7 +2785,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	int32_t count;
 	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
-	info.mode_info = &mode_info;
 	data->battery_state = (PP_StateUILabel_Battery ==
 			request_ps->classification.ui_label);
 
@@ -2809,10 +2806,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 		}
 	}
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
-	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
+	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_StablePState)) {
@@ -2843,12 +2838,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-	if (info.display_count == 0)
+	if (hwmgr->display_config->num_display == 0)
 		disable_mclk_switching = false;
 	else
-		disable_mclk_switching = ((1 < info.display_count) ||
+		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
 					  disable_mclk_switching_for_frame_lock ||
-					  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));
+					  smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
 	sclk = smu7_ps->performance_levels[0].engine_clock;
 	mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -3479,7 +3474,6 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
 			[smu7_ps->performance_level_count - 1].memory_clock;
 	struct PP_Clocks min_clocks = {0};
 	uint32_t i;
-	struct cgs_display_info info = {0};
 
 	for (i = 0; i < sclk_table->count; i++) {
 		if (sclk == sclk_table->dpm_levels[i].value)
@@ -3506,9 +3500,8 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
 	if (i >= mclk_table->count)
 		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
 
-	if (data->display_timing.num_existing_displays != info.display_count)
+	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
 		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
 
 	return 0;
@@ -3907,15 +3900,8 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 static int
 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 {
-	uint32_t num_active_displays = 0;
-	struct cgs_display_info info = {0};
-
-	info.mode_info = NULL;
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	num_active_displays = info.display_count;
-
-	if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
+	if (hwmgr->display_config->num_display > 1 &&
+			!hwmgr->display_config->multi_monitor_in_sync)
 		smu7_notify_smc_display_change(hwmgr, false);
 
 	return 0;
@@ -3930,33 +3916,24 @@ smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-	uint32_t num_active_displays = 0;
 	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
 	uint32_t display_gap2;
 	uint32_t pre_vbi_time_in_us;
 	uint32_t frame_time_in_us;
-	uint32_t ref_clock;
-	uint32_t refresh_rate = 0;
-	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info = {0};
+	uint32_t ref_clock, refresh_rate;
 
-	info.mode_info = &mode_info;
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_active_displays = info.display_count;
-
-	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
 
 	ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
-
-	refresh_rate = mode_info.refresh_rate;
+	refresh_rate = hwmgr->display_config->vrefresh;
 
 	if (0 == refresh_rate)
 		refresh_rate = 60;
 
 	frame_time_in_us = 1000000 / refresh_rate;
 
-	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+	pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
 
 	data->frame_time_x2 = frame_time_in_us * 2 / 100;
 
@@ -4036,17 +4013,14 @@ smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
+	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
 		is_update_required = true;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
+		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
 			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
-			hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+			hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
 			is_update_required = true;
 	}
 	return is_update_required;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 3ac07fabbe5c..c2f93aa1d2e8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -693,7 +693,7 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
 	else
 		data->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
 
-	clock = hwmgr->display_config.min_core_set_clock;
+	clock = hwmgr->display_config->min_core_set_clock;
 	if (clock == 0)
 		pr_debug("min_core_set_clock not set\n");
 
@@ -748,7 +748,7 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
 {
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_SclkDeepSleep)) {
-		uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
+		uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
 		if (clks == 0)
 			clks = SMU8_MIN_DEEP_SLEEP_SCLK;
 
@@ -1040,25 +1040,21 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	struct smu8_hwmgr *data = hwmgr->backend;
 	struct PP_Clocks clocks = {0, 0, 0, 0};
 	bool force_high;
-	uint32_t  num_of_active_displays = 0;
-	struct cgs_display_info info = {0};
 
 	smu8_ps->need_dfs_bypass = true;
 
 	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
 
-	clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
-				hwmgr->display_config.min_mem_set_clock :
+	clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
+				hwmgr->display_config->min_mem_set_clock :
 				data->sys_info.nbp_memory_clock[1];
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_of_active_displays = info.display_count;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
 		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
 
 	force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
-			|| (num_of_active_displays >= 3);
+			|| (hwmgr->display_config->num_display >= 3);
 
 	smu8_ps->action = smu8_current_ps->action;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 7cbb56ba6fab..c9fb4b2cf5c6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3028,7 +3028,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	bool disable_mclk_switching_for_frame_lock;
 	bool disable_mclk_switching_for_vr;
 	bool force_mclk_high;
-	struct cgs_display_info info = {0};
 	const struct phm_clock_and_voltage_limits *max_limits;
 	uint32_t i;
 	struct vega10_hwmgr *data = hwmgr->backend;
@@ -3063,11 +3062,9 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 		}
 	}
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
 	/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
-	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
+	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
 
 	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
 		stable_pstate_sclk_dpm_percentage =
@@ -3107,10 +3104,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
 	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
 
-	if (info.display_count == 0)
+	if (hwmgr->display_config->num_display == 0)
 		disable_mclk_switching = false;
 	else
-		disable_mclk_switching = (info.display_count > 1) ||
+		disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
 			disable_mclk_switching_for_frame_lock ||
 			disable_mclk_switching_for_vr ||
 			force_mclk_high;
@@ -3186,7 +3183,6 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
 			[vega10_ps->performance_level_count - 1].mem_clock;
 	struct PP_Clocks min_clocks = {0};
 	uint32_t i;
-	struct cgs_display_info info = {0};
 
 	data->need_update_dpm_table = 0;
 
@@ -3211,10 +3207,8 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
 				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
 		}
 
-		cgs_get_active_displays_info(hwmgr->device, &info);
-
 		if (data->display_timing.num_existing_displays !=
-				info.display_count)
+				hwmgr->display_config->num_display)
 			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
 	} else {
 		for (i = 0; i < sclk_table->count; i++) {
@@ -3242,13 +3236,11 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
 				break;
 		}
 
-		cgs_get_active_displays_info(hwmgr->device, &info);
-
 		if (i >= mclk_table->count)
 			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
 
 		if (data->display_timing.num_existing_displays !=
-				info.display_count ||
+				hwmgr->display_config->num_display ||
 				i >= mclk_table->count)
 			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
 	}
@@ -3956,26 +3948,18 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
 			(struct phm_ppt_v2_information *)hwmgr->pptable;
 	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
 	uint32_t idx;
-	uint32_t num_active_disps = 0;
-	struct cgs_display_info info = {0};
 	struct PP_Clocks min_clocks = {0};
 	uint32_t i;
 	struct pp_display_clock_request clock_req;
 
-	info.mode_info = NULL;
-
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	num_active_disps = info.display_count;
-
-	if (num_active_disps > 1)
+	if (hwmgr->display_config->num_display > 1)
 		vega10_notify_smc_display_change(hwmgr, false);
 	else
 		vega10_notify_smc_display_change(hwmgr, true);
 
-	min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
-	min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
-	min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
+	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
+	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
 
 	for (i = 0; i < dpm_table->count; i++) {
 		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
@@ -4501,10 +4485,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
-	int result = 0;
-	uint32_t num_turned_on_displays = 1;
 	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
-	struct cgs_display_info info = {0};
+	int result = 0;
 
 	if ((data->water_marks_bitmap & WaterMarksExist) &&
 			!(data->water_marks_bitmap & WaterMarksLoaded)) {
@@ -4514,10 +4496,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 	}
 
 	if (data->water_marks_bitmap & WaterMarksLoaded) {
-		cgs_get_active_displays_info(hwmgr->device, &info);
-		num_turned_on_displays = info.display_count;
 		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
+			PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
 	}
 
 	return result;
@@ -4603,15 +4583,12 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
 	bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
+	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
 		is_update_required = true;
 
 	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
-		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
+		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
 			is_update_required = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 200de46bd06b..6a85238ae20f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1260,23 +1260,18 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
 {
 	struct vega12_hwmgr *data =
 			(struct vega12_hwmgr *)(hwmgr->backend);
-	uint32_t num_active_disps = 0;
-	struct cgs_display_info info = {0};
 	struct PP_Clocks min_clocks = {0};
 	struct pp_display_clock_request clock_req;
 	uint32_t clk_request;
 
-	info.mode_info = NULL;
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	num_active_disps = info.display_count;
-	if (num_active_disps > 1)
+	if (hwmgr->display_config->num_display > 1)
 		vega12_notify_smc_display_change(hwmgr, false);
 	else
 		vega12_notify_smc_display_change(hwmgr, true);
 
-	min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
-	min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
-	min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
+	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
+	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
 
 	if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
 		clock_req.clock_type = amd_pp_dcef_clock;
@@ -1832,9 +1827,7 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 	int result = 0;
-	uint32_t num_turned_on_displays = 1;
 	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
-	struct cgs_display_info info = {0};
 
 	if ((data->water_marks_bitmap & WaterMarksExist) &&
 			!(data->water_marks_bitmap & WaterMarksLoaded)) {
@@ -1846,12 +1839,9 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 
 	if ((data->water_marks_bitmap & WaterMarksExist) &&
 		data->smu_features[GNLD_DPM_DCEFCLK].supported &&
-		data->smu_features[GNLD_DPM_SOCCLK].supported) {
-		cgs_get_active_displays_info(hwmgr->device, &info);
-		num_turned_on_displays = info.display_count;
+		data->smu_features[GNLD_DPM_SOCCLK].supported)
 		smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
-	}
+			PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
 
 	return result;
 }
@@ -1894,15 +1884,12 @@ vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
 {
 	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 	bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-
-	if (data->display_timing.num_existing_displays != info.display_count)
+	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
 		is_update_required = true;
 
 	if (data->registry_data.gfx_clk_deep_sleep_support) {
-		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
+		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
 			is_update_required = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index d5cadc61c9b3..e450ec74d6ed 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -765,7 +765,7 @@ struct pp_hwmgr {
 	struct pp_power_state    *request_ps;
 	struct pp_power_state    *boot_ps;
 	struct pp_power_state    *uvd_ps;
-	struct amd_pp_display_configuration display_config;
+	const struct amd_pp_display_configuration *display_config;
 	uint32_t feature_mask;
 	bool avfs_supported;
 	/* UMD Pstate */
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index e30a2eea1fba..c28b95fd1c85 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -1182,7 +1182,6 @@ static int ci_populate_single_memory_level(
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	int result = 0;
 	bool dll_state_on;
-	struct cgs_display_info info = {0};
 	uint32_t mclk_edc_wr_enable_threshold = 40000;
 	uint32_t mclk_edc_enable_threshold = 40000;
 	uint32_t mclk_strobe_mode_threshold = 40000;
@@ -1236,8 +1235,7 @@ static int ci_populate_single_memory_level(
 	/* default set to low watermark. Highest level will be set to high later.*/
 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	data->display_timing.num_existing_displays = info.display_count;
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
 
 	/* stutter mode not support on ci */
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 1eec527add99..d023494c3eae 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -988,11 +988,11 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 
 	threshold = clock * data->fast_watermark_threshold / 100;
 
-	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+	data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
 		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
-								hwmgr->display_config.min_core_set_clock_in_sr);
+								hwmgr->display_config->min_core_set_clock_in_sr);
 
 
 	/* Default to slow, highest DPM level will be
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index d4bb934e7334..bc05e355012d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -932,7 +932,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 	graphic_level->PowerThrottle = 0;
 
 	data->display_timing.min_clock_in_sr =
-			hwmgr->display_config.min_core_set_clock_in_sr;
+			hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SclkDeepSleep))
@@ -1236,7 +1236,6 @@ static int iceland_populate_single_memory_level(
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	int result = 0;
 	bool dll_state_on;
-	struct cgs_display_info info = {0};
 	uint32_t mclk_edc_wr_enable_threshold = 40000;
 	uint32_t mclk_edc_enable_threshold = 40000;
 	uint32_t mclk_strobe_mode_threshold = 40000;
@@ -1283,8 +1282,7 @@ static int iceland_populate_single_memory_level(
 	/* default set to low watermark. Highest level will be set to high later.*/
 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	data->display_timing.num_existing_displays = info.display_count;
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
 
 	/* stutter mode not support on iceland */
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 05e60e8fee0b..d9192286099d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -942,11 +942,11 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 	level->DownHyst = data->current_profile_setting.sclk_down_hyst;
 	level->VoltageDownHyst = 0;
 	level->PowerThrottle = 0;
-	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+	data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
 		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
-								hwmgr->display_config.min_core_set_clock_in_sr);
+								hwmgr->display_config->min_core_set_clock_in_sr);
 
 	/* Default to slow, highest DPM level will be
 	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
@@ -1076,11 +1076,9 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	int result = 0;
-	struct cgs_display_info info = {0, 0, NULL};
 	uint32_t mclk_stutter_mode_threshold = 40000;
 	phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
 
 	if (hwmgr->od_enabled)
 		vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
@@ -1106,7 +1104,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
 	mem_level->StutterEnable = false;
 	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
-	data->display_timing.num_existing_displays = info.display_count;
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
 
 	if (mclk_stutter_mode_threshold &&
 		(clock <= mclk_stutter_mode_threshold) &&
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 2ba05d2b4302..94ba304ff52e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -650,7 +650,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
 	graphic_level->PowerThrottle = 0;
 
 	data->display_timing.min_clock_in_sr =
-			hwmgr->display_config.min_core_set_clock_in_sr;
+			hwmgr->display_config->min_core_set_clock_in_sr;
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SclkDeepSleep))
@@ -956,18 +956,17 @@ static int tonga_populate_single_memory_level(
 		SMU72_Discrete_MemoryLevel *memory_level
 		)
 {
-	uint32_t mvdd = 0;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *pptable_info =
 			  (struct phm_ppt_v1_information *)(hwmgr->pptable);
-	int result = 0;
-	bool dll_state_on;
-	struct cgs_display_info info = {0};
 	uint32_t mclk_edc_wr_enable_threshold = 40000;
 	uint32_t mclk_stutter_mode_threshold = 30000;
 	uint32_t mclk_edc_enable_threshold = 40000;
 	uint32_t mclk_strobe_mode_threshold = 40000;
 	phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
+	int result = 0;
+	bool dll_state_on;
+	uint32_t mvdd = 0;
 
 	if (hwmgr->od_enabled)
 		vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
@@ -1008,8 +1007,7 @@ static int tonga_populate_single_memory_level(
 	/* default set to low watermark. Highest level will be set to high later.*/
 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
-	cgs_get_active_displays_info(hwmgr->device, &info);
-	data->display_timing.num_existing_displays = info.display_count;
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
 
 	if ((mclk_stutter_mode_threshold != 0) &&
 	    (memory_clock <= mclk_stutter_mode_threshold) &&

From 10b3f45c4a189ffa0e0b9566fce7a0b65b289322 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 22 Mar 2018 19:32:45 +0800
Subject: [PATCH 0325/1286] drm/amdgpu: Delete some cgs functions

Drop cgs wrappers that are no longer used.
1. cgs_rel_firmware
2. cgs_is_virtualization_enabled
3. cgs_notify_dpm_enabled
4. cgs_atom_get_data_table
5. cgs_atom_get_cmd_table_revs
6. cgs_atom_exec_cmd_table
7. cgs_get_active_displays_info

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  | 197 -----------------------
 drivers/gpu/drm/amd/include/cgs_common.h | 139 ----------------
 2 files changed, 336 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 71a57b2f7f04..dc28fa63bf51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -23,7 +23,6 @@
  */
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/pci.h>
 #include <drm/drmP.h>
 #include <linux/firmware.h>
 #include <drm/amdgpu_drm.h>
@@ -109,78 +108,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 	WARN(1, "Invalid indirect register space");
 }
 
-static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
-				       enum cgs_resource_type resource_type,
-				       uint64_t size,
-				       uint64_t offset,
-				       uint64_t *resource_base)
-{
-	CGS_FUNC_ADEV;
-
-	if (resource_base == NULL)
-		return -EINVAL;
-
-	switch (resource_type) {
-	case CGS_RESOURCE_TYPE_MMIO:
-		if (adev->rmmio_size == 0)
-			return -ENOENT;
-		if ((offset + size) > adev->rmmio_size)
-			return -EINVAL;
-		*resource_base = adev->rmmio_base;
-		return 0;
-	case CGS_RESOURCE_TYPE_DOORBELL:
-		if (adev->doorbell.size == 0)
-			return -ENOENT;
-		if ((offset + size) > adev->doorbell.size)
-			return -EINVAL;
-		*resource_base = adev->doorbell.base;
-		return 0;
-	case CGS_RESOURCE_TYPE_FB:
-	case CGS_RESOURCE_TYPE_IO:
-	case CGS_RESOURCE_TYPE_ROM:
-	default:
-		return -EINVAL;
-	}
-}
-
-static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
-						  unsigned table, uint16_t *size,
-						  uint8_t *frev, uint8_t *crev)
-{
-	CGS_FUNC_ADEV;
-	uint16_t data_start;
-
-	if (amdgpu_atom_parse_data_header(
-		    adev->mode_info.atom_context, table, size,
-		    frev, crev, &data_start))
-		return (uint8_t*)adev->mode_info.atom_context->bios +
-			data_start;
-
-	return NULL;
-}
-
-static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
-					      uint8_t *frev, uint8_t *crev)
-{
-	CGS_FUNC_ADEV;
-
-	if (amdgpu_atom_parse_cmd_header(
-		    adev->mode_info.atom_context, table,
-		    frev, crev))
-		return 0;
-
-	return -EINVAL;
-}
-
-static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
-					  void *args)
-{
-	CGS_FUNC_ADEV;
-
-	return amdgpu_atom_execute_table(
-		adev->mode_info.atom_context, table, args);
-}
-
 static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_clockgating_state state)
@@ -223,7 +150,6 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
 	return r;
 }
 
-
 static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 {
 	CGS_FUNC_ADEV;
@@ -271,18 +197,6 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 	return result;
 }
 
-static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
-{
-	CGS_FUNC_ADEV;
-	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
-		release_firmware(adev->pm.fw);
-		adev->pm.fw = NULL;
-		return 0;
-	}
-	/* cannot release other firmware because they are not created by cgs */
-	return -EINVAL;
-}
-
 static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type)
 {
@@ -326,34 +240,6 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
 	return fw_version;
 }
 
-static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
-					bool en)
-{
-	CGS_FUNC_ADEV;
-
-	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
-		adev->gfx.rlc.funcs->exit_safe_mode == NULL)
-		return 0;
-
-	if (en)
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
-	else
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
-
-	return 0;
-}
-
-static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
-					bool lock)
-{
-	CGS_FUNC_ADEV;
-
-	if (lock)
-		mutex_lock(&adev->grbm_idx_mutex);
-	else
-		mutex_unlock(&adev->grbm_idx_mutex);
-}
-
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -598,97 +484,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	return 0;
 }
 
-static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
-{
-	CGS_FUNC_ADEV;
-	return amdgpu_sriov_vf(adev);
-}
-
-static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
-					  struct cgs_display_info *info)
-{
-	CGS_FUNC_ADEV;
-	struct cgs_mode_info *mode_info;
-
-	if (info == NULL)
-		return -EINVAL;
-
-	mode_info = info->mode_info;
-	if (mode_info)
-		/* if the displays are off, vblank time is max */
-		mode_info->vblank_time_us = 0xffffffff;
-
-	if (!amdgpu_device_has_dc_support(adev)) {
-		struct amdgpu_crtc *amdgpu_crtc;
-		struct drm_device *ddev = adev->ddev;
-		struct drm_crtc *crtc;
-		uint32_t line_time_us, vblank_lines;
-
-		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
-			list_for_each_entry(crtc,
-					&ddev->mode_config.crtc_list, head) {
-				amdgpu_crtc = to_amdgpu_crtc(crtc);
-				if (crtc->enabled) {
-					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
-					info->display_count++;
-				}
-				if (mode_info != NULL &&
-					crtc->enabled && amdgpu_crtc->enabled &&
-					amdgpu_crtc->hw_mode.clock) {
-					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-								amdgpu_crtc->hw_mode.clock;
-					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
-								amdgpu_crtc->hw_mode.crtc_vdisplay +
-								(amdgpu_crtc->v_border * 2);
-					mode_info->vblank_time_us = vblank_lines * line_time_us;
-					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-					/* we have issues with mclk switching with refresh rates
-					 * over 120 hz on the non-DC code.
-					 */
-					if (mode_info->refresh_rate > 120)
-						mode_info->vblank_time_us = 0;
-					mode_info = NULL;
-				}
-			}
-		}
-	} else {
-		info->display_count = adev->pm.pm_display_cfg.num_display;
-		if (mode_info != NULL) {
-			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
-			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
-		}
-	}
-	return 0;
-}
-
-
-static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
-{
-	CGS_FUNC_ADEV;
-
-	adev->pm.dpm_enabled = enabled;
-
-	return 0;
-}
-
 static const struct cgs_ops amdgpu_cgs_ops = {
 	.read_register = amdgpu_cgs_read_register,
 	.write_register = amdgpu_cgs_write_register,
 	.read_ind_register = amdgpu_cgs_read_ind_register,
 	.write_ind_register = amdgpu_cgs_write_ind_register,
-	.get_pci_resource = amdgpu_cgs_get_pci_resource,
-	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
-	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
-	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
 	.get_firmware_info = amdgpu_cgs_get_firmware_info,
-	.rel_firmware = amdgpu_cgs_rel_firmware,
 	.set_powergating_state = amdgpu_cgs_set_powergating_state,
 	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
-	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
-	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
-	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
-	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
-	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
 };
 
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index f2814ae7ecdd..cab34a4b65cc 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -76,17 +76,6 @@ enum cgs_ucode_id {
 	CGS_UCODE_ID_MAXIMUM,
 };
 
-/*
- * enum cgs_resource_type - GPU resource type
- */
-enum cgs_resource_type {
-	CGS_RESOURCE_TYPE_MMIO = 0,
-	CGS_RESOURCE_TYPE_FB,
-	CGS_RESOURCE_TYPE_IO,
-	CGS_RESOURCE_TYPE_DOORBELL,
-	CGS_RESOURCE_TYPE_ROM,
-};
-
 /**
  * struct cgs_firmware_info - Firmware information
  */
@@ -104,17 +93,6 @@ struct cgs_firmware_info {
 	bool			is_kicker;
 };
 
-struct cgs_mode_info {
-	uint32_t		refresh_rate;
-	uint32_t		vblank_time_us;
-};
-
-struct cgs_display_info {
-	uint32_t		display_count;
-	uint32_t		active_display_mask;
-	struct cgs_mode_info *mode_info;
-};
-
 typedef unsigned long cgs_handle_t;
 
 /**
@@ -170,73 +148,10 @@ typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs
 #define CGS_WREG32_FIELD_IND(device, space, reg, field, val)	\
 	cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
 
-/**
- * cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
- * @cgs_device:	opaque device handle
- * @resource_type:	Type of Resource (MMIO, IO, ROM, FB, DOORBELL)
- * @size:	size of the region
- * @offset:	offset from the start of the region
- * @resource_base:	base address (not including offset) returned
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_get_pci_resource_t)(struct cgs_device *cgs_device,
-				      enum cgs_resource_type resource_type,
-				      uint64_t size,
-				      uint64_t offset,
-				      uint64_t *resource_base);
-
-/**
- * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
- * @cgs_device:	opaque device handle
- * @table:	data table index
- * @size:	size of the table (output, may be NULL)
- * @frev:	table format revision (output, may be NULL)
- * @crev:	table content revision (output, may be NULL)
- *
- * Return: Pointer to start of the table, or NULL on failure
- */
-typedef const void *(*cgs_atom_get_data_table_t)(
-	struct cgs_device *cgs_device, unsigned table,
-	uint16_t *size, uint8_t *frev, uint8_t *crev);
-
-/**
- * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
- * @cgs_device:	opaque device handle
- * @table:	data table index
- * @frev:	table format revision (output, may be NULL)
- * @crev:	table content revision (output, may be NULL)
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_atom_get_cmd_table_revs_t)(struct cgs_device *cgs_device, unsigned table,
-					     uint8_t *frev, uint8_t *crev);
-
-/**
- * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
- * @cgs_device: opaque device handle
- * @table:	command table index
- * @args:	arguments
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device,
-					 unsigned table, void *args);
-
-/**
- * cgs_get_firmware_info - Get the firmware information from core driver
- * @cgs_device: opaque device handle
- * @type: the firmware type
- * @info: returend firmware information
- *
- * Return: 0 on success, -errno otherwise
- */
 typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
 				     enum cgs_ucode_id type,
 				     struct cgs_firmware_info *info);
 
-typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
-					 enum cgs_ucode_id type);
 
 typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
@@ -246,43 +161,17 @@ typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device,
 				  enum amd_ip_block_type block_type,
 				  enum amd_clockgating_state state);
 
-typedef int(*cgs_get_active_displays_info)(
-					struct cgs_device *cgs_device,
-					struct cgs_display_info *info);
-
-typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled);
-
-typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
-
-typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
-
-typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
-
 struct cgs_ops {
 	/* MMIO access */
 	cgs_read_register_t read_register;
 	cgs_write_register_t write_register;
 	cgs_read_ind_register_t read_ind_register;
 	cgs_write_ind_register_t write_ind_register;
-	/* PCI resources */
-	cgs_get_pci_resource_t get_pci_resource;
-	/* ATOM BIOS */
-	cgs_atom_get_data_table_t atom_get_data_table;
-	cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
-	cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
 	/* Firmware Info */
 	cgs_get_firmware_info get_firmware_info;
-	cgs_rel_firmware rel_firmware;
 	/* cg pg interface*/
 	cgs_set_powergating_state set_powergating_state;
 	cgs_set_clockgating_state set_clockgating_state;
-	/* display manager */
-	cgs_get_active_displays_info get_active_displays_info;
-	/* notify dpm enabled */
-	cgs_notify_dpm_enabled notify_dpm_enabled;
-	cgs_is_virtualization_enabled_t is_virtualization_enabled;
-	cgs_enter_safe_mode enter_safe_mode;
-	cgs_lock_grbm_idx lock_grbm_idx;
 };
 
 struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -309,40 +198,12 @@ struct cgs_device
 #define cgs_write_ind_register(dev,space,index,value)		\
 	CGS_CALL(write_ind_register,dev,space,index,value)
 
-#define cgs_atom_get_data_table(dev,table,size,frev,crev)	\
-	CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
-#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev)	\
-	CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
-#define cgs_atom_exec_cmd_table(dev,table,args)		\
-	CGS_CALL(atom_exec_cmd_table,dev,table,args)
-
 #define cgs_get_firmware_info(dev, type, info)	\
 	CGS_CALL(get_firmware_info, dev, type, info)
-#define cgs_rel_firmware(dev, type)	\
-	CGS_CALL(rel_firmware, dev, type)
 #define cgs_set_powergating_state(dev, block_type, state)	\
 	CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)	\
 	CGS_CALL(set_clockgating_state, dev, block_type, state)
-#define cgs_notify_dpm_enabled(dev, enabled)	\
-	CGS_CALL(notify_dpm_enabled, dev, enabled)
-
-#define cgs_get_active_displays_info(dev, info)	\
-	CGS_CALL(get_active_displays_info, dev, info)
-
-#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \
-	resource_base) \
-	CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
-	resource_base)
-
-#define cgs_is_virtualization_enabled(cgs_device) \
-		CGS_CALL(is_virtualization_enabled, cgs_device)
-
-#define cgs_enter_safe_mode(cgs_device, en) \
-		CGS_CALL(enter_safe_mode, cgs_device, en)
-
-#define cgs_lock_grbm_idx(cgs_device, lock) \
-		CGS_CALL(lock_grbm_idx, cgs_device, lock)
 
 
 #endif /* _CGS_COMMON_H */

From 43fa561fd07fe707815d1b72472f6f5829223a52 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 28 Mar 2018 13:42:45 -0500
Subject: [PATCH 0326/1286] drm/amdgpu: remove duplicate cg/pg wrapper
 functions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
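A minimal sketch of the substitution made at every call site (names
taken from the hunks below); the removed cgs wrappers only looped over
adev->ip_blocks and duplicated the amdgpu_device_ip_* helpers:

	/* before: indirect call through the cgs ops table */
	cgs_set_powergating_state(hwmgr->device,
				  AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE);

	/* after: call the device helper directly; it now takes a void *
	 * so powerplay can pass hwmgr->adev without needing the
	 * struct amdgpu_device definition */
	amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					       AMD_IP_BLOCK_TYPE_UVD,
					       AMD_PG_STATE_GATE);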
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |  4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c       | 44 -------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    |  6 ++-
 drivers/gpu/drm/amd/include/cgs_common.h      | 31 -------------
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c |  8 ++--
 .../powerplay/hwmgr/smu7_clockpowergating.c   | 16 +++----
 .../gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c  | 20 ++++-----
 .../drm/amd/powerplay/smumgr/fiji_smumgr.c    |  8 ++--
 8 files changed, 30 insertions(+), 107 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0193f6ced00b..3000c4abe34f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -222,10 +222,10 @@ enum amdgpu_kiq_irq {
 	AMDGPU_CP_KIQ_IRQ_LAST
 };
 
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state);
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_powergating_state state);
 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index dc28fa63bf51..a8a942c60ea2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -108,48 +108,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 	WARN(1, "Invalid indirect register space");
 }
 
-static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
-				  enum amd_ip_block_type block_type,
-				  enum amd_clockgating_state state)
-{
-	CGS_FUNC_ADEV;
-	int i, r = -1;
-
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
-
-		if (adev->ip_blocks[i].version->type == block_type) {
-			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
-								(void *)adev,
-									state);
-			break;
-		}
-	}
-	return r;
-}
-
-static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
-				  enum amd_ip_block_type block_type,
-				  enum amd_powergating_state state)
-{
-	CGS_FUNC_ADEV;
-	int i, r = -1;
-
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
-
-		if (adev->ip_blocks[i].version->type == block_type) {
-			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
-								(void *)adev,
-									state);
-			break;
-		}
-	}
-	return r;
-}
-
 static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 {
 	CGS_FUNC_ADEV;
@@ -490,8 +448,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	.read_ind_register = amdgpu_cgs_read_ind_register,
 	.write_ind_register = amdgpu_cgs_write_ind_register,
 	.get_firmware_info = amdgpu_cgs_get_firmware_info,
-	.set_powergating_state = amdgpu_cgs_set_powergating_state,
-	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
 };
 
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 34af664b9f93..a53926580b3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1039,10 +1039,11 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
  * the hardware IP specified.
  * Returns the error code from the last instance.
  */
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1072,10 +1073,11 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
  * the hardware IP specified.
  * Returns the error code from the last instance.
  */
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_powergating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index cab34a4b65cc..a69deb3a2ac0 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -42,20 +42,6 @@ enum cgs_ind_reg {
 	CGS_IND_REG__AUDIO_ENDPT
 };
 
-/**
- * enum cgs_engine - Engines that can be statically power-gated
- */
-enum cgs_engine {
-	CGS_ENGINE__UVD,
-	CGS_ENGINE__VCE,
-	CGS_ENGINE__VP8,
-	CGS_ENGINE__ACP_DMA,
-	CGS_ENGINE__ACP_DSP0,
-	CGS_ENGINE__ACP_DSP1,
-	CGS_ENGINE__ISP,
-	/* ... */
-};
-
 /*
  * enum cgs_ucode_id - Firmware types for different IPs
  */
@@ -152,15 +138,6 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
 				     enum cgs_ucode_id type,
 				     struct cgs_firmware_info *info);
 
-
-typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
-				  enum amd_ip_block_type block_type,
-				  enum amd_powergating_state state);
-
-typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device,
-				  enum amd_ip_block_type block_type,
-				  enum amd_clockgating_state state);
-
 struct cgs_ops {
 	/* MMIO access */
 	cgs_read_register_t read_register;
@@ -169,9 +146,6 @@ struct cgs_ops {
 	cgs_write_ind_register_t write_ind_register;
 	/* Firmware Info */
 	cgs_get_firmware_info get_firmware_info;
-	/* cg pg interface*/
-	cgs_set_powergating_state set_powergating_state;
-	cgs_set_clockgating_state set_clockgating_state;
 };
 
 struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -200,10 +174,5 @@ struct cgs_device
 
 #define cgs_get_firmware_info(dev, type, info)	\
 	CGS_CALL(get_firmware_info, dev, type, info)
-#define cgs_set_powergating_state(dev, block_type, state)	\
-	CGS_CALL(set_powergating_state, dev, block_type, state)
-#define cgs_set_clockgating_state(dev, block_type, state)	\
-	CGS_CALL(set_clockgating_state, dev, block_type, state)
-
 
 #endif /* _CGS_COMMON_H */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 1ca6a13be6a3..66c49b89cdb4 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -288,10 +288,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
 		if (*level & profile_mode_mask) {
 			hwmgr->saved_dpm_level = hwmgr->dpm_level;
 			hwmgr->en_umd_pstate = true;
-			cgs_set_clockgating_state(hwmgr->device,
+			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_GFX,
 						AMD_CG_STATE_UNGATE);
-			cgs_set_powergating_state(hwmgr->device,
+			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_GFX,
 					AMD_PG_STATE_UNGATE);
 		}
@@ -301,10 +301,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
 				*level = hwmgr->saved_dpm_level;
 			hwmgr->en_umd_pstate = false;
-			cgs_set_clockgating_state(hwmgr->device,
+			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_GFX,
 					AMD_CG_STATE_GATE);
-			cgs_set_powergating_state(hwmgr->device,
+			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_GFX,
 					AMD_PG_STATE_GATE);
 		}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index f4cbaee4e2ca..6d72a5600917 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -147,20 +147,20 @@ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	data->uvd_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 				AMD_IP_BLOCK_TYPE_UVD,
 				AMD_CG_STATE_GATE);
 		smu7_update_uvd_dpm(hwmgr, true);
 		smu7_powerdown_uvd(hwmgr);
 	} else {
 		smu7_powerup_uvd(hwmgr);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 				AMD_IP_BLOCK_TYPE_UVD,
 				AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_UNGATE);
 		smu7_update_uvd_dpm(hwmgr, false);
@@ -175,20 +175,20 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	data->vce_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_VCE,
 						AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_GATE);
 		smu7_update_vce_dpm(hwmgr, true);
 		smu7_powerdown_vce(hwmgr);
 	} else {
 		smu7_powerup_vce(hwmgr);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_VCE,
 						AMD_PG_STATE_UNGATE);
 		smu7_update_vce_dpm(hwmgr, false);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index c2f93aa1d2e8..50690c72b2ea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -1892,20 +1892,20 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	data->uvd_power_gated = bgate;
 
 	if (bgate) {
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_CG_STATE_GATE);
 		smu8_dpm_update_uvd_dpm(hwmgr, true);
 		smu8_dpm_powerdown_uvd(hwmgr);
 	} else {
 		smu8_dpm_powerup_uvd(hwmgr);
-		cgs_set_clockgating_state(hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 						AMD_IP_BLOCK_TYPE_UVD,
 						AMD_PG_STATE_UNGATE);
 		smu8_dpm_update_uvd_dpm(hwmgr, false);
@@ -1918,12 +1918,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	struct smu8_hwmgr *data = hwmgr->backend;
 
 	if (bgate) {
-		cgs_set_powergating_state(
-					hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_VCE,
 					AMD_PG_STATE_GATE);
-		cgs_set_clockgating_state(
-					hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_VCE,
 					AMD_CG_STATE_GATE);
 		smu8_enable_disable_vce_dpm(hwmgr, false);
@@ -1932,12 +1930,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	} else {
 		smu8_dpm_powerup_vce(hwmgr);
 		data->vce_power_gated = false;
-		cgs_set_clockgating_state(
-					hwmgr->device,
+		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_VCE,
 					AMD_CG_STATE_UNGATE);
-		cgs_set_powergating_state(
-					hwmgr->device,
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
 					AMD_IP_BLOCK_TYPE_VCE,
 					AMD_PG_STATE_UNGATE);
 		smu8_dpm_update_vce_dpm(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index d023494c3eae..dae3422366b3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -306,13 +306,13 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
 	}
 
 	/* To initialize all clock gating before RLC loaded and running.*/
-	cgs_set_clockgating_state(hwmgr->device,
+	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 			AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
-	cgs_set_clockgating_state(hwmgr->device,
+	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 			AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
-	cgs_set_clockgating_state(hwmgr->device,
+	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 			AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
-	cgs_set_clockgating_state(hwmgr->device,
+	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
 			AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
 
 	/* Setup SoftRegsStart here for register lookup in case

From 2b816a1d773e755332733a89bdd276e08f935933 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 27 Mar 2018 16:37:30 -0500
Subject: [PATCH 0327/1286] drm/amdgpu/sdma4: use a helper for
 SDMA_OP_POLL_REGMEM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Rather than open-coding it in a bunch of functions.
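
With the helper in place, each of those open-coded packet sequences
collapses to a single call; as a rough sketch (mirroring the
sdma_v4_0_ring_emit_reg_wait() hunk below):

	/* poll a register (mem_space = 0, hdp = 0) until (*reg & mask) == val */
	sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);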

Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 62 ++++++++++++++------------
 1 file changed, 34 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 399f876f9cad..2c618a1be03e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -360,6 +360,31 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
 
 }
 
+static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
+				   int mem_space, int hdp,
+				   uint32_t addr0, uint32_t addr1,
+				   uint32_t ref, uint32_t mask,
+				   uint32_t inv)
+{
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
+			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
+			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+	if (mem_space) {
+		/* memory */
+		amdgpu_ring_write(ring, addr0);
+		amdgpu_ring_write(ring, addr1);
+	} else {
+		/* registers */
+		amdgpu_ring_write(ring, addr0 << 2);
+		amdgpu_ring_write(ring, addr1 << 2);
+	}
+	amdgpu_ring_write(ring, ref); /* reference */
+	amdgpu_ring_write(ring, mask); /* mask */
+	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
+}
+
 /**
  * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
  *
@@ -378,15 +403,10 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	else
 		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
 
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
-			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
-			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
-	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
-	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
-	amdgpu_ring_write(ring, ref_and_mask); /* reference */
-	amdgpu_ring_write(ring, ref_and_mask); /* mask */
-	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
-			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+	sdma_v4_0_wait_reg_mem(ring, 0, 1,
+			       adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+			       adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+			       ref_and_mask, ref_and_mask, 10);
 }
 
 /**
@@ -1114,16 +1134,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
 	/* wait for idle */
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
-			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
-			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
-			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
-	amdgpu_ring_write(ring, addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
-	amdgpu_ring_write(ring, seq); /* reference */
-	amdgpu_ring_write(ring, 0xffffffff); /* mask */
-	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
-			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+	sdma_v4_0_wait_reg_mem(ring, 1, 0,
+			       addr & 0xfffffffc,
+			       upper_32_bits(addr) & 0xffffffff,
+			       seq, 0xffffffff, 4);
 }
 
 
@@ -1154,15 +1168,7 @@ static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
 static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 					 uint32_t val, uint32_t mask)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
-			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
-			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
-	amdgpu_ring_write(ring, reg << 2);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, val); /* reference */
-	amdgpu_ring_write(ring, mask); /* mask */
-	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
-			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
+	sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
 }
 
 static int sdma_v4_0_early_init(void *handle)

From 3ef1381d4e7ddd3e063cf6fd33df96badfb66839 Mon Sep 17 00:00:00 2001
From: Hawking Zhang <Hawking.Zhang@amd.com>
Date: Wed, 28 Mar 2018 16:23:28 +0800
Subject: [PATCH 0328/1286] drm/amdgpu: add df v1_7 header files

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/include/asic_reg/df/df_1_7_default.h  | 26 ++++++++++
 .../amd/include/asic_reg/df/df_1_7_offset.h   | 33 +++++++++++++
 .../amd/include/asic_reg/df/df_1_7_sh_mask.h  | 48 +++++++++++++++++++
 3 files changed, 107 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h

diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h
new file mode 100644
index 000000000000..9e19e723081b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_1_7_DEFAULT_HEADER
+#define _df_1_7_DEFAULT_HEADER
+
+#define mmFabricConfigAccessControl_DEFAULT						0x00000000
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
new file mode 100644
index 000000000000..2b305dd021e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_1_7_OFFSET_HEADER
+#define _df_1_7_OFFSET_HEADER
+
+#define mmFabricConfigAccessControl									0x0410
+#define mmFabricConfigAccessControl_BASE_IDX								0
+
+#define mmDF_PIE_AON0_DfGlobalClkGater									0x00fc
+#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX								0
+
+#define mmDF_CS_AON0_DramBaseAddress0									0x0044
+#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX								0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
new file mode 100644
index 000000000000..2ba849798924
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_1_7_SH_MASK_HEADER
+#define _df_1_7_SH_MASK_HEADER
+
+/* FabricConfigAccessControl */
+#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT						0x0
+#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT						0x1
+#define FabricConfigAccessControl__CfgRegInstID__SHIFT							0x10
+#define FabricConfigAccessControl__CfgRegInstAccEn_MASK							0x00000001L
+#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK						0x00000002L
+#define FabricConfigAccessControl__CfgRegInstID_MASK							0x00FF0000L
+
+/* DF_PIE_AON0_DfGlobalClkGater */
+#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT							0x0
+#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK							0x0000000FL
+
+/* DF_CS_AON0_DramBaseAddress0 */
+#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT							0x0
+#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT						0x1
+#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT						0x4
+#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT						0x8
+#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT						0xc
+#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK							0x00000001L
+#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK						0x00000002L
+#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK							0x000000F0L
+#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK							0x00000700L
+#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK							0xFFFFF000L
+
+#endif

From 634c96e3f3c7982d4b3ad14f8e004d11af184e91 Mon Sep 17 00:00:00 2001
From: Hawking Zhang <Hawking.Zhang@amd.com>
Date: Fri, 23 Mar 2018 11:37:25 +0800
Subject: [PATCH 0329/1286] drm/amdgpu: add df callback functions structure

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3000c4abe34f..df409ddb97e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1368,7 +1368,17 @@ struct amdgpu_nbio_funcs {
 	void (*detect_hw_virt)(struct amdgpu_device *adev);
 };
 
-
+struct amdgpu_df_funcs {
+	void (*init)(struct amdgpu_device *adev);
+	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+				      bool enable);
+	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+						 bool enable);
+	void (*get_clockgating_state)(struct amdgpu_device *adev,
+				      u32 *flags);
+};
 /* Define the HW IP blocks will be used in driver , add more if necessary */
 enum amd_hw_ip_block_type {
 	GC_HWIP = 1,
@@ -1588,6 +1598,7 @@ struct amdgpu_device {
 	uint32_t 		*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
 	const struct amdgpu_nbio_funcs	*nbio_funcs;
+	const struct amdgpu_df_funcs	*df_funcs;
 
 	/* delayed work_func for deferring clockgating during resume */
 	struct delayed_work     late_init_work;

From d99605ead70efa0dc259c28f9b258184e2b3e77c Mon Sep 17 00:00:00 2001
From: Hawking Zhang <Hawking.Zhang@amd.com>
Date: Wed, 28 Mar 2018 16:27:56 +0800
Subject: [PATCH 0330/1286] drm/amdgpu/df: implement df v1_7 callback functions

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile  |   4 +
 drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 112 +++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/df_v1_7.h |  40 ++++++++++
 3 files changed, 156 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v1_7.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v1_7.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 2ca2b5154d52..2fe4a0bf98c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -64,6 +64,10 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
 amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
 
+# add DF block
+amdgpu-y += \
+	df_v1_7.o
+
 # add GMC block
 amdgpu-y += \
 	gmc_v7_0.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
new file mode 100644
index 000000000000..4ffda996660f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v1_7.h"
+
+#include "df/df_1_7_default.h"
+#include "df/df_1_7_offset.h"
+#include "df/df_1_7_sh_mask.h"
+
+static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
+
+static void df_v1_7_init(struct amdgpu_device *adev)
+{
+}
+
+static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
+                                          bool enable)
+{
+	u32 tmp;
+
+	if (enable) {
+		tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
+		tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
+		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
+	} else
+		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
+			     mmFabricConfigAccessControl_DEFAULT);
+}
+
+static u32 df_v1_7_get_fb_channel_number(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
+	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
+	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+
+	return tmp;
+}
+
+static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
+{
+	int fb_channel_number;
+
+	fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+
+	return df_v1_7_channel_number[fb_channel_number];
+}
+
+static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+						     bool enable)
+{
+	u32 tmp;
+
+	/* Put DF on broadcast mode */
+	adev->df_funcs->enable_broadcast_mode(adev, true);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
+		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+		tmp |= DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY;
+		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+	} else {
+		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+		tmp |= DF_V1_7_MGCG_DISABLE;
+		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+	}
+
+	/* Exit broadcast mode */
+	adev->df_funcs->enable_broadcast_mode(adev, false);
+}
+
+static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
+					  u32 *flags)
+{
+	u32 tmp;
+
+	/* AMD_CG_SUPPORT_DF_MGCG */
+	tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+	if (tmp & DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY)
+		*flags |= AMD_CG_SUPPORT_DF_MGCG;
+}
+
+const struct amdgpu_df_funcs df_v1_7_funcs = {
+	.init = df_v1_7_init,
+	.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
+	.get_fb_channel_number = df_v1_7_get_fb_channel_number,
+	.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
+	.update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
+	.get_clockgating_state = df_v1_7_get_clockgating_state,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.h b/drivers/gpu/drm/amd/amdgpu/df_v1_7.h
new file mode 100644
index 000000000000..74621104c487
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DF_V1_7_H__
+#define __DF_V1_7_H__
+
+#include "soc15_common.h"
+enum DF_V1_7_MGCG
+{
+	DF_V1_7_MGCG_DISABLE = 0,
+	DF_V1_7_MGCG_ENABLE_00_CYCLE_DELAY =1,
+	DF_V1_7_MGCG_ENABLE_01_CYCLE_DELAY =2,
+	DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY =13,
+	DF_V1_7_MGCG_ENABLE_31_CYCLE_DELAY =14,
+	DF_V1_7_MGCG_ENABLE_63_CYCLE_DELAY =15
+};
+
+extern const struct amdgpu_df_funcs df_v1_7_funcs;
+
+#endif

From 070706c03b3e67207cc41bd97b67ff0930d79cb3 Mon Sep 17 00:00:00 2001
From: Hawking Zhang <Hawking.Zhang@amd.com>
Date: Wed, 28 Mar 2018 17:08:04 +0800
Subject: [PATCH 0331/1286] drm/amdgpu: switch to use df callback functions

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 35 +--------------
 drivers/gpu/drm/amd/amdgpu/soc15.c    | 62 ++-------------------------
 2 files changed, 5 insertions(+), 92 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e687363900bb..070946e1e4a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -714,7 +714,6 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 {
-	u32 tmp;
 	int chansize, numchan;
 	int r;
 
@@ -727,39 +726,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 		else
 			chansize = 128;
 
-		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
-		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
-		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
-		switch (tmp) {
-		case 0:
-		default:
-			numchan = 1;
-			break;
-		case 1:
-			numchan = 2;
-			break;
-		case 2:
-			numchan = 0;
-			break;
-		case 3:
-			numchan = 4;
-			break;
-		case 4:
-			numchan = 0;
-			break;
-		case 5:
-			numchan = 8;
-			break;
-		case 6:
-			numchan = 0;
-			break;
-		case 7:
-			numchan = 16;
-			break;
-		case 8:
-			numchan = 2;
-			break;
-		}
+		numchan = adev->df_funcs->get_hbm_channel_number(adev);
 		adev->gmc.vram_width = numchan * chansize;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 51cf8a30f6c2..654b015d5e05 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -52,6 +52,7 @@
 #include "gmc_v9_0.h"
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
+#include "df_v1_7.h"
 #include "vega10_ih.h"
 #include "sdma_v4_0.h"
 #include "uvd_v7_0.h"
@@ -60,33 +61,6 @@
 #include "dce_virtual.h"
 #include "mxgpu_ai.h"
 
-#define mmFabricConfigAccessControl                                                                    0x0410
-#define mmFabricConfigAccessControl_BASE_IDX                                                           0
-#define mmFabricConfigAccessControl_DEFAULT                                      0x00000000
-//FabricConfigAccessControl
-#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT                                                     0x0
-#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT                                                0x1
-#define FabricConfigAccessControl__CfgRegInstID__SHIFT                                                        0x10
-#define FabricConfigAccessControl__CfgRegInstAccEn_MASK                                                       0x00000001L
-#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK                                                  0x00000002L
-#define FabricConfigAccessControl__CfgRegInstID_MASK                                                          0x00FF0000L
-
-
-#define mmDF_PIE_AON0_DfGlobalClkGater                                                                 0x00fc
-#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX                                                        0
-//DF_PIE_AON0_DfGlobalClkGater
-#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT                                                         0x0
-#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK                                                           0x0000000FL
-
-enum {
-	DF_MGCG_DISABLE = 0,
-	DF_MGCG_ENABLE_00_CYCLE_DELAY =1,
-	DF_MGCG_ENABLE_01_CYCLE_DELAY =2,
-	DF_MGCG_ENABLE_15_CYCLE_DELAY =13,
-	DF_MGCG_ENABLE_31_CYCLE_DELAY =14,
-	DF_MGCG_ENABLE_63_CYCLE_DELAY =15
-};
-
 #define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
@@ -521,6 +495,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	else
 		adev->nbio_funcs = &nbio_v6_1_funcs;
 
+	adev->df_funcs = &df_v1_7_funcs;
 	adev->nbio_funcs->detect_hw_virt(adev);
 
 	if (amdgpu_sriov_vf(adev))
@@ -871,32 +846,6 @@ static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *ade
 		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
 }
 
-static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
-						       bool enable)
-{
-	uint32_t data;
-
-	/* Put DF on broadcast mode */
-	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
-	data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
-	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);
-
-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
-		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
-		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
-		data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
-		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
-	} else {
-		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
-		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
-		data |= DF_MGCG_DISABLE;
-		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
-	}
-
-	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
-	       mmFabricConfigAccessControl_DEFAULT);
-}
-
 static int soc15_common_set_clockgating_state(void *handle,
 					    enum amd_clockgating_state state)
 {
@@ -920,7 +869,7 @@ static int soc15_common_set_clockgating_state(void *handle,
 				state == AMD_CG_STATE_GATE ? true : false);
 		soc15_update_rom_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
-		soc15_update_df_medium_grain_clock_gating(adev,
+		adev->df_funcs->update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		break;
 	case CHIP_RAVEN:
@@ -973,10 +922,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
 	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
 		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
 
-	/* AMD_CG_SUPPORT_DF_MGCG */
-	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
-	if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
-		*flags |= AMD_CG_SUPPORT_DF_MGCG;
+	adev->df_funcs->get_clockgating_state(adev, flags);
 }
 
 static int soc15_common_set_powergating_state(void *handle,

From c99c7d6ef9f610145308577ae5845cd6e14051e2 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 30 Mar 2018 13:05:44 +0800
Subject: [PATCH 0332/1286] drm/amd/display: Disentangle dc.h include from
 amdgpu.h

Use a forward declaration of struct dc in amdgpu_dm.h instead of
including dc.h, to make the header files more standalone.
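
As a minimal sketch of the pattern (member names here are only
illustrative), the header just needs the type name to declare a
pointer:

	struct dc;	/* forward declaration, no dc.h required */

	struct amdgpu_display_manager {
		struct dc *dc;
		/* ... */
	};

Only the .c files that actually dereference struct dc (such as
amdgpu_dm_color.c below) need to include "dc.h".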

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h       | 2 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b68400c1154b..3af699b24e10 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -28,7 +28,6 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
-#include "dc.h"
 
 /*
  * This file contains the definition for amdgpu_display_manager
@@ -53,6 +52,7 @@
 struct amdgpu_device;
 struct drm_device;
 struct amdgpu_dm_irq_handler_data;
+struct dc;
 
 struct amdgpu_dm_prev_state {
 	struct drm_framebuffer *fb;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index f6cb502c303f..ef5fad8c5aac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -25,6 +25,7 @@
 
 #include "amdgpu_mode.h"
 #include "amdgpu_dm.h"
+#include "dc.h"
 #include "modules/color/color_gamma.h"
 
 #define MAX_DRM_LUT_VALUE 0xFFFF

From a110dfe3ab28fd4c52f785a7b80131ef5e8b61fb Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 30 Mar 2018 15:11:36 +0100
Subject: [PATCH 0333/1286] drm/radeon: Move GEM BO to drm_framebuffer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since drm_framebuffer can now store GEM objects directly, place them
there rather than in our own subclass. As this makes the framebuffer
create_handle and destroy functions the same as the GEM framebuffer
helper, we can reuse those.
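
Concretely (as the hunks below show), the BO now lives in the base
framebuffer's obj[] array and the driver's framebuffer funcs simply
point at the generic GEM helpers:

	rfb->base.obj[0] = obj;

	static const struct drm_framebuffer_funcs radeon_fb_funcs = {
		.destroy = drm_gem_fb_destroy,
		.create_handle = drm_gem_fb_create_handle,
	};

so the hand-rolled destroy/create_handle implementations can be
deleted.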

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Daniel Stone <daniels@collabora.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: David (ChunMing) Zhou <David1.Zhou@amd.com>
Cc: amd-gfx@lists.freedesktop.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/radeon/atombios_crtc.c      | 10 +++----
 drivers/gpu/drm/radeon/radeon_device.c      |  4 +--
 drivers/gpu/drm/radeon/radeon_display.c     | 31 +++++----------------
 drivers/gpu/drm/radeon/radeon_fb.c          |  8 +++---
 drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 11 +++-----
 drivers/gpu/drm/radeon/radeon_mode.h        |  1 -
 6 files changed, 22 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 02baaaf20e9d..028a811c1462 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 	/* If atomic, assume fb object is pinned & idle & fenced and
 	 * just update base pointers
 	 */
-	obj = radeon_fb->obj;
+	obj = radeon_fb->base.obj[0];
 	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
@@ -1442,7 +1442,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		rbo = gem_to_radeon_bo(radeon_fb->base.obj[0]);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -1490,7 +1490,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 		target_fb = crtc->primary->fb;
 	}
 
-	obj = radeon_fb->obj;
+	obj = radeon_fb->base.obj[0];
 	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
@@ -1642,7 +1642,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		rbo = gem_to_radeon_bo(radeon_fb->base.obj[0]);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -2153,7 +2153,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
 		struct radeon_bo *rbo;
 
 		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		rbo = gem_to_radeon_bo(radeon_fb->base.obj[0]);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e415d2c097a7..30c5bc20a60b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1599,10 +1599,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 			}
 		}
 
-		if (rfb == NULL || rfb->obj == NULL) {
+		if (rfb == NULL || rfb->base.obj[0] == NULL) {
 			continue;
 		}
-		robj = gem_to_radeon_bo(rfb->obj);
+		robj = gem_to_radeon_bo(rfb->base.obj[0]);
 		/* don't unpin kernel fb objects */
 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
 			r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 26129b2b082d..dc300128283d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -32,6 +32,7 @@
 
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_edid.h>
@@ -502,14 +503,14 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 
 	/* schedule unpin of the old buffer */
 	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-	obj = old_radeon_fb->obj;
+	obj = old_radeon_fb->base.obj[0];
 
 	/* take a reference to the old object */
 	drm_gem_object_get(obj);
 	work->old_rbo = gem_to_radeon_bo(obj);
 
 	new_radeon_fb = to_radeon_framebuffer(fb);
-	obj = new_radeon_fb->obj;
+	obj = new_radeon_fb->base.obj[0];
 	new_rbo = gem_to_radeon_bo(obj);
 
 	/* pin the new buffer */
@@ -1285,27 +1286,9 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
 
 }
 
-static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
-
-	drm_gem_object_put_unlocked(radeon_fb->obj);
-	drm_framebuffer_cleanup(fb);
-	kfree(radeon_fb);
-}
-
-static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-						  struct drm_file *file_priv,
-						  unsigned int *handle)
-{
-	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
-
-	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
-}
-
 static const struct drm_framebuffer_funcs radeon_fb_funcs = {
-	.destroy = radeon_user_framebuffer_destroy,
-	.create_handle = radeon_user_framebuffer_create_handle,
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
 };
 
 int
@@ -1315,11 +1298,11 @@ radeon_framebuffer_init(struct drm_device *dev,
 			struct drm_gem_object *obj)
 {
 	int ret;
-	rfb->obj = obj;
+	rfb->base.obj[0] = obj;
 	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
 	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
 	if (ret) {
-		rfb->obj = NULL;
+		rfb->base.obj[0] = NULL;
 		return ret;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 57c5404a1654..6cd99f6a4305 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -312,9 +312,9 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
 
 	drm_fb_helper_unregister_fbi(&rfbdev->helper);
 
-	if (rfb->obj) {
-		radeonfb_destroy_pinned_object(rfb->obj);
-		rfb->obj = NULL;
+	if (rfb->base.obj[0]) {
+		radeonfb_destroy_pinned_object(rfb->base.obj[0]);
+		rfb->base.obj[0] = NULL;
 		drm_framebuffer_unregister_private(&rfb->base);
 		drm_framebuffer_cleanup(&rfb->base);
 	}
@@ -400,7 +400,7 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
 	if (!rdev->mode_info.rfbdev)
 		return false;
 
-	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.base.obj[0]))
 		return true;
 	return false;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 1f1856e0b1e0..50b3f556845a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -423,7 +423,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 	}
 
 	/* Pin framebuffer & get tilling informations */
-	obj = radeon_fb->obj;
+	obj = radeon_fb->base.obj[0];
 	rbo = gem_to_radeon_bo(obj);
 retry:
 	r = radeon_bo_reserve(rbo, false);
@@ -451,7 +451,7 @@ retry:
 			struct radeon_bo *old_rbo;
 			unsigned long nsize, osize;
 
-			old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+			old_rbo = gem_to_radeon_bo(fb->obj[0]);
 			osize = radeon_bo_size(old_rbo);
 			nsize = radeon_bo_size(rbo);
 			if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
@@ -558,8 +558,7 @@ retry:
 	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
-		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		rbo = gem_to_radeon_bo(fb->obj[0]);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -1093,11 +1092,9 @@ static void radeon_crtc_disable(struct drm_crtc *crtc)
 	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 	if (crtc->primary->fb) {
 		int r;
-		struct radeon_framebuffer *radeon_fb;
 		struct radeon_bo *rbo;
 
-		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		rbo = gem_to_radeon_bo(crtc->primary->fb->obj[0]);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3243e5e01432..cd93c80332f7 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -576,7 +576,6 @@ struct radeon_connector {
 
 struct radeon_framebuffer {
 	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
 };
 
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \

From 9a0f0c9d0c9de76324807571694f8c89b433e3f1 Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 30 Mar 2018 15:11:37 +0100
Subject: [PATCH 0334/1286] drm/radeon: radeon_framebuffer -> drm_framebuffer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since drm_framebuffer can now store GEM objects directly, place them
there rather than in our own subclass. As this makes the framebuffer
create_handle and destroy functions the same as the GEM framebuffer
helper, we can reuse those.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Daniel Stone <daniels@collabora.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: David (ChunMing) Zhou <David1.Zhou@amd.com>
Cc: amd-gfx@lists.freedesktop.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/radeon/atombios_crtc.c      | 32 ++++++---------------
 drivers/gpu/drm/radeon/radeon_device.c      |  6 ++--
 drivers/gpu/drm/radeon/radeon_display.c     | 30 +++++++++----------
 drivers/gpu/drm/radeon/radeon_fb.c          | 20 ++++++-------
 drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 11 ++-----
 drivers/gpu/drm/radeon/radeon_mode.h        |  7 +----
 6 files changed, 39 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 028a811c1462..efbd5816082d 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1145,7 +1145,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_framebuffer *radeon_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct radeon_bo *rbo;
@@ -1164,19 +1163,15 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-	if (atomic) {
-		radeon_fb = to_radeon_framebuffer(fb);
+	if (atomic)
 		target_fb = fb;
-	}
-	else {
-		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+	else
 		target_fb = crtc->primary->fb;
-	}
 
 	/* If atomic, assume fb object is pinned & idle & fenced and
 	 * just update base pointers
 	 */
-	obj = radeon_fb->base.obj[0];
+	obj = target_fb->obj[0];
 	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
@@ -1441,8 +1436,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
-		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = gem_to_radeon_bo(radeon_fb->base.obj[0]);
+		rbo = gem_to_radeon_bo(fb->obj[0]);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -1463,7 +1457,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_framebuffer *radeon_fb;
 	struct drm_gem_object *obj;
 	struct radeon_bo *rbo;
 	struct drm_framebuffer *target_fb;
@@ -1481,16 +1474,12 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-	if (atomic) {
-		radeon_fb = to_radeon_framebuffer(fb);
+	if (atomic)
 		target_fb = fb;
-	}
-	else {
-		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+	else
 		target_fb = crtc->primary->fb;
-	}
 
-	obj = radeon_fb->base.obj[0];
+	obj = target_fb->obj[0];
 	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
@@ -1641,8 +1630,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
-		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = gem_to_radeon_bo(radeon_fb->base.obj[0]);
+		rbo = gem_to_radeon_bo(fb->obj[0]);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -2149,11 +2137,9 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 	if (crtc->primary->fb) {
 		int r;
-		struct radeon_framebuffer *radeon_fb;
 		struct radeon_bo *rbo;
 
-		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-		rbo = gem_to_radeon_bo(radeon_fb->base.obj[0]);
+		rbo = gem_to_radeon_bo(crtc->primary->fb->obj[0]);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 30c5bc20a60b..90e17e29e12a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1587,7 +1587,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
+		struct drm_framebuffer *fb = crtc->primary->fb;
 		struct radeon_bo *robj;
 
 		if (radeon_crtc->cursor_bo) {
@@ -1599,10 +1599,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 			}
 		}
 
-		if (rfb == NULL || rfb->base.obj[0] == NULL) {
+		if (fb == NULL || fb->obj[0] == NULL) {
 			continue;
 		}
-		robj = gem_to_radeon_bo(rfb->base.obj[0]);
+		robj = gem_to_radeon_bo(fb->obj[0]);
 		/* don't unpin kernel fb objects */
 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
 			r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index dc300128283d..9d3ac8b981da 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -479,8 +479,6 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct radeon_framebuffer *old_radeon_fb;
-	struct radeon_framebuffer *new_radeon_fb;
 	struct drm_gem_object *obj;
 	struct radeon_flip_work *work;
 	struct radeon_bo *new_rbo;
@@ -502,15 +500,13 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
 
 	/* schedule unpin of the old buffer */
-	old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-	obj = old_radeon_fb->base.obj[0];
+	obj = crtc->primary->fb->obj[0];
 
 	/* take a reference to the old object */
 	drm_gem_object_get(obj);
 	work->old_rbo = gem_to_radeon_bo(obj);
 
-	new_radeon_fb = to_radeon_framebuffer(fb);
-	obj = new_radeon_fb->base.obj[0];
+	obj = fb->obj[0];
 	new_rbo = gem_to_radeon_bo(obj);
 
 	/* pin the new buffer */
@@ -1293,16 +1289,16 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
 
 int
 radeon_framebuffer_init(struct drm_device *dev,
-			struct radeon_framebuffer *rfb,
+			struct drm_framebuffer *fb,
 			const struct drm_mode_fb_cmd2 *mode_cmd,
 			struct drm_gem_object *obj)
 {
 	int ret;
-	rfb->base.obj[0] = obj;
-	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
-	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+	fb->obj[0] = obj;
+	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+	ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
 	if (ret) {
-		rfb->base.obj[0] = NULL;
+		fb->obj[0] = NULL;
 		return ret;
 	}
 	return 0;
@@ -1314,7 +1310,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
 			       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj;
-	struct radeon_framebuffer *radeon_fb;
+	struct drm_framebuffer *fb;
 	int ret;
 
 	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -1330,20 +1326,20 @@ radeon_user_framebuffer_create(struct drm_device *dev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
-	if (radeon_fb == NULL) {
+	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+	if (fb == NULL) {
 		drm_gem_object_put_unlocked(obj);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+	ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
 	if (ret) {
-		kfree(radeon_fb);
+		kfree(fb);
 		drm_gem_object_put_unlocked(obj);
 		return ERR_PTR(ret);
 	}
 
-	return &radeon_fb->base;
+	return fb;
 }
 
 static const struct drm_mode_config_funcs radeon_mode_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6cd99f6a4305..1179034024ae 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -43,7 +43,7 @@
  */
 struct radeon_fbdev {
 	struct drm_fb_helper helper;
-	struct radeon_framebuffer rfb;
+	struct drm_framebuffer fb;
 	struct radeon_device *rdev;
 };
 
@@ -246,13 +246,13 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 
 	info->par = rfbdev;
 
-	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
 		goto out;
 	}
 
-	fb = &rfbdev->rfb.base;
+	fb = &rfbdev->fb;
 
 	/* setup helper */
 	rfbdev->helper.fb = fb;
@@ -308,15 +308,15 @@ out:
 
 static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
 {
-	struct radeon_framebuffer *rfb = &rfbdev->rfb;
+	struct drm_framebuffer *fb = &rfbdev->fb;
 
 	drm_fb_helper_unregister_fbi(&rfbdev->helper);
 
-	if (rfb->base.obj[0]) {
-		radeonfb_destroy_pinned_object(rfb->base.obj[0]);
-		rfb->base.obj[0] = NULL;
-		drm_framebuffer_unregister_private(&rfb->base);
-		drm_framebuffer_cleanup(&rfb->base);
+	if (fb->obj[0]) {
+		radeonfb_destroy_pinned_object(fb->obj[0]);
+		fb->obj[0] = NULL;
+		drm_framebuffer_unregister_private(fb);
+		drm_framebuffer_cleanup(fb);
 	}
 	drm_fb_helper_fini(&rfbdev->helper);
 
@@ -400,7 +400,7 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
 	if (!rdev->mode_info.rfbdev)
 		return false;
 
-	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.base.obj[0]))
+	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->fb.obj[0]))
 		return true;
 	return false;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 50b3f556845a..35a205ae4318 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -374,7 +374,6 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct radeon_framebuffer *radeon_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct radeon_bo *rbo;
@@ -393,14 +392,10 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-	if (atomic) {
-		radeon_fb = to_radeon_framebuffer(fb);
+	if (atomic)
 		target_fb = fb;
-	}
-	else {
-		radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+	else
 		target_fb = crtc->primary->fb;
-	}
 
 	switch (target_fb->format->cpp[0] * 8) {
 	case 8:
@@ -423,7 +418,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 	}
 
 	/* Pin framebuffer & get tilling informations */
-	obj = radeon_fb->base.obj[0];
+	obj = target_fb->obj[0];
 	rbo = gem_to_radeon_bo(obj);
 retry:
 	r = radeon_bo_reserve(rbo, false);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index cd93c80332f7..fd470d6bf3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,7 +46,6 @@ struct radeon_device;
 #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
 #define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
 #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
-#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
 
 #define RADEON_MAX_HPD_PINS 7
 #define RADEON_MAX_CRTCS 6
@@ -574,10 +573,6 @@ struct radeon_connector {
 	int enabled_attribs;
 };
 
-struct radeon_framebuffer {
-	struct drm_framebuffer base;
-};
-
 #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
 				((em) == ATOM_ENCODER_MODE_DP_MST))
 
@@ -931,7 +926,7 @@ radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
 extern void
 radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
 int radeon_framebuffer_init(struct drm_device *dev,
-			     struct radeon_framebuffer *rfb,
+			     struct drm_framebuffer *rfb,
 			     const struct drm_mode_fb_cmd2 *mode_cmd,
 			     struct drm_gem_object *obj);
 

From e68d14dd4ebaf596bf0c237ba82f815c2f561dec Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 30 Mar 2018 15:11:38 +0100
Subject: [PATCH 0335/1286] drm/amdgpu: Move GEM BO to drm_framebuffer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since drm_framebuffer can now store GEM objects directly, place them
there rather than in our own subclass. As this makes the framebuffer
create_handle and destroy functions the same as the GEM framebuffer
helper, we can reuse those.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Daniel Stone <daniels@collabora.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: David (ChunMing) Zhou <David1.Zhou@amd.com>
Cc: amd-gfx@lists.freedesktop.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    |  6 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c   | 36 ++++---------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c        | 10 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h      |  1 -
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c        | 17 +++------
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c        | 17 +++------
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c         | 17 +++------
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c         | 17 +++------
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c      |  4 +--
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 +++---
 10 files changed, 40 insertions(+), 96 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a53926580b3d..e0d6b1ddd213 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2541,7 +2541,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
+		struct drm_framebuffer *fb = crtc->primary->fb;
 		struct amdgpu_bo *robj;
 
 		if (amdgpu_crtc->cursor_bo) {
@@ -2553,10 +2553,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 			}
 		}
 
-		if (rfb == NULL || rfb->obj == NULL) {
+		if (fb == NULL || fb->obj[0] == NULL) {
 			continue;
 		}
-		robj = gem_to_amdgpu_bo(rfb->obj);
+		robj = gem_to_amdgpu_bo(fb->obj[0]);
 		/* don't unpin kernel fb objects */
 		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
 			r = amdgpu_bo_reserve(robj, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 93f700ab1bfb..b83ae998fe27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,7 @@
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
 
 static void amdgpu_display_flip_callback(struct dma_fence *f,
@@ -151,8 +152,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_framebuffer *old_amdgpu_fb;
-	struct amdgpu_framebuffer *new_amdgpu_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_flip_work *work;
 	struct amdgpu_bo *new_abo;
@@ -174,15 +173,13 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
 
 	/* schedule unpin of the old buffer */
-	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-	obj = old_amdgpu_fb->obj;
+	obj = crtc->primary->fb->obj[0];
 
 	/* take a reference to the old object */
 	work->old_abo = gem_to_amdgpu_bo(obj);
 	amdgpu_bo_ref(work->old_abo);
 
-	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
-	obj = new_amdgpu_fb->obj;
+	obj = fb->obj[0];
 	new_abo = gem_to_amdgpu_bo(obj);
 
 	/* pin the new buffer */
@@ -482,28 +479,9 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
 	return true;
 }
 
-static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
-
-	drm_gem_object_put_unlocked(amdgpu_fb->obj);
-	drm_framebuffer_cleanup(fb);
-	kfree(amdgpu_fb);
-}
-
-static int amdgpu_display_user_framebuffer_create_handle(
-			struct drm_framebuffer *fb,
-			struct drm_file *file_priv,
-			unsigned int *handle)
-{
-	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
-
-	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
-}
-
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
-	.destroy = amdgpu_display_user_framebuffer_destroy,
-	.create_handle = amdgpu_display_user_framebuffer_create_handle,
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
 };
 
 uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
@@ -526,11 +504,11 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
 				    struct drm_gem_object *obj)
 {
 	int ret;
-	rfb->obj = obj;
+	rfb->base.obj[0] = obj;
 	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
 	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 	if (ret) {
-		rfb->obj = NULL;
+		rfb->base.obj[0] = NULL;
 		return ret;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 12063019751b..ff89e84b34ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -292,9 +292,9 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
 
 	drm_fb_helper_unregister_fbi(&rfbdev->helper);
 
-	if (rfb->obj) {
-		amdgpufb_destroy_pinned_object(rfb->obj);
-		rfb->obj = NULL;
+	if (rfb->base.obj[0]) {
+		amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
+		rfb->base.obj[0] = NULL;
 		drm_framebuffer_unregister_private(&rfb->base);
 		drm_framebuffer_cleanup(&rfb->base);
 	}
@@ -377,7 +377,7 @@ int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
 	if (!adev->mode_info.rfbdev)
 		return 0;
 
-	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
+	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
 	size += amdgpu_bo_size(robj);
 	return size;
 }
@@ -386,7 +386,7 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 {
 	if (!adev->mode_info.rfbdev)
 		return false;
-	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
+	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
 		return true;
 	return false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d6416ee52e32..b9e9e8b02fb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -308,7 +308,6 @@ struct amdgpu_display_funcs {
 
 struct amdgpu_framebuffer {
 	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
 
 	/* caching for later use */
 	uint64_t address;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 452f88ea46a2..ada241bfeee9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1823,7 +1823,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
-	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *abo;
@@ -1842,18 +1841,15 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-	if (atomic) {
-		amdgpu_fb = to_amdgpu_framebuffer(fb);
+	if (atomic)
 		target_fb = fb;
-	} else {
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+	else
 		target_fb = crtc->primary->fb;
-	}
 
 	/* If atomic, assume fb object is pinned & idle & fenced and
 	 * just update base pointers
 	 */
-	obj = amdgpu_fb->obj;
+	obj = target_fb->obj[0];
 	abo = gem_to_amdgpu_bo(obj);
 	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
@@ -2043,8 +2039,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
-		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r != 0))
 			return r;
@@ -2526,11 +2521,9 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 	if (crtc->primary->fb) {
 		int r;
-		struct amdgpu_framebuffer *amdgpu_fb;
 		struct amdgpu_bo *abo;
 
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a7c1c584a191..d3ae508b2a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1862,7 +1862,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
-	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *abo;
@@ -1881,18 +1880,15 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-	if (atomic) {
-		amdgpu_fb = to_amdgpu_framebuffer(fb);
+	if (atomic)
 		target_fb = fb;
-	} else {
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+	else
 		target_fb = crtc->primary->fb;
-	}
 
 	/* If atomic, assume fb object is pinned & idle & fenced and
 	 * just update base pointers
 	 */
-	obj = amdgpu_fb->obj;
+	obj = target_fb->obj[0];
 	abo = gem_to_amdgpu_bo(obj);
 	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
@@ -2082,8 +2078,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
-		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r != 0))
 			return r;
@@ -2601,11 +2596,9 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 	if (crtc->primary->fb) {
 		int r;
-		struct amdgpu_framebuffer *amdgpu_fb;
 		struct amdgpu_bo *abo;
 
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 9f67b7fd3487..394cc1e8fe20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1780,7 +1780,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
-	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *abo;
@@ -1798,18 +1797,15 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-	if (atomic) {
-		amdgpu_fb = to_amdgpu_framebuffer(fb);
+	if (atomic)
 		target_fb = fb;
-	} else {
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+	else
 		target_fb = crtc->primary->fb;
-	}
 
 	/* If atomic, assume fb object is pinned & idle & fenced and
 	 * just update base pointers
 	 */
-	obj = amdgpu_fb->obj;
+	obj = target_fb->obj[0];
 	abo = gem_to_amdgpu_bo(obj);
 	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
@@ -1978,8 +1974,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
-		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r != 0))
 			return r;
@@ -2414,11 +2409,9 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 	if (crtc->primary->fb) {
 		int r;
-		struct amdgpu_framebuffer *amdgpu_fb;
 		struct amdgpu_bo *abo;
 
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f55422cbd77a..c9b9ab8f1b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1754,7 +1754,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
-	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *abo;
@@ -1773,18 +1772,15 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-	if (atomic) {
-		amdgpu_fb = to_amdgpu_framebuffer(fb);
+	if (atomic)
 		target_fb = fb;
-	} else {
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+	else
 		target_fb = crtc->primary->fb;
-	}
 
 	/* If atomic, assume fb object is pinned & idle & fenced and
 	 * just update base pointers
 	 */
-	obj = amdgpu_fb->obj;
+	obj = target_fb->obj[0];
 	abo = gem_to_amdgpu_bo(obj);
 	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
@@ -1955,8 +1951,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
-		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r != 0))
 			return r;
@@ -2430,11 +2425,9 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 	dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 	if (crtc->primary->fb) {
 		int r;
-		struct amdgpu_framebuffer *amdgpu_fb;
 		struct amdgpu_bo *abo;
 
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index b51f05dc9582..89b2286a9d6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -168,11 +168,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 	if (crtc->primary->fb) {
 		int r;
-		struct amdgpu_framebuffer *amdgpu_fb;
 		struct amdgpu_bo *abo;
 
-		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
 		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3ff3905eee9a..077ee6793a1c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1819,7 +1819,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
 		       uint64_t *tiling_flags)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
 	int r = amdgpu_bo_reserve(rbo, false);
 
 	if (unlikely(r)) {
@@ -3028,8 +3028,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 	}
 
 	afb = to_amdgpu_framebuffer(new_state->fb);
-
-	obj = afb->obj;
+	obj = new_state->fb->obj[0];
 	rbo = gem_to_amdgpu_bo(obj);
 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
 	r = amdgpu_bo_reserve(rbo, false);
@@ -3093,14 +3092,12 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
 				       struct drm_plane_state *old_state)
 {
 	struct amdgpu_bo *rbo;
-	struct amdgpu_framebuffer *afb;
 	int r;
 
 	if (!old_state->fb)
 		return;
 
-	afb = to_amdgpu_framebuffer(old_state->fb);
-	rbo = gem_to_amdgpu_bo(afb->obj);
+	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
 	r = amdgpu_bo_reserve(rbo, false);
 	if (unlikely(r)) {
 		DRM_ERROR("failed to reserve rbo before unpin\n");
@@ -3896,7 +3893,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 	int r, vpos, hpos;
 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
-	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
 	struct dc_flip_addrs addr = { {0} };

From 844c541951a00ddffa0248c72e1d7d3e4afaad30 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Mon, 26 Mar 2018 12:56:56 -0500
Subject: [PATCH 0336/1286] drm/amdgpu: add documentation on hwmon interfaces
 exposed (v3)

Provide detail on the currently exposed hwmon interfaces
for temperature, power, voltage, and fan.

v2: add power cap documentation
v3: add a comment about sensors tool
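
As a hedged illustration (not part of the patch): userspace can read
these attributes straight from sysfs. temp1_input is reported in
millidegrees Celsius, so dividing by 1000 gives degrees; the hwmon
index used below is an assumption and varies per system.

    #include <stdio.h>

    int main(void)
    {
            /* Example path; find the right hwmonN for your GPU. */
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
            long millideg;

            if (!f || fscanf(f, "%ld", &millideg) != 1) {
                    perror("temp1_input");
                    return 1;
            }
            fclose(f);
            printf("GPU temperature: %.1f degC\n", millideg / 1000.0);
            return 0;
    }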

Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 40 ++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index e6e365852f11..e5f60fc31516 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1109,6 +1109,46 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 	return count;
 }
 
+
+/**
+ * DOC: hwmon
+ *
+ * The amdgpu driver exposes the following sensor interfaces:
+ * - GPU temperature (via the on-die sensor)
+ * - GPU voltage
+ * - Northbridge voltage (APUs only)
+ * - GPU power
+ * - GPU fan
+ *
+ * hwmon interfaces for GPU temperature:
+ * - temp1_input: the on die GPU temperature in millidegrees Celsius
+ * - temp1_crit: temperature critical max value in millidegrees Celsius
+ * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
+ *
+ * hwmon interfaces for GPU voltage:
+ * - in0_input: the voltage on the GPU in millivolts
+ * - in1_input: the voltage on the Northbridge in millivolts
+ *
+ * hwmon interfaces for GPU power:
+ * - power1_average: average power used by the GPU in microWatts
+ * - power1_cap_min: minimum cap supported in microWatts
+ * - power1_cap_max: maximum cap supported in microWatts
+ * - power1_cap: selected power cap in microWatts
+ *
+ * hwmon interfaces for GPU fan:
+ * - pwm1: pulse width modulation fan level (0-255)
+ * - pwm1_enable: pulse width modulation fan control method
+ *                0: no fan speed control
+ *                1: manual fan speed control using pwm interface
+ *                2: automatic fan speed control
+ * - pwm1_min: pulse width modulation fan control minimum level (0)
+ * - pwm1_max: pulse width modulation fan control maximum level (255)
+ * - fan1_input: fan speed in RPM
+ *
+ * You can use hwmon tools like sensors to view this information on your system.
+ *
+ */
+
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);

From 6907069004216e630d30847bf2893ab18156ed0f Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 29 Mar 2018 13:51:28 -0500
Subject: [PATCH 0337/1286] drm/amdgpu: add asic need_full_reset callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allow us to determine at the SoC level whether the
ASIC requires a full reset or whether a soft reset will suffice.
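
The later patches in this series add the per-ASIC implementations; as a
generic sketch only (the my_asic_* names and the full_reset flag are
placeholders, not real symbols), wiring up and querying the new
callback looks roughly like this:

    static bool my_asic_need_full_reset(struct amdgpu_device *adev)
    {
            /* Return false once per-IP soft reset is known to work. */
            return true;
    }

    static const struct amdgpu_asic_funcs my_asic_funcs = {
            /* ...the other asic callbacks... */
            .need_full_reset = &my_asic_need_full_reset,
    };

    /* In the reset path: */
    if (amdgpu_asic_need_full_reset(adev))
            full_reset = true;      /* skip the per-IP soft reset attempt */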

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index df409ddb97e6..21272ce74b56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1204,6 +1204,8 @@ struct amdgpu_asic_funcs {
 	/* invalidate hdp read cache */
 	void (*invalidate_hdp)(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring);
+	/* check if the asic needs a full reset or if soft reset will work */
+	bool (*need_full_reset)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1773,6 +1775,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
 #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))

From 0a881af83cf8d0a9d270f63dd378f4eefda60c48 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 3 Apr 2018 13:27:14 -0500
Subject: [PATCH 0338/1286] drm/amdgpu/si: implement asic need_full_reset
 callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Used to check on a per-SoC basis whether the SoC needs
a full reset or whether a per-IP soft reset will suffice.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/si.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index a675ec6d2811..c364ef94cc36 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1252,6 +1252,12 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
 	}
 }
 
+static bool si_need_full_reset(struct amdgpu_device *adev)
+{
+	/* change this when we support soft reset */
+	return true;
+}
+
 static int si_get_pcie_lanes(struct amdgpu_device *adev)
 {
 	u32 link_width_cntl;
@@ -1332,6 +1338,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.get_config_memsize = &si_get_config_memsize,
 	.flush_hdp = &si_flush_hdp,
 	.invalidate_hdp = &si_invalidate_hdp,
+	.need_full_reset = &si_need_full_reset,
 };
 
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)

From b7acb46f210b92005b8db16380eac6b3a4c61431 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 29 Mar 2018 14:39:10 -0500
Subject: [PATCH 0339/1286] drm/amdgpu/cik: implement asic need_full_reset
 callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Used to check on a per-SoC basis whether the SoC needs
a full reset or whether a per-IP soft reset will suffice.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/cik.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 0df22030e713..8ff4c60d1b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1735,6 +1735,12 @@ static void cik_invalidate_hdp(struct amdgpu_device *adev,
 	}
 }
 
+static bool cik_need_full_reset(struct amdgpu_device *adev)
+{
+	/* change this when we support soft reset */
+	return true;
+}
+
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
@@ -1748,6 +1754,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 	.get_config_memsize = &cik_get_config_memsize,
 	.flush_hdp = &cik_flush_hdp,
 	.invalidate_hdp = &cik_invalidate_hdp,
+	.need_full_reset = &cik_need_full_reset,
 };
 
 static int cik_common_early_init(void *handle)

From 06082d9b711fd5889c5f182c6fa629891e5b48c3 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 29 Mar 2018 14:39:28 -0500
Subject: [PATCH 0340/1286] drm/amdgpu/vi: implement asic need_full_reset
 callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Used to check on a per-SoC basis whether the SoC needs
a full reset or whether a per-IP soft reset will suffice.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 126f1276d347..1b4ee249b95a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -876,6 +876,27 @@ static void vi_invalidate_hdp(struct amdgpu_device *adev,
 	}
 }
 
+static bool vi_need_full_reset(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+		/* CZ has hang issues with full reset at the moment */
+		return false;
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+		/* XXX: soft reset should work on fiji and tonga */
+		return true;
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
+	case CHIP_TOPAZ:
+	default:
+		/* change this when we support soft reset */
+		return true;
+	}
+}
+
 static const struct amdgpu_asic_funcs vi_asic_funcs =
 {
 	.read_disabled_bios = &vi_read_disabled_bios,
@@ -889,6 +910,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
 	.get_config_memsize = &vi_get_config_memsize,
 	.flush_hdp = &vi_flush_hdp,
 	.invalidate_hdp = &vi_invalidate_hdp,
+	.need_full_reset = &vi_need_full_reset,
 };
 
 #define CZ_REV_BRISTOL(rev)	 \

From adbd4f894f3615f04a4c0cfb931ed647c0280a5f Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 29 Mar 2018 14:39:46 -0500
Subject: [PATCH 0341/1286] drm/amdgpu/soc15: implement asic need_full_reset
 callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Used to check on a per-SoC basis whether the SoC needs
a full reset or whether a per-IP soft reset will suffice.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 654b015d5e05..2e9ebe8db5cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -568,6 +568,12 @@ static void soc15_invalidate_hdp(struct amdgpu_device *adev,
 			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 }
 
+static bool soc15_need_full_reset(struct amdgpu_device *adev)
+{
+	/* change this when we implement soft reset */
+	return true;
+}
+
 static const struct amdgpu_asic_funcs soc15_asic_funcs =
 {
 	.read_disabled_bios = &soc15_read_disabled_bios,
@@ -581,6 +587,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
 	.get_config_memsize = &soc15_get_config_memsize,
 	.flush_hdp = &soc15_flush_hdp,
 	.invalidate_hdp = &soc15_invalidate_hdp,
+	.need_full_reset = &soc15_need_full_reset,
 };
 
 static int soc15_common_early_init(void *handle)

From 8bc04c2965879c79bb84e3fc8410e6c90cecc96d Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 29 Mar 2018 14:48:37 -0500
Subject: [PATCH 0342/1286] drm/amdgpu: use new asic need_full_reset callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use the new callback to determine whether to use a full
ASIC reset or a per-IP soft reset. This enables reset to
actually proceed on ASICs which don't support soft reset
yet.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e0d6b1ddd213..abc33464959e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2738,6 +2738,9 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return true;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -2794,6 +2797,9 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
 {
 	int i;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;

From e63f86735d9220c8ca6929dc07a4c78f111a6201 Mon Sep 17 00:00:00 2001
From: Colin Ian King <colin.king@canonical.com>
Date: Fri, 30 Mar 2018 17:00:47 +0100
Subject: [PATCH 0343/1286] drm/amd/display: fix spelling mistake: "Usupported"
 -> "Unsupported"

Trivial fix to spelling mistake in DRM_ERROR error message text

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 077ee6793a1c..fbde450277e8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1521,7 +1521,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		break;
 #endif
 	default:
-		DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
 		goto fail;
 	}
 
@@ -1714,7 +1714,7 @@ static int dm_early_init(void *handle)
 		break;
 #endif
 	default:
-		DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
 		return -EINVAL;
 	}
 

From ced5443502b682decd886ccda10f10862e418ae9 Mon Sep 17 00:00:00 2001
From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Date: Thu, 29 Mar 2018 22:36:31 +0530
Subject: [PATCH 0344/1286] drm/scheduler: fix param documentation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There is no @kernel parameter anymore, so drop it and
document the @guilty parameter instead.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 0d95888ccc3e..1d368bc66ac2 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -117,8 +117,9 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * @sched	The pointer to the scheduler
  * @entity	The pointer to a valid drm_sched_entity
  * @rq		The run queue this entity belongs
- * @kernel	If this is an entity for the kernel
  * @jobs	The max number of jobs in the job queue
+ * @guilty      atomic_t set to 1 when a job on this queue
+ *              is found to be guilty causing a timeout
  *
  * return 0 if succeed. negative error code on failure
 */

From a70cdb9eddcfd4ba20d69b84149b4a38648455ac Mon Sep 17 00:00:00 2001
From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Date: Thu, 29 Mar 2018 22:36:33 +0530
Subject: [PATCH 0345/1286] drm/scheduler: move the tracepoints file from the
 include directory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Move it alongside the scheduler code. This is mostly a straightforward
rename with no code change, except that TRACE_INCLUDE_PATH must be
updated because it is resolved relative to include/trace/define_trace.h.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Lucas Stach <l.stach@pengutronix.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c                       | 2 +-
 .../drm => drivers/gpu/drm/scheduler}/gpu_scheduler_trace.h     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
 rename {include/drm => drivers/gpu/drm/scheduler}/gpu_scheduler_trace.h (97%)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 1d368bc66ac2..310275eaf128 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -30,7 +30,7 @@
 #include <drm/spsc_queue.h>
 
 #define CREATE_TRACE_POINTS
-#include <drm/gpu_scheduler_trace.h>
+#include "gpu_scheduler_trace.h"
 
 #define to_drm_sched_job(sched_job)		\
 		container_of((sched_job), struct drm_sched_job, queue_node)
diff --git a/include/drm/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
similarity index 97%
rename from include/drm/gpu_scheduler_trace.h
rename to drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 0789e8d0a0e1..4998ad950a48 100644
--- a/include/drm/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -78,5 +78,5 @@ TRACE_EVENT(drm_sched_process_job,
 
 /* This part must be outside protection */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/scheduler
 #include <trace/define_trace.h>

From 5a8c102ac471c53da38b2c3c35417e9355d21215 Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Fri, 16 Mar 2018 12:29:38 +0800
Subject: [PATCH 0346/1286] drm/amdgpu: Don't change preferred domain when
 fallback GTT v6
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

v2: add sanity checking
v3: make code open
v4: also handle visible to invisible fallback
v5: Since two fallback cases, re-use goto retry
v6: avoid bo is unref when retry, and only user BO can fallback
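
In outline, the fallback decision now lives in amdgpu_bo_do_create()
and operates on a local domains variable, so the BO's preferred
domains are never rewritten. A simplified sketch of the retry logic
after ttm_bo_init_reserved() returns r (error handling and unrelated
setup omitted):

    if (r && r != -ERESTARTSYS && type == ttm_bo_type_device) {
            if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
                    /* First retry: drop the CPU-visible requirement. */
                    flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                    goto retry;
            } else if (domains != allowed_domains) {
                    /* Second retry: e.g. VRAM falls back to VRAM | GTT. */
                    domains = allowed_domains;
                    goto retry;
            }
    }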

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com> (v5)
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: felix.kuehling@amd.com
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 16 ++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 37 ++++++++++++++--------
 2 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 46b9ea4e6103..28c2706e48d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -56,23 +56,11 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 		alignment = PAGE_SIZE;
 	}
 
-retry:
 	r = amdgpu_bo_create(adev, size, alignment, initial_domain,
 			     flags, type, resv, &bo);
 	if (r) {
-		if (r != -ERESTARTSYS) {
-			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-				goto retry;
-			}
-
-			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
-				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
-				goto retry;
-			}
-			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
-				  size, initial_domain, alignment, r);
-		}
+		DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+			  size, initial_domain, alignment, r);
 		return r;
 	}
 	*obj = &bo->gem_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6d08cde8443c..04d6830347ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -356,6 +356,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	struct amdgpu_bo *bo;
 	unsigned long page_align;
 	size_t acc_size;
+	u32 domains, preferred_domains, allowed_domains;
 	int r;
 
 	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
@@ -369,22 +370,24 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
 				       sizeof(struct amdgpu_bo));
 
+	preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+				      AMDGPU_GEM_DOMAIN_GTT |
+				      AMDGPU_GEM_DOMAIN_CPU |
+				      AMDGPU_GEM_DOMAIN_GDS |
+				      AMDGPU_GEM_DOMAIN_GWS |
+				      AMDGPU_GEM_DOMAIN_OA);
+	allowed_domains = preferred_domains;
+	if (type != ttm_bo_type_kernel &&
+	    allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+		allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+	domains = preferred_domains;
+retry:
 	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
-	bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
-					 AMDGPU_GEM_DOMAIN_GTT |
-					 AMDGPU_GEM_DOMAIN_CPU |
-					 AMDGPU_GEM_DOMAIN_GDS |
-					 AMDGPU_GEM_DOMAIN_GWS |
-					 AMDGPU_GEM_DOMAIN_OA);
-	bo->allowed_domains = bo->preferred_domains;
-	if (type != ttm_bo_type_kernel &&
-	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
-		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
 	bo->flags = flags;
 
@@ -417,12 +420,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 #endif
 
 	bo->tbo.bdev = &adev->mman.bdev;
-	amdgpu_ttm_placement_from_domain(bo, domain);
-
+	amdgpu_ttm_placement_from_domain(bo, domains);
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, &ctx, acc_size,
 				 NULL, resv, &amdgpu_ttm_bo_destroy);
-	if (unlikely(r != 0))
+	if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) {
+		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+			goto retry;
+		} else if (domains != allowed_domains) {
+			domains = allowed_domains;
+			goto retry;
+		}
+	}
+	if (unlikely(r))
 		return r;
 
 	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&

From 552825b28ddac200b6080d9e79f4121b68e1517d Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Mon, 2 Apr 2018 11:20:44 +0800
Subject: [PATCH 0347/1286] drm/amdgpu: add new bo flag that indicates BOs
 don't need fallback (v2)

Use cases:
1. KFD wraps amdgpu_bo_create and has no fallback case, unlike
amdgpu_gem_object_create. Since the upstream branch does not yet contain
amdgpu_amdkfd_gpuvm.c, the KFD developers will need to add this flag in
__alloc_memory_of_gpu:
+       flags |= AMDGPU_GEM_CREATE_NO_FALLBACK;
2. A UMD can also specify this flag for its allocations if desired (see
the sketch below).

v2: squash in merge conflict fix (Chunming)
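
As a hedged example of the second use case (a sketch only; fd is
assumed to be an open amdgpu render-node file descriptor, and the
snippet needs xf86drm.h and drm/amdgpu_drm.h from libdrm):

    /* Request strict VRAM placement; fail rather than fall back to GTT. */
    static int alloc_vram_no_fallback(int fd, __u32 *handle)
    {
            union drm_amdgpu_gem_create args = { 0 };

            args.in.bo_size      = 1 << 20;               /* 1 MiB */
            args.in.alignment    = 4096;
            args.in.domains      = AMDGPU_GEM_DOMAIN_VRAM;
            args.in.domain_flags = AMDGPU_GEM_CREATE_NO_FALLBACK;

            if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
                    return -1;      /* no GTT fallback was attempted */

            *handle = args.out.handle;
            return 0;
    }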

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: felix.kuehling@amd.com
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c     | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++++-
 include/uapi/drm/amdgpu_drm.h              | 2 ++
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dc34b50e6b29..d7d7ce1507ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -386,7 +386,8 @@ retry:
 	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		p->bytes_moved_vis += ctx.bytes_moved;
 
-	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains &&
+	    !(bo->flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
 		domain = bo->allowed_domains;
 		goto retry;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 04d6830347ec..9e23d6f6f3f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -388,6 +388,8 @@ retry:
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
+	bo->preferred_domains = preferred_domains;
+	bo->allowed_domains = allowed_domains;
 
 	bo->flags = flags;
 
@@ -424,7 +426,8 @@ retry:
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, &ctx, acc_size,
 				 NULL, resv, &amdgpu_ttm_bo_destroy);
-	if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) {
+	if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device &&
+	    !(flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 			goto retry;
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index c363b67f2d0a..4f5a27d64c54 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -95,6 +95,8 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
 /* Flag that BO sharing will be explicitly synchronized */
 #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
+/* Flag that BO doesn't need fallback */
+#define AMDGPU_GEM_CREATE_NO_FALLBACK		(1 << 8)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */

From 1a61ee07211c543bf43e635fa703c162a78af0e1 Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Wed, 4 Apr 2018 15:32:51 -0700
Subject: [PATCH 0348/1286] drm/sched: Extend the documentation.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

These comments answer all the questions I had for myself when
implementing a driver using the GPU scheduler.
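
A hedged sketch of the flow these comments describe, from a driver's
point of view (the my_* names are placeholders and my_hw_submit() is a
hypothetical helper; the callback signatures follow the ones documented
in this patch):

    static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
                                           struct drm_sched_entity *s_entity)
    {
            return NULL;    /* nothing left to wait on; run_job() may run */
    }

    static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
    {
            /* Push the job to hardware, return the fence it will signal. */
            return my_hw_submit(sched_job);
    }

    static void my_timedout_job(struct drm_sched_job *sched_job)
    {
            /* Kick off GPU recovery for a hung job. */
    }

    static void my_free_job(struct drm_sched_job *sched_job)
    {
            /* Finished fence has signaled; release the job. */
    }

    static const struct drm_sched_backend_ops my_sched_ops = {
            .dependency   = my_dependency,
            .run_job      = my_run_job,
            .timedout_job = my_timedout_job,
            .free_job     = my_free_job,
    };

    /* Submission: drm_sched_job_init() the job, install any out-fence
     * from the job's finished drm_sched_fence, then call
     * drm_sched_entity_push_job().
     */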

Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 include/drm/gpu_scheduler.h | 46 +++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dfd54fb94e10..c053a32341bf 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -43,10 +43,12 @@ enum drm_sched_priority {
 };
 
 /**
- * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
- * job queues to corresponding hardware ring based on scheduling
- * policy.
+ * drm_sched_entity - A wrapper around a job queue (typically attached
+ * to the DRM file_priv).
+ *
+ * Entities will emit jobs in order to their corresponding hardware
+ * ring, and the scheduler will alternate between entities based on
+ * scheduling policy.
 */
 struct drm_sched_entity {
 	struct list_head		list;
@@ -78,7 +80,18 @@ struct drm_sched_rq {
 
 struct drm_sched_fence {
 	struct dma_fence		scheduled;
+
+	/* This fence is what will be signaled by the scheduler when
+	 * the job is completed.
+	 *
+	 * When setting up an out fence for the job, you should use
+	 * this, since it's available immediately upon
+	 * drm_sched_job_init(), and the fence returned by the driver
+	 * from run_job() won't be created until the dependencies have
+	 * resolved.
+	 */
 	struct dma_fence		finished;
+
 	struct dma_fence_cb		cb;
 	struct dma_fence		*parent;
 	struct drm_gpu_scheduler	*sched;
@@ -88,6 +101,13 @@ struct drm_sched_fence {
 
 struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
 
+/**
+ * drm_sched_job - A job to be run by an entity.
+ *
+ * A job is created by the driver using drm_sched_job_init(), and
+ * should call drm_sched_entity_push_job() once it wants the scheduler
+ * to schedule the job.
+ */
 struct drm_sched_job {
 	struct spsc_node		queue_node;
 	struct drm_gpu_scheduler	*sched;
@@ -112,10 +132,28 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
  * these functions should be implemented in driver side
 */
 struct drm_sched_backend_ops {
+	/* Called when the scheduler is considering scheduling this
+	 * job next, to get another struct dma_fence for this job to
+	 * block on.  Once it returns NULL, run_job() may be called.
+	 */
 	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
 					struct drm_sched_entity *s_entity);
+
+	/* Called to execute the job once all of the dependencies have
+	 * been resolved.  This may be called multiple times, if
+	 * timedout_job() has happened and drm_sched_job_recovery()
+	 * decides to try it again.
+	 */
 	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
+
+	/* Called when a job has taken too long to execute, to trigger
+	 * GPU recovery.
+	 */
 	void (*timedout_job)(struct drm_sched_job *sched_job);
+
+	/* Called once the job's finished fence has been signaled and
+	 * it's time to clean it up.
+	 */
 	void (*free_job)(struct drm_sched_job *sched_job);
 };
 

From 1eb1547fd0267fbb5fabe4973210dfc295a92725 Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Tue, 3 Apr 2018 10:41:32 -0400
Subject: [PATCH 0349/1286] drm/amdgpu: Added support for MV packet
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The motion vector packet needs to be supported in physical mode.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a33804bd3314..d7261e01ff8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -755,6 +755,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 			if (r)
 				goto out;
 			break;
+
+		case 0x0500000d: /* MV buffer */
+			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
+							idx + 2, 0, 0);
+			if (r)
+				goto out;
+
+			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
+							idx + 7, 0, 0);
+			if (r)
+				goto out;
+			break;
 		}
 
 		idx += len / 4;
@@ -860,6 +872,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 				goto out;
 			break;
 
+		case 0x0500000d: /* MV buffer */
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
+							idx + 2, *size, 0);
+			if (r)
+				goto out;
+
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
+							idx + 7, *size / 12, 0);
+			if (r)
+				goto out;
+			break;
+
 		default:
 			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
 			r = -EINVAL;

From 8218d7f1f70179a532639f01dfd32dc5dbb09ed3 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Tue, 17 Oct 2017 12:02:01 -0400
Subject: [PATCH 0350/1286] drm/amd/display: Don't access legacy properties

We're an atomic driver and shouldn't access legacy properties. Doing so
will only scare users with stack traces.

Instead save the prop in the state and access it directly. Much simpler.
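
A condensed sketch of the resulting pattern (connector is assumed to be
a bound struct drm_connector pointer; enable_freesync() is a placeholder
for the actual consumer):

    struct dm_connector_state *dm_con_state =
            to_dm_connector_state(connector->state);

    /* Writer, in the EDID parsing path: */
    dm_con_state->freesync_capable = true;

    /* Readers look at the atomic state directly; no legacy
     * drm_object_property_*() calls are involved.
     */
    if (dm_con_state->freesync_capable)
            enable_freesync();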

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 ++++++++++---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  1 +
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index fbde450277e8..74839478bdc9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5118,17 +5118,24 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
 					   struct edid *edid)
 {
 	int i;
-	uint64_t val_capable;
 	bool edid_check_required;
 	struct detailed_timing *timing;
 	struct detailed_non_pixel *data;
 	struct detailed_data_monitor_range *range;
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 			to_amdgpu_dm_connector(connector);
+	struct dm_connector_state *dm_con_state;
 
 	struct drm_device *dev = connector->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 
+	if (!connector->state) {
+		DRM_ERROR("%s - Connector has no state", __func__);
+		return;
+	}
+
+	dm_con_state = to_dm_connector_state(connector->state);
+
 	edid_check_required = false;
 	if (!amdgpu_dm_connector->dc_sink) {
 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
@@ -5147,7 +5154,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
 						amdgpu_dm_connector);
 		}
 	}
-	val_capable = 0;
+	dm_con_state->freesync_capable = false;
 	if (edid_check_required == true && (edid->version > 1 ||
 	   (edid->version == 1 && edid->revision > 1))) {
 		for (i = 0; i < 4; i++) {
@@ -5183,7 +5190,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
 					amdgpu_dm_connector->min_vfreq * 1000000;
 			amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
 					amdgpu_dm_connector->max_vfreq * 1000000;
-				val_capable = 1;
+			dm_con_state->freesync_capable = true;
 		}
 	}
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 3af699b24e10..005cf0d2dc34 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -220,6 +220,7 @@ struct dm_connector_state {
 	uint8_t underscan_hborder;
 	bool underscan_enable;
 	struct mod_freesync_user_enable user_enable;
+	bool freesync_capable;
 };
 
 #define to_dm_connector_state(x)\

From 742811b7121ec4e426edb4a21657ca5523955489 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Mon, 12 Mar 2018 11:16:47 -0400
Subject: [PATCH 0351/1286] drm/amd/display: Only register backlight device if
 embedded panel connected

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 58 +++++++++++--------
 1 file changed, 33 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 74839478bdc9..6636f4e9d30c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1403,6 +1403,28 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
 	return ret;
 }
 
+
+static void register_backlight_device(struct amdgpu_display_manager *dm,
+				      struct dc_link *link)
+{
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+	    link->type != dc_connection_none) {
+		/* Even if registration failed, we should continue with
+		 * DM initialization because not having a backlight control
+		 * is better than a black screen.
+		 */
+		amdgpu_dm_register_backlight_device(dm);
+
+		if (dm->backlight_dev)
+			dm->backlight_link = link;
+	}
+#endif
+}
+
+
 /* In this architecture, the association
  * connector -> encoder -> crtc
  * id not really requried. The crtc and connector will hold the
@@ -1456,6 +1478,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
 	/* loops over all connectors on the board */
 	for (i = 0; i < link_cnt; i++) {
+		struct dc_link *link = NULL;
 
 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
 			DRM_ERROR(
@@ -1482,9 +1505,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 			goto fail;
 		}
 
-		if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
-				DETECT_REASON_BOOT))
+		link = dc_get_link_at_index(dm->dc, i);
+
+		if (dc_link_detect(link, DETECT_REASON_BOOT)) {
 			amdgpu_dm_update_connector_after_detect(aconnector);
+			register_backlight_device(dm, link);
+		}
+
+
 	}
 
 	/* Software is initialized. Now we can register interrupt handlers. */
@@ -2684,7 +2712,8 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
-	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+	    link->type != dc_connection_none) {
 		amdgpu_dm_register_backlight_device(dm);
 
 		if (dm->backlight_dev) {
@@ -3557,6 +3586,7 @@ create_i2c(struct ddc_service *ddc_service,
 	return i2c;
 }
 
+
 /* Note: this function assumes that dc_link_detect() was called for the
  * dc_link which will be represented by this aconnector.
  */
@@ -3626,28 +3656,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
 
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
-	/* NOTE: this currently will create backlight device even if a panel
-	 * is not connected to the eDP/LVDS connector.
-	 *
-	 * This is less than ideal but we don't have sink information at this
-	 * stage since detection happens after. We can't do detection earlier
-	 * since MST detection needs connectors to be created first.
-	 */
-	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
-		/* Event if registration failed, we should continue with
-		 * DM initialization because not having a backlight control
-		 * is better then a black screen.
-		 */
-		amdgpu_dm_register_backlight_device(dm);
-
-		if (dm->backlight_dev)
-			dm->backlight_link = link;
-	}
-#endif
-
 out_free:
 	if (res) {
 		kfree(i2c);

From 5cd29ed0cb9a1985d3334d52d3b54f0defb23de4 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Mon, 12 Mar 2018 11:48:26 -0400
Subject: [PATCH 0352/1286] drm/amd/display: Don't register backlight on
 connector_destroy

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6636f4e9d30c..7ecc22462628 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2713,14 +2713,10 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
-	    link->type != dc_connection_none) {
-		amdgpu_dm_register_backlight_device(dm);
-
-		if (dm->backlight_dev) {
-			backlight_device_unregister(dm->backlight_dev);
-			dm->backlight_dev = NULL;
-		}
-
+	    link->type != dc_connection_none &&
+	    dm->backlight_dev) {
+		backlight_device_unregister(dm->backlight_dev);
+		dm->backlight_dev = NULL;
 	}
 #endif
 	drm_connector_unregister(connector);

From 0c8df4bbc4de4789dde7fa622585803fd10dd8e4 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 15 Mar 2018 13:46:50 -0400
Subject: [PATCH 0353/1286] drm/amd/display: Program v_total_min/max after
 v_total_cntl

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../display/dc/dce110/dce110_timing_generator.c  | 16 ++++++++--------
 .../display/dc/dce120/dce120_timing_generator.c  | 12 ++++++------
 .../gpu/drm/amd/display/dc/dcn10/dcn10_optc.c    | 12 ++++++------
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index be7153924a70..1b2fe0df347f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -431,14 +431,6 @@ void dce110_timing_generator_set_drr(
 			0,
 			CRTC_V_TOTAL_CONTROL,
 			CRTC_SET_V_TOTAL_MIN_MASK);
-		set_reg_field_value(v_total_min,
-				0,
-				CRTC_V_TOTAL_MIN,
-				CRTC_V_TOTAL_MIN);
-		set_reg_field_value(v_total_max,
-				0,
-				CRTC_V_TOTAL_MAX,
-				CRTC_V_TOTAL_MAX);
 		set_reg_field_value(v_total_cntl,
 				0,
 				CRTC_V_TOTAL_CONTROL,
@@ -447,6 +439,14 @@ void dce110_timing_generator_set_drr(
 				0,
 				CRTC_V_TOTAL_CONTROL,
 				CRTC_V_TOTAL_MAX_SEL);
+		set_reg_field_value(v_total_min,
+				0,
+				CRTC_V_TOTAL_MIN,
+				CRTC_V_TOTAL_MIN);
+		set_reg_field_value(v_total_max,
+				0,
+				CRTC_V_TOTAL_MAX,
+				CRTC_V_TOTAL_MAX);
 		set_reg_field_value(v_total_cntl,
 				0,
 				CRTC_V_TOTAL_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 7bee78172d85..2ea490f8482e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -570,18 +570,18 @@ void dce120_timing_generator_set_drr(
 				0x180);
 
 	} else {
-		CRTC_REG_UPDATE(
-				CRTC0_CRTC_V_TOTAL_MIN,
-				CRTC_V_TOTAL_MIN, 0);
-		CRTC_REG_UPDATE(
-				CRTC0_CRTC_V_TOTAL_MAX,
-				CRTC_V_TOTAL_MAX, 0);
 		CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 5,
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 0,
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 0,
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0,
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
 				FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
+		CRTC_REG_UPDATE(
+				CRTC0_CRTC_V_TOTAL_MIN,
+				CRTC_V_TOTAL_MIN, 0);
+		CRTC_REG_UPDATE(
+				CRTC0_CRTC_V_TOTAL_MAX,
+				CRTC_V_TOTAL_MAX, 0);
 		CRTC_REG_UPDATE(
 				CRTC0_CRTC_STATIC_SCREEN_CONTROL,
 				CRTC_STATIC_SCREEN_EVENT_MASK,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index f56eac0e4dd2..dc921307874a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -855,17 +855,17 @@ void optc1_set_drr(
 				OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
 				OTG_SET_V_TOTAL_MIN_MASK, 0);
 	} else {
-		REG_SET(OTG_V_TOTAL_MIN, 0,
-			OTG_V_TOTAL_MIN, 0);
-
-		REG_SET(OTG_V_TOTAL_MAX, 0,
-			OTG_V_TOTAL_MAX, 0);
-
 		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
 				OTG_SET_V_TOTAL_MIN_MASK, 0,
 				OTG_V_TOTAL_MIN_SEL, 0,
 				OTG_V_TOTAL_MAX_SEL, 0,
 				OTG_FORCE_LOCK_ON_EVENT, 0);
+
+		REG_SET(OTG_V_TOTAL_MIN, 0,
+			OTG_V_TOTAL_MIN, 0);
+
+		REG_SET(OTG_V_TOTAL_MAX, 0,
+			OTG_V_TOTAL_MAX, 0);
 	}
 }
 

From 9e3efe3eed47952e2e0209b3808989ae1cc6a31b Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Thu, 15 Mar 2018 15:08:04 -0400
Subject: [PATCH 0354/1286] drm/amd/display: Set ignore_msa_timing_param

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7ecc22462628..a6039e5b664d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2487,6 +2487,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	update_stream_signal(stream);
 
+	if (dm_state && dm_state->freesync_capable)
+		stream->ignore_msa_timing_param = true;
+
 	return stream;
 }
 

From f110892ead622bdc9a7732a23aef3a08b0565608 Mon Sep 17 00:00:00 2001
From: Hersen Wu <hersenxs.wu@amd.com>
Date: Mon, 19 Mar 2018 15:22:51 -0400
Subject: [PATCH 0355/1286] drm/amd/display: Non-HDMI DP active dongle should
 not support YUV pixel format

Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Reviewed-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index c18f24afa698..e612841f7f91 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1848,9 +1848,22 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 
 static bool dp_active_dongle_validate_timing(
 		const struct dc_crtc_timing *timing,
-		const struct dc_dongle_caps *dongle_caps)
+		const struct dpcd_caps *dpcd_caps)
 {
 	unsigned int required_pix_clk = timing->pix_clk_khz;
+	const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
+
+	switch (dpcd_caps->dongle_type) {
+	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+	case DISPLAY_DONGLE_DP_DVI_DONGLE:
+		if (timing->pixel_encoding == PIXEL_ENCODING_RGB)
+			return true;
+		else
+			return false;
+	default:
+		break;
+	}
 
 	if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
 		dongle_caps->extendedCapValid == false)
@@ -1916,7 +1929,7 @@ enum dc_status dc_link_validate_mode_timing(
 		const struct dc_crtc_timing *timing)
 {
 	uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
-	struct dc_dongle_caps *dongle_caps = &link->dpcd_caps.dongle_caps;
+	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
 
 	/* A hack to avoid failing any modes for EDID override feature on
 	 * topology change such as lower quality cable for DP or different dongle
@@ -1929,7 +1942,7 @@ enum dc_status dc_link_validate_mode_timing(
 		return DC_EXCEED_DONGLE_CAP;
 
 	/* Active Dongle*/
-	if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+	if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
 		return DC_EXCEED_DONGLE_CAP;
 
 	switch (stream->signal) {

From 63bd5444f6937bf6bd27a2ab79162fec784dd83c Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Wed, 14 Mar 2018 15:54:27 -0400
Subject: [PATCH 0356/1286] drm/amd/display: Fix potential access beyond end of
 array in CM

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
CC: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c   | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 881a1bff94d2..96d5878e9ccd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -367,15 +367,15 @@ bool cm_helper_translate_curve_to_hw_format(
 
 	lut_params->hw_points_num = hw_points;
 
-	i = 1;
-	for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+	k = 0;
+	for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
 		if (seg_distr[k] != -1) {
 			lut_params->arr_curve_points[k].segments_num =
 					seg_distr[k];
 			lut_params->arr_curve_points[i].offset =
 					lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
 		}
-		i++;
+		k++;
 	}
 
 	if (seg_distr[k] != -1)
@@ -529,15 +529,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
 	lut_params->hw_points_num = hw_points;
 
-	i = 1;
-	for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+	k = 0;
+	for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
 		if (seg_distr[k] != -1) {
 			lut_params->arr_curve_points[k].segments_num =
 					seg_distr[k];
 			lut_params->arr_curve_points[i].offset =
 					lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
 		}
-		i++;
+		k++;
 	}
 
 	if (seg_distr[k] != -1)

From 5d4b05ddd826d877327ecabf987b7c61ec3cb0c5 Mon Sep 17 00:00:00 2001
From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Date: Thu, 15 Mar 2018 13:01:46 -0400
Subject: [PATCH 0357/1286] drm/amd/display: Add Dynamic debug prints

Map the DC_LOG_XXX macros to pr_debug() and DRM_DEBUG_KMS() so the
log output can be controlled through dynamic debug.
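
As a rough illustration (not part of the diff below; the call site and
its arguments are hypothetical), a DC_LOG_SURFACE() call now compiles
down to a plain pr_debug() with a category prefix, which can be enabled
at runtime via dynamic debug instead of the old init_data.log_mask:

	/* Sketch only: hypothetical call site and its expansion. */
	DC_LOG_SURFACE("plane %d: %dx%d\n", plane_id, width, height);
	/* expands to:
	 * pr_debug("[SURFACE]:" "plane %d: %dx%d\n", plane_id, width, height);
	 */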

Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  5 --
 .../gpu/drm/amd/display/dc/core/dc_debug.c    | 20 +++---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 19 +++---
 .../gpu/drm/amd/display/dc/core/dc_resource.c |  7 +-
 .../gpu/drm/amd/display/dc/dce/dce_audio.c    |  9 +--
 .../drm/amd/display/dc/dce/dce_clock_source.c | 11 ++--
 .../display/dc/dce110/dce110_hw_sequencer.c   |  7 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 14 ++--
 .../drm/amd/display/include/logger_types.h    | 64 +++++++++----------
 9 files changed, 77 insertions(+), 79 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a6039e5b664d..2514d7b3b66e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -433,11 +433,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
 
-	if (amdgpu_dc_log)
-		init_data.log_mask = DC_DEFAULT_LOG_MASK;
-	else
-		init_data.log_mask = DC_MIN_LOG_MASK;
-
 	/*
 	 * TODO debug why this doesn't work on Raven
 	 */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 71cc60fcff5e..a3c87611220d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -36,8 +36,9 @@
 #include "hw_sequencer.h"
 
 #include "resource.h"
-#define DC_LOGGER \
-	logger
+
+#define DC_LOGGER_INIT(logger)
+
 
 #define SURFACE_TRACE(...) do {\
 		if (dc->debug.surface_trace) \
@@ -60,8 +61,7 @@ void pre_surface_trace(
 		int surface_count)
 {
 	int i;
-	struct dc  *core_dc = dc;
-	struct dal_logger *logger =  core_dc->ctx->logger;
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	for (i = 0; i < surface_count; i++) {
 		const struct dc_plane_state *plane_state = plane_states[i];
@@ -183,8 +183,7 @@ void update_surface_trace(
 		int surface_count)
 {
 	int i;
-	struct dc  *core_dc = dc;
-	struct dal_logger *logger =  core_dc->ctx->logger;
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	for (i = 0; i < surface_count; i++) {
 		const struct dc_surface_update *update = &updates[i];
@@ -304,8 +303,7 @@ void update_surface_trace(
 
 void post_surface_trace(struct dc *dc)
 {
-	struct dc  *core_dc = dc;
-	struct dal_logger *logger =  core_dc->ctx->logger;
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	SURFACE_TRACE("post surface process.\n");
 
@@ -317,10 +315,10 @@ void context_timing_trace(
 {
 	int i;
 	struct dc  *core_dc = dc;
-	struct dal_logger *logger =  core_dc->ctx->logger;
 	int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
 	struct crtc_position position;
 	unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 
 	for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
@@ -355,9 +353,7 @@ void context_clock_trace(
 		struct dc_state *context)
 {
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-	struct dc  *core_dc = dc;
-	struct dal_logger *logger =  core_dc->ctx->logger;
-
+	DC_LOGGER_INIT(dc->ctx->logger);
 	CLOCK_TRACE("Current: dispclk_khz:%d  max_dppclk_khz:%d  dcfclk_khz:%d\n"
 			"dcfclk_deep_sleep_khz:%d  fclk_khz:%d  socclk_khz:%d\n",
 			context->bw.dcn.calc_clk.dispclk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index e612841f7f91..d9efdd926145 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,8 +45,9 @@
 #include "dce/dce_11_0_d.h"
 #include "dce/dce_11_0_enum.h"
 #include "dce/dce_11_0_sh_mask.h"
-#define DC_LOGGER \
-	dc_ctx->logger
+
+#define DC_LOGGER_INIT(logger)
+
 
 #define LINK_INFO(...) \
 	DC_LOG_HW_HOTPLUG(  \
@@ -561,7 +562,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 	struct dc_context *dc_ctx = link->ctx;
 	struct dc_sink *sink = NULL;
 	enum dc_connection_type new_connection_type = dc_connection_none;
-
+	DC_LOGGER_INIT(link->ctx->logger);
 	if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
 		return false;
 
@@ -927,6 +928,7 @@ static bool construct(
 	struct integrated_info info = {{{ 0 }}};
 	struct dc_bios *bios = init_params->dc->ctx->dc_bios;
 	const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+	DC_LOGGER_INIT(dc_ctx->logger);
 
 	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
 	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
@@ -1135,7 +1137,8 @@ static void dpcd_configure_panel_mode(
 {
 	union dpcd_edp_config edp_config_set;
 	bool panel_mode_edp = false;
-	struct dc_context *dc_ctx = link->ctx;
+	DC_LOGGER_INIT(link->ctx->logger);
+
 	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
 
 	if (DP_PANEL_MODE_DEFAULT != panel_mode) {
@@ -1968,10 +1971,10 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
 	struct dc  *core_dc = link->ctx->dc;
 	struct abm *abm = core_dc->res_pool->abm;
 	struct dmcu *dmcu = core_dc->res_pool->dmcu;
-	struct dc_context *dc_ctx = link->ctx;
 	unsigned int controller_id = 0;
 	bool use_smooth_brightness = true;
 	int i;
+	DC_LOGGER_INIT(link->ctx->logger);
 
 	if ((dmcu == NULL) ||
 		(abm == NULL) ||
@@ -2154,8 +2157,8 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
 	struct fixed31_32 avg_time_slots_per_mtp;
 	struct fixed31_32 pbn;
 	struct fixed31_32 pbn_per_slot;
-	struct dc_context *dc_ctx = link->ctx;
 	uint8_t i;
+	DC_LOGGER_INIT(link->ctx->logger);
 
 	/* enable_link_dp_mst already check link->enabled_stream_count
 	 * and stream is in link->stream[]. This is called during set mode,
@@ -2234,7 +2237,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
 	struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
 	uint8_t i;
 	bool mst_mode = (link->type == dc_connection_mst_branch);
-	struct dc_context *dc_ctx = link->ctx;
+	DC_LOGGER_INIT(link->ctx->logger);
 
 	/* deallocate_mst_payload is called before disable link. When mode or
 	 * disable/enable monitor, new stream is created which is not in link
@@ -2307,8 +2310,8 @@ void core_link_enable_stream(
 		struct pipe_ctx *pipe_ctx)
 {
 	struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
-	struct dc_context *dc_ctx = pipe_ctx->stream->ctx;
 	enum dc_status status;
+	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
 	/* eDP lit up by bios already, no need to enable again. */
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 379b05536321..50b84f69bd25 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -45,8 +45,9 @@
 #include "dcn10/dcn10_resource.h"
 #endif
 #include "dce120/dce120_resource.h"
-#define DC_LOGGER \
-	ctx->logger
+
+#define DC_LOGGER_INIT(logger)
+
 enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 {
 	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -835,7 +836,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
 	struct view recout_skip = { 0 };
 	bool res = false;
-	struct dc_context *ctx = pipe_ctx->stream->ctx;
+	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 	/* Important: scaling ratio calculation requires pixel format,
 	 * lb depth calculation requires recout and taps require scaling ratios.
 	 * Inits require viewport, taps, ratios and recout of split pipe
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 6d5cdcdc8ec9..7f6d724686f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -33,8 +33,9 @@
 
 #define CTX \
 	aud->base.ctx
-#define DC_LOGGER \
-	aud->base.ctx->logger
+
+#define DC_LOGGER_INIT()
+
 #define REG(reg)\
 	(aud->regs->reg)
 
@@ -348,8 +349,8 @@ static void set_audio_latency(
 
 void dce_aud_az_enable(struct audio *audio)
 {
-	struct dce_audio *aud = DCE_AUD(audio);
 	uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+	DC_LOGGER_INIT();
 
 	set_reg_field_value(value, 1,
 			    AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
@@ -371,7 +372,7 @@ void dce_aud_az_enable(struct audio *audio)
 void dce_aud_az_disable(struct audio *audio)
 {
 	uint32_t value;
-	struct dce_audio *aud = DCE_AUD(audio);
+	DC_LOGGER_INIT();
 
 	value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
 	set_reg_field_value(value, 1,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 0aa2cda60890..67dad7f1e643 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -41,8 +41,9 @@
 
 #define CTX \
 	clk_src->base.ctx
-#define DC_LOGGER \
-	calc_pll_cs->ctx->logger
+
+#define DC_LOGGER_INIT()
+
 #undef FN
 #define FN(reg_name, field_name) \
 	clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
@@ -467,7 +468,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
 {
 	uint32_t field = 0;
 	uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
-	struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
+	DC_LOGGER_INIT();
 	/* Check if reference clock is external (not pcie/xtalin)
 	* HW Dce80 spec:
 	* 00 - PCIE_REFCLK, 01 - XTALIN,    02 - GENERICA,    03 - GENERICB
@@ -557,8 +558,8 @@ static uint32_t dce110_get_pix_clk_dividers(
 		struct pll_settings *pll_settings)
 {
 	struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
-	struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
 	uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+	DC_LOGGER_INIT();
 
 	if (pix_clk_params == NULL || pll_settings == NULL
 			|| pix_clk_params->requested_pix_clk == 0) {
@@ -1054,7 +1055,7 @@ static void get_ss_info_from_atombios(
 	struct spread_spectrum_info *ss_info_cur;
 	struct spread_spectrum_data *ss_data_cur;
 	uint32_t i;
-	struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
+	DC_LOGGER_INIT();
 	if (ss_entries_num == NULL) {
 		DC_LOG_SYNC(
 			"Invalid entry !!!\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index c6212301712b..e8df50f30e5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -70,8 +70,9 @@
 
 #define CTX \
 	hws->ctx
-#define DC_LOGGER \
-	ctx->logger
+
+#define DC_LOGGER_INIT()
+
 #define REG(reg)\
 	hws->regs->reg
 
@@ -2701,7 +2702,7 @@ static void dce110_program_front_end_for_pipe(
 	struct xfm_grph_csc_adjustment adjust;
 	struct out_csc_color_matrix tbl_entry;
 	unsigned int i;
-	struct dc_context *ctx = dc->ctx;
+	DC_LOGGER_INIT();
 	memset(&tbl_entry, 0, sizeof(tbl_entry));
 
 	if (dc->current_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index e21458169d15..de5293dc4db3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,8 +45,8 @@
 #include "dcn10_hubbub.h"
 #include "dcn10_cm_common.h"
 
-#define DC_LOGGER \
-	ctx->logger
+#define DC_LOGGER_INIT(logger)
+
 #define CTX \
 	hws->ctx
 #define REG(reg)\
@@ -363,7 +363,7 @@ static void power_on_plane(
 	struct dce_hwseq *hws,
 	int plane_id)
 {
-	struct dc_context *ctx = hws->ctx;
+	DC_LOGGER_INIT(hws->ctx->logger);
 	if (REG(DC_IP_REQUEST_CNTL)) {
 		REG_SET(DC_IP_REQUEST_CNTL, 0,
 				IP_REQUEST_EN, 1);
@@ -562,7 +562,7 @@ static void reset_back_end_for_pipe(
 		struct dc_state *context)
 {
 	int i;
-	struct dc_context *ctx = dc->ctx;
+	DC_LOGGER_INIT(dc->ctx->logger);
 	if (pipe_ctx->stream_res.stream_enc == NULL) {
 		pipe_ctx->stream = NULL;
 		return;
@@ -658,7 +658,7 @@ static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
 	struct dce_hwseq *hws = dc->hwseq;
 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
-	struct dc_context *ctx = dc->ctx;
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	if (REG(DC_IP_REQUEST_CNTL)) {
 		REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -708,7 +708,7 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 
 static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
-	struct dc_context *ctx = dc->ctx;
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
 		return;
@@ -2001,9 +2001,9 @@ static void dcn10_apply_ctx_for_surface(
 	bool removed_pipe[4] = { false };
 	unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
 	bool program_water_mark = false;
-	struct dc_context *ctx = dc->ctx;
 	struct pipe_ctx *top_pipe_to_program =
 			find_top_pipe_for_stream(dc, context, stream);
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	if (!top_pipe_to_program)
 		return;
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 427796bdc14a..4f332e80cecc 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -29,39 +29,39 @@
 #include "os_types.h"
 
 #define MAX_NAME_LEN 32
-#define DC_LOG_ERROR(a, ...) dm_logger_write(DC_LOGGER, LOG_ERROR, a, ## __VA_ARGS__)
-#define DC_LOG_WARNING(a, ...) dm_logger_write(DC_LOGGER, LOG_WARNING, a, ## __VA_ARGS__)
-#define DC_LOG_DEBUG(a, ...) dm_logger_write(DC_LOGGER, LOG_DEBUG, a, ## __VA_ARGS__)
-#define DC_LOG_DC(a, ...) dm_logger_write(DC_LOGGER, LOG_DC, a, ## __VA_ARGS__)
-#define DC_LOG_DTN(a, ...) dm_logger_write(DC_LOGGER, LOG_DTN, a, ## __VA_ARGS__)
-#define DC_LOG_SURFACE(a, ...) dm_logger_write(DC_LOGGER, LOG_SURFACE, a, ## __VA_ARGS__)
-#define DC_LOG_HW_HOTPLUG(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HOTPLUG, a, ## __VA_ARGS__)
-#define DC_LOG_HW_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_LINK_TRAINING, a, ## __VA_ARGS__)
-#define DC_LOG_HW_SET_MODE(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_SET_MODE, a, ## __VA_ARGS__)
-#define DC_LOG_HW_RESUME_S3(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_RESUME_S3, a, ## __VA_ARGS__)
-#define DC_LOG_HW_AUDIO(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_AUDIO, a, ## __VA_ARGS__)
-#define DC_LOG_HW_HPD_IRQ(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HPD_IRQ, a, ## __VA_ARGS__)
-#define DC_LOG_MST(a, ...) dm_logger_write(DC_LOGGER, LOG_MST, a, ## __VA_ARGS__)
-#define DC_LOG_SCALER(a, ...) dm_logger_write(DC_LOGGER, LOG_SCALER, a, ## __VA_ARGS__)
-#define DC_LOG_BIOS(a, ...) dm_logger_write(DC_LOGGER, LOG_BIOS, a, ## __VA_ARGS__)
-#define DC_LOG_BANDWIDTH_CALCS(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_CALCS, a, ## __VA_ARGS__)
-#define DC_LOG_BANDWIDTH_VALIDATION(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_VALIDATION, a, ## __VA_ARGS__)
-#define DC_LOG_I2C_AUX(a, ...) dm_logger_write(DC_LOGGER, LOG_I2C_AUX, a, ## __VA_ARGS__)
-#define DC_LOG_SYNC(a, ...) dm_logger_write(DC_LOGGER, LOG_SYNC, a, ## __VA_ARGS__)
-#define DC_LOG_BACKLIGHT(a, ...) dm_logger_write(DC_LOGGER, LOG_BACKLIGHT, a, ## __VA_ARGS__)
-#define DC_LOG_FEATURE_OVERRIDE(a, ...) dm_logger_write(DC_LOGGER, LOG_FEATURE_OVERRIDE, a, ## __VA_ARGS__)
-#define DC_LOG_DETECTION_EDID_PARSER(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_EDID_PARSER, a, ## __VA_ARGS__)
-#define DC_LOG_DETECTION_DP_CAPS(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_DP_CAPS, a, ## __VA_ARGS__)
-#define DC_LOG_RESOURCE(a, ...) dm_logger_write(DC_LOGGER, LOG_RESOURCE, a, ## __VA_ARGS__)
-#define DC_LOG_DML(a, ...) dm_logger_write(DC_LOGGER, LOG_DML, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_MODE_SET(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_MODE_SET, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_DETECTION(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_DETECTION, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_TRAINING, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_LINK_LOSS(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_LOSS, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_UNDERFLOW(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_UNDERFLOW, a, ## __VA_ARGS__)
-#define DC_LOG_IF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_IF_TRACE, a, ## __VA_ARGS__)
-#define DC_LOG_PERF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_PERF_TRACE, a, ## __VA_ARGS__)
 
+#define DC_LOG_ERROR(...) DRM_ERROR(__VA_ARGS__)
+#define DC_LOG_WARNING(...) DRM_WARN(__VA_ARGS__)
+#define DC_LOG_DEBUG(...) DRM_INFO(__VA_ARGS__)
+#define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)
+#define DC_LOG_HW_HOTPLUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__)
+#define DC_LOG_HW_SET_MODE(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_HW_RESUME_S3(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_HW_AUDIO(...) pr_debug("[HW_AUDIO]:"__VA_ARGS__)
+#define DC_LOG_HW_HPD_IRQ(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_MST(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_SCALER(...) pr_debug("[SCALER]:"__VA_ARGS__)
+#define DC_LOG_BIOS(...) pr_debug("[BIOS]:"__VA_ARGS__)
+#define DC_LOG_BANDWIDTH_CALCS(...) pr_debug("[BANDWIDTH_CALCS]:"__VA_ARGS__)
+#define DC_LOG_BANDWIDTH_VALIDATION(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_I2C_AUX(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_SYNC(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_BACKLIGHT(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_FEATURE_OVERRIDE(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DETECTION_EDID_PARSER(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DETECTION_DP_CAPS(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_RESOURCE(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DML(...) pr_debug("[DML]:"__VA_ARGS__)
+#define DC_LOG_EVENT_MODE_SET(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_EVENT_DETECTION(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_EVENT_LINK_TRAINING(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_EVENT_LINK_LOSS(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_EVENT_UNDERFLOW(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_IF_TRACE(...) pr_debug("[IF_TRACE]:"__VA_ARGS__)
+#define DC_LOG_PERF_TRACE(...) DRM_DEBUG_KMS(__VA_ARGS__)
 
 struct dal_logger;
 

From 01fe3e4876d3799b37e6c712dcfed7cc2cafa3f0 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 15 Mar 2018 13:34:16 -0400
Subject: [PATCH 0358/1286] drm/amd/display: Add vmax/min_sel prints to
 dcn10_log_hw_state

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 ++++--
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c         | 6 ++++++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h         | 2 ++
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index de5293dc4db3..f3341a2399fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -165,7 +165,7 @@ void dcn10_log_hw_state(struct dc *dc)
 	}
 	DTN_INFO("\n");
 
-	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin"
+	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel"
 			"  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow\n");
 
 	for (i = 0; i < pool->timing_generator_count; i++) {
@@ -178,7 +178,7 @@ void dcn10_log_hw_state(struct dc *dc)
 		if ((s.otg_enabled & 1) == 0)
 			continue;
 
-		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %5d %5d %5d"
+		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
 				" %5d %5d %5d %5d  %9d\n",
 				tg->inst,
 				s.v_blank_start,
@@ -188,6 +188,8 @@ void dcn10_log_hw_state(struct dc *dc)
 				s.v_sync_a_pol,
 				s.v_total_max,
 				s.v_total_min,
+				s.v_total_max_sel,
+				s.v_total_min_sel,
 				s.h_blank_start,
 				s.h_blank_end,
 				s.h_sync_a_start,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index dc921307874a..2c5dbece928e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1229,6 +1229,12 @@ void optc1_read_otg_state(struct optc *optc1,
 	REG_GET(OTG_V_TOTAL_MIN,
 			OTG_V_TOTAL_MIN, &s->v_total_min);
 
+	REG_GET(OTG_V_TOTAL_CONTROL,
+			OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel);
+
+	REG_GET(OTG_V_TOTAL_CONTROL,
+			OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel);
+
 	REG_GET_2(OTG_V_SYNC_A,
 			OTG_V_SYNC_A_START, &s->v_sync_a_start,
 			OTG_V_SYNC_A_END, &s->v_sync_a_end);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 5a9a73d69fd6..89e09e5327a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -406,6 +406,8 @@ struct dcn_otg_state {
 	uint32_t v_total;
 	uint32_t v_total_max;
 	uint32_t v_total_min;
+	uint32_t v_total_min_sel;
+	uint32_t v_total_max_sel;
 	uint32_t v_sync_a_start;
 	uint32_t v_sync_a_end;
 	uint32_t h_blank_start;

From 4c61af8afe855fcf65a09d47c8e330bb1fd1fb4a Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Fri, 23 Mar 2018 13:39:27 -0400
Subject: [PATCH 0359/1286] drm/amd/display: Implement dm_get_timestamp

We use this to ensure we wait at least 500ms between eDP disable and
enable.

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 89342b48be6b..fe29125215b5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -37,8 +37,10 @@
 
 unsigned long long dm_get_timestamp(struct dc_context *ctx)
 {
-	/* TODO: return actual timestamp */
-	return 0;
+	struct timespec64 time;
+
+	getrawmonotonic64(&time);
+	return timespec64_to_ns(&time);
 }
 
 void dm_perf_trace_timestamp(const char *func_name, unsigned int line)

From 78d5d04d118d55b6c51ca787d5debb9ad1b8a391 Mon Sep 17 00:00:00 2001
From: Charlene Liu <charlene.liu@amd.com>
Date: Tue, 20 Mar 2018 14:53:04 -0400
Subject: [PATCH 0360/1286] drm/amd/display: add delay between panel pwr off to
 on.

Per the eDP 1.4 spec, there must be at least a 500ms delay between
eDP power off and power on.
This change records a timestamp when eDP power is switched off and
uses it to compute the elapsed time when power is switched back on.
If less than 500ms have elapsed, wait for the remainder.
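
A minimal sketch of the resulting wait logic (simplified from the
hwss_edp_power_control() change below; names follow the diff):

	unsigned long long now = dm_get_timestamp(ctx);
	unsigned long long elapsed_ms =
		dm_get_elapse_time_in_ns(ctx, now,
			link->link_trace.time_stamp.edp_poweroff) / 1000000;

	/* No recorded power-off yet: assume the worst case and wait the
	 * full 500ms; otherwise only wait out the remainder of the window.
	 */
	if (link->link_trace.time_stamp.edp_poweroff == 0)
		msleep(500);
	else if (elapsed_ms < 500)
		msleep(500 - elapsed_ms);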

Signed-off-by: Charlene Liu <charlene.liu@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../display/amdgpu_dm/amdgpu_dm_services.c    |  7 +++++
 drivers/gpu/drm/amd/display/dc/dc_link.h      |  9 ++++++
 .../display/dc/dce110/dce110_hw_sequencer.c   | 29 ++++++++++++++++++-
 drivers/gpu/drm/amd/display/dc/dm_services.h  |  4 +++
 4 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index fe29125215b5..0229c7edb8ad 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -43,6 +43,13 @@ unsigned long long dm_get_timestamp(struct dc_context *ctx)
 	return timespec64_to_ns(&time);
 }
 
+unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+		unsigned long long current_time_stamp,
+		unsigned long long last_time_stamp)
+{
+	return current_time_stamp - last_time_stamp;
+}
+
 void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
 {
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index fb4d9eafdc6e..eeff98741293 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -51,6 +51,14 @@ struct link_mst_stream_allocation_table {
 	struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
 };
 
+struct time_stamp {
+	uint64_t edp_poweroff;
+	uint64_t edp_poweron;
+};
+
+struct link_trace {
+	struct time_stamp time_stamp;
+};
 /*
  * A link contains one or more sinks and their connected status.
  * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -114,6 +122,7 @@ struct dc_link {
 
 	struct dc_link_status link_status;
 
+	struct link_trace link_trace;
 };
 
 const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index e8df50f30e5b..db2d15dfb831 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -849,6 +849,28 @@ void hwss_edp_power_control(
 
 	if (power_up != is_panel_powered_on(hwseq)) {
 		/* Send VBIOS command to prompt eDP panel power */
+		if (power_up) {
+			unsigned long long current_ts = dm_get_timestamp(ctx);
+			unsigned long long duration_in_ms =
+					dm_get_elapse_time_in_ns(
+							ctx,
+							current_ts,
+							link->link_trace.time_stamp.edp_poweroff) / 1000000;
+			unsigned long long wait_time_ms = 0;
+
+			/* max 500ms from LCDVDD off to on */
+			if (link->link_trace.time_stamp.edp_poweroff == 0)
+				wait_time_ms = 500;
+			else if (duration_in_ms < 500)
+				wait_time_ms = 500 - duration_in_ms;
+
+			if (wait_time_ms) {
+				msleep(wait_time_ms);
+				dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
+						__func__, wait_time_ms);
+			}
+
+		}
 
 		DC_LOG_HW_RESUME_S3(
 				"%s: Panel Power action: %s\n",
@@ -862,9 +884,14 @@ void hwss_edp_power_control(
 		cntl.coherent = false;
 		cntl.lanes_number = LANE_COUNT_FOUR;
 		cntl.hpd_sel = link->link_enc->hpd_source;
-
 		bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
 
+		if (!power_up)
+			/*save driver power off time stamp*/
+			link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx);
+		else
+			link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx);
+
 		if (bp_result != BP_RESULT_OK)
 			DC_LOG_ERROR(
 					"%s: Panel Power bp_result: %d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 22e7ee7dcd26..8eafe1af8a5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -341,6 +341,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
 
 unsigned long long dm_get_timestamp(struct dc_context *ctx);
 
+unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+		unsigned long long current_time_stamp,
+		unsigned long long last_time_stamp);
+
 /*
  * performance tracing
  */

From 1402c605173bbbb1c2f7e615a1708a6ee61c69c3 Mon Sep 17 00:00:00 2001
From: Eric Yang <Eric.Yang2@amd.com>
Date: Wed, 14 Mar 2018 17:56:58 -0400
Subject: [PATCH 0361/1286] drm/amd/display: Set all update flags when we have
 full update

To prevent future optimization-related bugs, just set all update
flags when we have a full update, since we know we want to reprogram
everything in that case.
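
For context, a hedged sketch of why writing .raw works (the field list
here is illustrative, not the real definition): update_flags overlays
its named bits on a single raw word, so raw = 0xFFFFFFFF marks every
flag at once rather than just full_update:

	union surface_update_flags {
		struct {
			uint32_t full_update : 1;
			uint32_t addr_update : 1;
			/* ... one bit per property that may need reprogramming ... */
		} bits;
		uint32_t raw;
	};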

Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6f4ad67ffca6..b331d9e78cdb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1208,7 +1208,7 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
 	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
 	if (type == UPDATE_TYPE_FULL)
 		for (i = 0; i < surface_count; i++)
-			updates[i].surface->update_flags.bits.full_update = 1;
+			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
 
 	return type;
 }

From 0c41891c81c017b5040b211f0b294ff5eb440d44 Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Mon, 19 Mar 2018 14:41:59 -0400
Subject: [PATCH 0362/1286] drm/amd/display: Refactor stream encoder for HW
 review

Move the DCN1 stream encoder implementation to a new file (instead
of the common dce_stream_encoder.c).
Clean up code that differed only because of register definition
differences.

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/Makefile |    2 +-
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |   36 +-
 .../display/dc/dcn10/dcn10_stream_encoder.c   | 1505 +++++++++++++++++
 .../display/dc/dcn10/dcn10_stream_encoder.h   |  584 +++++++
 4 files changed, 2101 insertions(+), 26 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 5469bdfe19f3..5c69743a4b4f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -26,7 +26,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
 		dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
 		dcn10_hubp.o dcn10_mpc.o \
 		dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
-		dcn10_hubbub.o
+		dcn10_hubbub.o dcn10_stream_encoder.o
 
 AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 7ad290cbc730..f305f65675d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -39,7 +39,7 @@
 #include "dce110/dce110_hw_sequencer.h"
 #include "dcn10/dcn10_opp.h"
 #include "dce/dce_link_encoder.h"
-#include "dce/dce_stream_encoder.h"
+#include "dcn10/dcn10_stream_encoder.h"
 #include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
 #include "dce/dce_audio.h"
@@ -166,36 +166,22 @@ static const struct dce_abm_mask abm_mask = {
 
 #define stream_enc_regs(id)\
 [id] = {\
-	SE_DCN_REG_LIST(id),\
-	.TMDS_CNTL = 0,\
-	.AFMT_AVI_INFO0 = 0,\
-	.AFMT_AVI_INFO1 = 0,\
-	.AFMT_AVI_INFO2 = 0,\
-	.AFMT_AVI_INFO3 = 0,\
+	SE_DCN_REG_LIST(id)\
 }
 
-static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
 	stream_enc_regs(0),
 	stream_enc_regs(1),
 	stream_enc_regs(2),
 	stream_enc_regs(3),
 };
 
-static const struct dce_stream_encoder_shift se_shift = {
+static const struct dcn10_stream_encoder_shift se_shift = {
 		SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
 };
 
-static const struct dce_stream_encoder_mask se_mask = {
-		SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
-		.AFMT_GENERIC0_UPDATE = 0,
-		.AFMT_GENERIC2_UPDATE = 0,
-		.DP_DYN_RANGE = 0,
-		.DP_YCBCR_RANGE = 0,
-		.HDMI_AVI_INFO_SEND = 0,
-		.HDMI_AVI_INFO_CONT = 0,
-		.HDMI_AVI_INFO_LINE = 0,
-		.DP_SEC_AVI_ENABLE = 0,
-		.AFMT_AVI_INFO_VERSION = 0
+static const struct dcn10_stream_encoder_mask se_mask = {
+		SE_COMMON_MASK_SH_LIST_DCN10(_MASK)
 };
 
 #define audio_regs(id)\
@@ -653,16 +639,16 @@ static struct stream_encoder *dcn10_stream_encoder_create(
 	enum engine_id eng_id,
 	struct dc_context *ctx)
 {
-	struct dce110_stream_encoder *enc110 =
-		kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+	struct dcn10_stream_encoder *enc1 =
+		kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
 
-	if (!enc110)
+	if (!enc1)
 		return NULL;
 
-	dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+	dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
 					&stream_enc_regs[eng_id],
 					&se_shift, &se_mask);
-	return &enc110->base;
+	return &enc1->base;
 }
 
 static const struct dce_hwseq_registers hwseq_reg = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
new file mode 100644
index 000000000000..0413c707b921
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -0,0 +1,1505 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "dcn10_stream_encoder.h"
+
+#include "reg_helper.h"
+#define DC_LOGGER \
+		enc1->base.ctx->logger
+enum DP_PIXEL_ENCODING {
+DP_PIXEL_ENCODING_RGB444                 = 0x00000000,
+DP_PIXEL_ENCODING_YCBCR422               = 0x00000001,
+DP_PIXEL_ENCODING_YCBCR444               = 0x00000002,
+DP_PIXEL_ENCODING_RGB_WIDE_GAMUT         = 0x00000003,
+DP_PIXEL_ENCODING_Y_ONLY                 = 0x00000004,
+DP_PIXEL_ENCODING_YCBCR420               = 0x00000005,
+DP_PIXEL_ENCODING_RESERVED               = 0x00000006,
+};
+
+
+enum DP_COMPONENT_DEPTH {
+DP_COMPONENT_DEPTH_6BPC                  = 0x00000000,
+DP_COMPONENT_DEPTH_8BPC                  = 0x00000001,
+DP_COMPONENT_DEPTH_10BPC                 = 0x00000002,
+DP_COMPONENT_DEPTH_12BPC                 = 0x00000003,
+DP_COMPONENT_DEPTH_16BPC                 = 0x00000004,
+DP_COMPONENT_DEPTH_RESERVED              = 0x00000005,
+};
+
+
+#define REG(reg)\
+	(enc1->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	enc1->se_shift->field_name, enc1->se_mask->field_name
+
+#define VBI_LINE_0 0
+#define DP_BLANK_MAX_RETRY 20
+#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
+
+
+enum {
+	DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define CTX \
+	enc1->base.ctx
+
+static void enc1_update_generic_info_packet(
+	struct dcn10_stream_encoder *enc1,
+	uint32_t packet_index,
+	const struct dc_info_packet *info_packet)
+{
+	uint32_t regval;
+	/* TODOFPGA Figure out a proper number for max_retries polling for lock
+	 * use 50 for now.
+	 */
+	uint32_t max_retries = 50;
+
+	/* we need to turn on the clock before programming the AFMT block */
+	REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+	if (packet_index >= 8)
+		ASSERT(0);
+
+	/* poll dig_update_lock is not locked -> asic internal signal
+	 * assume otg master lock will unlock it
+	 */
+/*		REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
+			0, 10, max_retries);*/
+
+	/* check if HW reading GSP memory */
+	REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
+			0, 10, max_retries);
+
+	/* If HW is still reading GSP memory after the wait above,
+	 * something is wrong. Clear the GSP memory access conflict and
+	 * note that SW is now writing to GSP memory.
+	 */
+	REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
+
+	/* choose which generic packet to use */
+	regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+	REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
+			AFMT_GENERIC_INDEX, packet_index);
+
+	/* write generic packet header
+	 * (4th byte is for GENERIC0 only)
+	 */
+	REG_SET_4(AFMT_GENERIC_HDR, 0,
+			AFMT_GENERIC_HB0, info_packet->hb0,
+			AFMT_GENERIC_HB1, info_packet->hb1,
+			AFMT_GENERIC_HB2, info_packet->hb2,
+			AFMT_GENERIC_HB3, info_packet->hb3);
+
+	/* write generic packet contents
+	 * (we never use last 4 bytes)
+	 * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
+	 */
+	{
+		const uint32_t *content =
+			(const uint32_t *) &info_packet->sb[0];
+
+		REG_WRITE(AFMT_GENERIC_0, *content++);
+		REG_WRITE(AFMT_GENERIC_1, *content++);
+		REG_WRITE(AFMT_GENERIC_2, *content++);
+		REG_WRITE(AFMT_GENERIC_3, *content++);
+		REG_WRITE(AFMT_GENERIC_4, *content++);
+		REG_WRITE(AFMT_GENERIC_5, *content++);
+		REG_WRITE(AFMT_GENERIC_6, *content++);
+		REG_WRITE(AFMT_GENERIC_7, *content);
+	}
+
+	switch (packet_index) {
+	case 0:
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+				AFMT_GENERIC0_FRAME_UPDATE, 1);
+		break;
+	case 1:
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+				AFMT_GENERIC1_FRAME_UPDATE, 1);
+		break;
+	case 2:
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+				AFMT_GENERIC2_FRAME_UPDATE, 1);
+		break;
+	case 3:
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+				AFMT_GENERIC3_FRAME_UPDATE, 1);
+		break;
+	case 4:
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+				AFMT_GENERIC4_FRAME_UPDATE, 1);
+		break;
+	case 5:
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+				AFMT_GENERIC5_FRAME_UPDATE, 1);
+		break;
+	case 6:
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+				AFMT_GENERIC6_FRAME_UPDATE, 1);
+		break;
+	case 7:
+		REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+				AFMT_GENERIC7_FRAME_UPDATE, 1);
+		break;
+	default:
+		break;
+	}
+}
+
+static void enc1_update_hdmi_info_packet(
+	struct dcn10_stream_encoder *enc1,
+	uint32_t packet_index,
+	const struct dc_info_packet *info_packet)
+{
+	uint32_t cont, send, line;
+
+	if (info_packet->valid) {
+		enc1_update_generic_info_packet(
+			enc1,
+			packet_index,
+			info_packet);
+
+		/* enable transmission of packet(s) -
+		 * packet transmission begins on the next frame
+		 */
+		cont = 1;
+		/* send packet(s) every frame */
+		send = 1;
+		/* select line number to send packets on */
+		line = 2;
+	} else {
+		cont = 0;
+		send = 0;
+		line = 0;
+	}
+
+	/* choose which generic packet control to use */
+	switch (packet_index) {
+	case 0:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+				HDMI_GENERIC0_CONT, cont,
+				HDMI_GENERIC0_SEND, send,
+				HDMI_GENERIC0_LINE, line);
+		break;
+	case 1:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+				HDMI_GENERIC1_CONT, cont,
+				HDMI_GENERIC1_SEND, send,
+				HDMI_GENERIC1_LINE, line);
+		break;
+	case 2:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+				HDMI_GENERIC0_CONT, cont,
+				HDMI_GENERIC0_SEND, send,
+				HDMI_GENERIC0_LINE, line);
+		break;
+	case 3:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+				HDMI_GENERIC1_CONT, cont,
+				HDMI_GENERIC1_SEND, send,
+				HDMI_GENERIC1_LINE, line);
+		break;
+	case 4:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+				HDMI_GENERIC0_CONT, cont,
+				HDMI_GENERIC0_SEND, send,
+				HDMI_GENERIC0_LINE, line);
+		break;
+	case 5:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+				HDMI_GENERIC1_CONT, cont,
+				HDMI_GENERIC1_SEND, send,
+				HDMI_GENERIC1_LINE, line);
+		break;
+	case 6:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+				HDMI_GENERIC0_CONT, cont,
+				HDMI_GENERIC0_SEND, send,
+				HDMI_GENERIC0_LINE, line);
+		break;
+	case 7:
+		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+				HDMI_GENERIC1_CONT, cont,
+				HDMI_GENERIC1_SEND, send,
+				HDMI_GENERIC1_LINE, line);
+		break;
+	default:
+		/* invalid HW packet index */
+		DC_LOG_WARNING(
+			"Invalid HW packet index: %s()\n",
+			__func__);
+		return;
+	}
+}
+
+/* setup stream encoder in dp mode */
+static void enc1_stream_encoder_dp_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	enum dc_color_space output_color_space)
+{
+	uint32_t h_active_start;
+	uint32_t v_active_start;
+	uint32_t misc0 = 0;
+	uint32_t misc1 = 0;
+	uint32_t h_blank;
+	uint32_t h_back_porch;
+	uint8_t synchronous_clock = 0; /* asynchronous mode */
+	uint8_t colorimetry_bpc;
+	uint8_t dynamic_range_rgb = 0; /*full range*/
+	uint8_t dynamic_range_ycbcr = 1; /*bt709*/
+
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
+
+	/* set pixel encoding */
+	switch (crtc_timing->pixel_encoding) {
+	case PIXEL_ENCODING_YCBCR422:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+				DP_PIXEL_ENCODING_YCBCR422);
+		break;
+	case PIXEL_ENCODING_YCBCR444:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+				DP_PIXEL_ENCODING_YCBCR444);
+
+		if (crtc_timing->flags.Y_ONLY)
+			if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
+				/* HW testing only, no use case yet.
+				 * Color depth of Y-only could be
+				 * 8, 10, 12, 16 bits
+				 */
+				REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+						DP_PIXEL_ENCODING_Y_ONLY);
+		/* Note: DP_MSA_MISC1 bit 7 is the indicator
+		 * of Y-only mode.
+		 * This bit is set in HW if register
+		 * DP_PIXEL_ENCODING is programmed to 0x4
+		 */
+		break;
+	case PIXEL_ENCODING_YCBCR420:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+				DP_PIXEL_ENCODING_YCBCR420);
+		REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
+		break;
+	default:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+				DP_PIXEL_ENCODING_RGB444);
+		break;
+	}
+
+	misc1 = REG_READ(DP_MSA_MISC);
+
+	/* set color depth */
+
+	switch (crtc_timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				0);
+		break;
+	case COLOR_DEPTH_888:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_8BPC);
+		break;
+	case COLOR_DEPTH_101010:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_10BPC);
+
+		break;
+	case COLOR_DEPTH_121212:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_12BPC);
+		break;
+	default:
+		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+				DP_COMPONENT_DEPTH_6BPC);
+		break;
+	}
+
+	/* set dynamic range and YCbCr range */
+
+	switch (crtc_timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+		colorimetry_bpc = 0;
+		break;
+	case COLOR_DEPTH_888:
+		colorimetry_bpc = 1;
+		break;
+	case COLOR_DEPTH_101010:
+		colorimetry_bpc = 2;
+		break;
+	case COLOR_DEPTH_121212:
+		colorimetry_bpc = 3;
+		break;
+	default:
+		colorimetry_bpc = 0;
+		break;
+	}
+
+	misc0 = misc0 | synchronous_clock;
+	misc0 = colorimetry_bpc << 5;
+
+	switch (output_color_space) {
+	case COLOR_SPACE_SRGB:
+		misc0 = misc0 | 0x0;
+		misc1 = misc1 & ~0x80; /* bit7 = 0*/
+		dynamic_range_rgb = 0; /*full range*/
+		break;
+	case COLOR_SPACE_SRGB_LIMITED:
+		misc0 = misc0 | 0x8; /* bit3=1 */
+		misc1 = misc1 & ~0x80; /* bit7 = 0*/
+		dynamic_range_rgb = 1; /*limited range*/
+		break;
+	case COLOR_SPACE_YCBCR601:
+	case COLOR_SPACE_YCBCR601_LIMITED:
+		misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
+		misc1 = misc1 & ~0x80; /* bit7 = 0*/
+		dynamic_range_ycbcr = 0; /*bt601*/
+		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+			misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+		else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+			misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+		break;
+	case COLOR_SPACE_YCBCR709:
+	case COLOR_SPACE_YCBCR709_LIMITED:
+		misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
+		misc1 = misc1 & ~0x80; /* bit7 = 0*/
+		dynamic_range_ycbcr = 1; /*bt709*/
+		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+			misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+		else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+			misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+		break;
+	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+		dynamic_range_rgb = 1; /*limited range*/
+		break;
+	case COLOR_SPACE_2020_RGB_FULLRANGE:
+	case COLOR_SPACE_2020_YCBCR:
+	case COLOR_SPACE_XR_RGB:
+	case COLOR_SPACE_MSREF_SCRGB:
+	case COLOR_SPACE_ADOBERGB:
+	case COLOR_SPACE_DCIP3:
+	case COLOR_SPACE_XV_YCC_709:
+	case COLOR_SPACE_XV_YCC_601:
+	case COLOR_SPACE_DISPLAYNATIVE:
+	case COLOR_SPACE_DOLBYVISION:
+	case COLOR_SPACE_APPCTRL:
+	case COLOR_SPACE_CUSTOMPOINTS:
+	case COLOR_SPACE_UNKNOWN:
+		/* do nothing */
+		break;
+	}
+
+	REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
+	REG_WRITE(DP_MSA_MISC, misc1);   /* MSA_MISC1 */
+
+	/* dcn new register
+	 * dc_crtc_timing is vesa dmt struct. data from edid
+	 */
+	REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
+			DP_MSA_HTOTAL, crtc_timing->h_total,
+			DP_MSA_VTOTAL, crtc_timing->v_total);
+
+	/* calculate from vesa timing parameters
+	 * h_active_start related to leading edge of sync
+	 */
+
+	h_blank = crtc_timing->h_total - crtc_timing->h_border_left -
+			crtc_timing->h_addressable - crtc_timing->h_border_right;
+
+	h_back_porch = h_blank - crtc_timing->h_front_porch -
+			crtc_timing->h_sync_width;
+
+	/* start at beginning of left border */
+	h_active_start = crtc_timing->h_sync_width + h_back_porch;
+
+
+	v_active_start = crtc_timing->v_total - crtc_timing->v_border_top -
+			crtc_timing->v_addressable - crtc_timing->v_border_bottom -
+			crtc_timing->v_front_porch;
+
+
+	/* start at beginning of left border */
+	REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
+		DP_MSA_HSTART, h_active_start,
+		DP_MSA_VSTART, v_active_start);
+
+	REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
+			DP_MSA_HSYNCWIDTH,
+			crtc_timing->h_sync_width,
+			DP_MSA_HSYNCPOLARITY,
+			!crtc_timing->flags.HSYNC_POSITIVE_POLARITY,
+			DP_MSA_VSYNCWIDTH,
+			crtc_timing->v_sync_width,
+			DP_MSA_VSYNCPOLARITY,
+			!crtc_timing->flags.VSYNC_POSITIVE_POLARITY);
+
+	/* HWIDTH includes border or overscan */
+	REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
+		DP_MSA_HWIDTH, crtc_timing->h_border_left +
+		crtc_timing->h_addressable + crtc_timing->h_border_right,
+		DP_MSA_VHEIGHT, crtc_timing->v_border_top +
+		crtc_timing->v_addressable + crtc_timing->v_border_bottom);
+}
+
+static void enc1_stream_encoder_set_stream_attribute_helper(
+		struct dcn10_stream_encoder *enc1,
+		struct dc_crtc_timing *crtc_timing)
+{
+	switch (crtc_timing->pixel_encoding) {
+	case PIXEL_ENCODING_YCBCR422:
+		REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
+		break;
+	default:
+		REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
+		break;
+	}
+	REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
+}
+
+/* setup stream encoder in hdmi mode */
+static void enc1_stream_encoder_hdmi_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	int actual_pix_clk_khz,
+	bool enable_audio)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	struct bp_encoder_control cntl = {0};
+
+	cntl.action = ENCODER_CONTROL_SETUP;
+	cntl.engine_id = enc1->base.id;
+	cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+	cntl.enable_dp_audio = enable_audio;
+	cntl.pixel_clock = actual_pix_clk_khz;
+	cntl.lanes_number = LANE_COUNT_FOUR;
+
+	if (enc1->base.bp->funcs->encoder_control(
+			enc1->base.bp, &cntl) != BP_RESULT_OK)
+		return;
+
+	enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
+
+	/* setup HDMI engine */
+	REG_UPDATE_5(HDMI_CONTROL,
+		HDMI_PACKET_GEN_VERSION, 1,
+		HDMI_KEEPOUT_MODE, 1,
+		HDMI_DEEP_COLOR_ENABLE, 0,
+		HDMI_DATA_SCRAMBLE_EN, 0,
+		HDMI_CLOCK_CHANNEL_RATE, 0);
+
+
+	switch (crtc_timing->display_color_depth) {
+	case COLOR_DEPTH_888:
+		REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+		break;
+	case COLOR_DEPTH_101010:
+		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+			REG_UPDATE_2(HDMI_CONTROL,
+					HDMI_DEEP_COLOR_DEPTH, 1,
+					HDMI_DEEP_COLOR_ENABLE, 0);
+		} else {
+			REG_UPDATE_2(HDMI_CONTROL,
+					HDMI_DEEP_COLOR_DEPTH, 1,
+					HDMI_DEEP_COLOR_ENABLE, 1);
+			}
+		break;
+	case COLOR_DEPTH_121212:
+		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+			REG_UPDATE_2(HDMI_CONTROL,
+					HDMI_DEEP_COLOR_DEPTH, 2,
+					HDMI_DEEP_COLOR_ENABLE, 0);
+		} else {
+			REG_UPDATE_2(HDMI_CONTROL,
+					HDMI_DEEP_COLOR_DEPTH, 2,
+					HDMI_DEEP_COLOR_ENABLE, 1);
+			}
+		break;
+	case COLOR_DEPTH_161616:
+		REG_UPDATE_2(HDMI_CONTROL,
+				HDMI_DEEP_COLOR_DEPTH, 3,
+				HDMI_DEEP_COLOR_ENABLE, 1);
+		break;
+	default:
+		break;
+	}
+
+	if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
+		/* enable HDMI data scrambler
+		 * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
+		 * Clock channel frequency is 1/4 of character rate.
+		 */
+		REG_UPDATE_2(HDMI_CONTROL,
+			HDMI_DATA_SCRAMBLE_EN, 1,
+			HDMI_CLOCK_CHANNEL_RATE, 1);
+	} else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
+
+		/* TODO: New feature for DCE11, still need to implement */
+
+		/* enable HDMI data scrambler
+		 * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
+		 * Clock channel frequency is the same
+		 * as character rate
+		 */
+		REG_UPDATE_2(HDMI_CONTROL,
+			HDMI_DATA_SCRAMBLE_EN, 1,
+			HDMI_CLOCK_CHANNEL_RATE, 0);
+	}
+
+
+	REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
+		HDMI_GC_CONT, 1,
+		HDMI_GC_SEND, 1,
+		HDMI_NULL_SEND, 1);
+
+	/* following belongs to audio */
+	REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+
+	REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+	REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
+				VBI_LINE_0 + 2);
+
+	REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
+}
+
+/* setup stream encoder in dvi mode */
+static void enc1_stream_encoder_dvi_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	bool is_dual_link)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	struct bp_encoder_control cntl = {0};
+
+	cntl.action = ENCODER_CONTROL_SETUP;
+	cntl.engine_id = enc1->base.id;
+	cntl.signal = is_dual_link ?
+			SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
+	cntl.enable_dp_audio = false;
+	cntl.pixel_clock = crtc_timing->pix_clk_khz;
+	cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
+
+	if (enc1->base.bp->funcs->encoder_control(
+			enc1->base.bp, &cntl) != BP_RESULT_OK)
+		return;
+
+	ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
+	ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
+	enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
+}
+
+static void enc1_stream_encoder_set_mst_bandwidth(
+	struct stream_encoder *enc,
+	struct fixed31_32 avg_time_slots_per_mtp)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	uint32_t x = dal_fixed31_32_floor(
+		avg_time_slots_per_mtp);
+	uint32_t y = dal_fixed31_32_ceil(
+		dal_fixed31_32_shl(
+			dal_fixed31_32_sub_int(
+				avg_time_slots_per_mtp,
+				x),
+			26));
+
+	REG_SET_2(DP_MSE_RATE_CNTL, 0,
+		DP_MSE_RATE_X, x,
+		DP_MSE_RATE_Y, y);
+
+	/* wait for update to be completed on the link */
+	/* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */
+	/* is reset to 0 (not pending) */
+	REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
+			0,
+			10, DP_MST_UPDATE_MAX_RETRY);
+}
+
+static void enc1_stream_encoder_update_hdmi_info_packets(
+	struct stream_encoder *enc,
+	const struct encoder_info_frame *info_frame)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	/* for bring up, disable dp double  TODO */
+	REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
+
+	enc1_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
+	enc1_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
+	enc1_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
+	enc1_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
+	enc1_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
+}
+
+static void enc1_stream_encoder_stop_hdmi_info_packets(
+	struct stream_encoder *enc)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	/* stop generic packets 0 & 1 on HDMI */
+	REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
+		HDMI_GENERIC1_CONT, 0,
+		HDMI_GENERIC1_LINE, 0,
+		HDMI_GENERIC1_SEND, 0,
+		HDMI_GENERIC0_CONT, 0,
+		HDMI_GENERIC0_LINE, 0,
+		HDMI_GENERIC0_SEND, 0);
+
+	/* stop generic packets 2 & 3 on HDMI */
+	REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
+		HDMI_GENERIC0_CONT, 0,
+		HDMI_GENERIC0_LINE, 0,
+		HDMI_GENERIC0_SEND, 0,
+		HDMI_GENERIC1_CONT, 0,
+		HDMI_GENERIC1_LINE, 0,
+		HDMI_GENERIC1_SEND, 0);
+
+	/* stop generic packets 4 & 5 on HDMI */
+	REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
+		HDMI_GENERIC0_CONT, 0,
+		HDMI_GENERIC0_LINE, 0,
+		HDMI_GENERIC0_SEND, 0,
+		HDMI_GENERIC1_CONT, 0,
+		HDMI_GENERIC1_LINE, 0,
+		HDMI_GENERIC1_SEND, 0);
+
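+	/* stop generic packets 6 & 7 on HDMI */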
+	REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0,
+		HDMI_GENERIC0_CONT, 0,
+		HDMI_GENERIC0_LINE, 0,
+		HDMI_GENERIC0_SEND, 0,
+		HDMI_GENERIC1_CONT, 0,
+		HDMI_GENERIC1_LINE, 0,
+		HDMI_GENERIC1_SEND, 0);
+}
+
+static void enc1_stream_encoder_update_dp_info_packets(
+	struct stream_encoder *enc,
+	const struct encoder_info_frame *info_frame)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	uint32_t value = REG_READ(DP_SEC_CNTL);
+
+	if (info_frame->vsc.valid)
+		enc1_update_generic_info_packet(
+					enc1,
+					0,  /* packetIndex */
+					&info_frame->vsc);
+
+	if (info_frame->spd.valid)
+		enc1_update_generic_info_packet(
+				enc1,
+				2,  /* packetIndex */
+				&info_frame->spd);
+
+	if (info_frame->hdrsmd.valid)
+		enc1_update_generic_info_packet(
+				enc1,
+				3,  /* packetIndex */
+				&info_frame->hdrsmd);
+
+	/* enable/disable transmission of packet(s).
+	 * If enabled, packet transmission begins on the next frame
+	 */
+	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
+	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
+	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+
+	/* This bit is the master enable bit.
+	 * When enabling secondary stream engine,
+	 * this master bit must also be set.
+	 * This register is shared with the audio info frame.
+	 * Therefore we need to set the master enable bit
+	 * if at least one of the fields is not 0.
+	 */
+	if (value)
+		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void enc1_stream_encoder_stop_dp_info_packets(
+	struct stream_encoder *enc)
+{
+	/* stop generic packets on DP */
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	uint32_t value = REG_READ(DP_SEC_CNTL);
+
+	REG_SET_10(DP_SEC_CNTL, 0,
+		DP_SEC_GSP0_ENABLE, 0,
+		DP_SEC_GSP1_ENABLE, 0,
+		DP_SEC_GSP2_ENABLE, 0,
+		DP_SEC_GSP3_ENABLE, 0,
+		DP_SEC_GSP4_ENABLE, 0,
+		DP_SEC_GSP5_ENABLE, 0,
+		DP_SEC_GSP6_ENABLE, 0,
+		DP_SEC_GSP7_ENABLE, 0,
+		DP_SEC_MPG_ENABLE, 0,
+		DP_SEC_STREAM_ENABLE, 0);
+
+	/* This register is shared with the audio info frame.
+	 * Therefore we need to keep the master enable bit set
+	 * if at least one of the fields is not 0.
+	 */
+
+	if (value)
+		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void enc1_stream_encoder_dp_blank(
+	struct stream_encoder *enc)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	uint32_t retries = 0;
+	uint32_t  reg1 = 0;
+	uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
+
+	/* Note: For CZ, we are changing driver default to disable
+	 * stream deferred to next VBLANK. If results are positive, we
+	 * will make the same change to all DCE versions. There are a
+	 * handful of panels that cannot handle disable stream at
+	 * HBLANK and will result in a white line flash across the
+	 * screen on stream disable.
+	 */
+	REG_GET(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, &reg1);
+	if ((reg1 & 0x1) == 0)
+		/*stream not enabled*/
+		return;
+	/* Specify the video stream disable point
+	 * (2 = start of the next vertical blank)
+	 */
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
+	/* Larger delay to wait until VBLANK - use max retry of
+	 * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+	 * a little more because we may not trust delay accuracy.
+	 */
+	max_retries = DP_BLANK_MAX_RETRY * 150;
+
+	/* disable DP stream */
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+	/* the encoder stops sending the video stream
+	 * at the start of the vertical blanking.
+	 * Poll for DP_VID_STREAM_STATUS == 0
+	 */
+
+	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
+			0,
+			10, max_retries);
+
+	ASSERT(retries <= max_retries);
+
+	/* Tell the DP encoder to ignore timing from CRTC, must be done after
+	 * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+	 * complete, stream status will be stuck in video stream enabled state,
+	 * i.e. DP_VID_STREAM_STATUS stuck at 1.
+	 */
+
+	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+}
+
+/* output video stream to link encoder */
+static void enc1_stream_encoder_dp_unblank(
+	struct stream_encoder *enc,
+	const struct encoder_unblank_param *param)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
+		uint32_t n_vid = 0x8000;
+		uint32_t m_vid;
+
+		/* M / N = Fstream / Flink
+		 * m_vid / n_vid = pixel rate / link rate
+		 */
+
+		uint64_t m_vid_l = n_vid;
+
+		m_vid_l *= param->pixel_clk_khz;
+		m_vid_l = div_u64(m_vid_l,
+			param->link_settings.link_rate
+				* LINK_RATE_REF_FREQ_IN_KHZ);
+
+		m_vid = (uint32_t) m_vid_l;
+
+		/* enable auto measurement */
+
+		REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
+
+		/* auto measurement needs 1 full 0x8000 symbol cycle to kick in,
+		 * therefore program an initial value for Mvid and Nvid
+		 */
+
+		REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
+
+		REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
+
+		REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+	}
+
+	/* set DIG_START to 0x1 to resync FIFO */
+
+	REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+
+	/* switch DP encoder to CRTC data */
+
+	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
+
+	/* wait 100us for DIG/DP logic to prime
+	 * (i.e. a few video lines)
+	 */
+	udelay(100);
+
+	/* the hardware would start sending video at the start of the next DP
+	 * frame (i.e. rising edge of the vblank).
+	 * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
+	 * register has no effect on enable transition! HW always guarantees
+	 * VID_STREAM enable at start of next frame, and this is not
+	 * programmable
+	 */
+
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+}
+
+static void enc1_stream_encoder_set_avmute(
+	struct stream_encoder *enc,
+	bool enable)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	unsigned int value = enable ? 1 : 0;
+
+	REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value);
+}
+
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
+
+#include "include/audio_types.h"
+
+/**
+* speakers_to_channels
+*
+* @brief
+*  translate speakers to channels
+*
+*  FL  - Front Left
+*  FR  - Front Right
+*  RL  - Rear Left
+*  RR  - Rear Right
+*  RC  - Rear Center
+*  FC  - Front Center
+*  FLC - Front Left Center
+*  FRC - Front Right Center
+*  RLC - Rear Left Center
+*  RRC - Rear Right Center
+*  LFE - Low Freq Effect
+*
+*               FC
+*          FLC      FRC
+*    FL                    FR
+*
+*                    LFE
+*              ()
+*
+*
+*    RL                    RR
+*          RLC      RRC
+*               RC
+*
+*             ch  8   7   6   5   4   3   2   1
+* 0b00000011      -   -   -   -   -   -   FR  FL
+* 0b00000111      -   -   -   -   -   LFE FR  FL
+* 0b00001011      -   -   -   -   FC  -   FR  FL
+* 0b00001111      -   -   -   -   FC  LFE FR  FL
+* 0b00010011      -   -   -   RC  -   -   FR  FL
+* 0b00010111      -   -   -   RC  -   LFE FR  FL
+* 0b00011011      -   -   -   RC  FC  -   FR  FL
+* 0b00011111      -   -   -   RC  FC  LFE FR  FL
+* 0b00110011      -   -   RR  RL  -   -   FR  FL
+* 0b00110111      -   -   RR  RL  -   LFE FR  FL
+* 0b00111011      -   -   RR  RL  FC  -   FR  FL
+* 0b00111111      -   -   RR  RL  FC  LFE FR  FL
+* 0b01110011      -   RC  RR  RL  -   -   FR  FL
+* 0b01110111      -   RC  RR  RL  -   LFE FR  FL
+* 0b01111011      -   RC  RR  RL  FC  -   FR  FL
+* 0b01111111      -   RC  RR  RL  FC  LFE FR  FL
+* 0b11110011      RRC RLC RR  RL  -   -   FR  FL
+* 0b11110111      RRC RLC RR  RL  -   LFE FR  FL
+* 0b11111011      RRC RLC RR  RL  FC  -   FR  FL
+* 0b11111111      RRC RLC RR  RL  FC  LFE FR  FL
+* 0b11000011      FRC FLC -   -   -   -   FR  FL
+* 0b11000111      FRC FLC -   -   -   LFE FR  FL
+* 0b11001011      FRC FLC -   -   FC  -   FR  FL
+* 0b11001111      FRC FLC -   -   FC  LFE FR  FL
+* 0b11010011      FRC FLC -   RC  -   -   FR  FL
+* 0b11010111      FRC FLC -   RC  -   LFE FR  FL
+* 0b11011011      FRC FLC -   RC  FC  -   FR  FL
+* 0b11011111      FRC FLC -   RC  FC  LFE FR  FL
+* 0b11110011      FRC FLC RR  RL  -   -   FR  FL
+* 0b11110111      FRC FLC RR  RL  -   LFE FR  FL
+* 0b11111011      FRC FLC RR  RL  FC  -   FR  FL
+* 0b11111111      FRC FLC RR  RL  FC  LFE FR  FL
+*
+* @param
+*  speakers - speaker information as it comes from CEA audio block
+*/
+/* translate speakers to channels */
+
+union audio_cea_channels {
+	uint8_t all;
+	struct audio_cea_channels_bits {
+		uint32_t FL:1;
+		uint32_t FR:1;
+		uint32_t LFE:1;
+		uint32_t FC:1;
+		uint32_t RL_RC:1;
+		uint32_t RR:1;
+		uint32_t RC_RLC_FLC:1;
+		uint32_t RRC_FRC:1;
+	} channels;
+};
+
+struct audio_clock_info {
+	/* pixel clock frequency*/
+	uint32_t pixel_clock_in_10khz;
+	/* N - 32KHz audio */
+	uint32_t n_32khz;
+	/* CTS - 32KHz audio*/
+	uint32_t cts_32khz;
+	uint32_t n_44khz;
+	uint32_t cts_44khz;
+	uint32_t n_48khz;
+	uint32_t cts_48khz;
+};
+
+/* 25.2MHz/1.001*/
+/* 25.2MHz/1.001*/
+/* 25.2MHz*/
+/* 27MHz */
+/* 27MHz*1.001*/
+/* 27MHz*1.001*/
+/* 54MHz*/
+/* 54MHz*1.001*/
+/* 74.25MHz/1.001*/
+/* 74.25MHz*/
+/* 148.5MHz/1.001*/
+/* 148.5MHz*/
+
+static const struct audio_clock_info audio_clock_info_table[16] = {
+	{2517, 4576, 28125, 7007, 31250, 6864, 28125},
+	{2518, 4576, 28125, 7007, 31250, 6864, 28125},
+	{2520, 4096, 25200, 6272, 28000, 6144, 25200},
+	{2700, 4096, 27000, 6272, 30000, 6144, 27000},
+	{2702, 4096, 27027, 6272, 30030, 6144, 27027},
+	{2703, 4096, 27027, 6272, 30030, 6144, 27027},
+	{5400, 4096, 54000, 6272, 60000, 6144, 54000},
+	{5405, 4096, 54054, 6272, 60060, 6144, 54054},
+	{7417, 11648, 210937, 17836, 234375, 11648, 140625},
+	{7425, 4096, 74250, 6272, 82500, 6144, 74250},
+	{14835, 11648, 421875, 8918, 234375, 5824, 140625},
+	{14850, 4096, 148500, 6272, 165000, 6144, 148500},
+	{29670, 5824, 421875, 4459, 234375, 5824, 281250},
+	{29700, 3072, 222750, 4704, 247500, 5120, 247500},
+	{59340, 5824, 843750, 8918, 937500, 5824, 562500},
+	{59400, 3072, 445500, 9408, 990000, 6144, 594000}
+};
+
+static const struct audio_clock_info audio_clock_info_table_36bpc[14] = {
+	{2517,  9152,  84375,  7007,  48875,  9152,  56250},
+	{2518,  9152,  84375,  7007,  48875,  9152,  56250},
+	{2520,  4096,  37800,  6272,  42000,  6144,  37800},
+	{2700,  4096,  40500,  6272,  45000,  6144,  40500},
+	{2702,  8192,  81081,  6272,  45045,  8192,  54054},
+	{2703,  8192,  81081,  6272,  45045,  8192,  54054},
+	{5400,  4096,  81000,  6272,  90000,  6144,  81000},
+	{5405,  4096,  81081,  6272,  90090,  6144,  81081},
+	{7417, 11648, 316406, 17836, 351562, 11648, 210937},
+	{7425, 4096, 111375,  6272, 123750,  6144, 111375},
+	{14835, 11648, 632812, 17836, 703125, 11648, 421875},
+	{14850, 4096, 222750,  6272, 247500,  6144, 222750},
+	{29670, 5824, 632812,  8918, 703125,  5824, 421875},
+	{29700, 4096, 445500,  4704, 371250,  5120, 371250}
+};
+
+static const struct audio_clock_info audio_clock_info_table_48bpc[14] = {
+	{2517,  4576,  56250,  7007,  62500,  6864,  56250},
+	{2518,  4576,  56250,  7007,  62500,  6864,  56250},
+	{2520,  4096,  50400,  6272,  56000,  6144,  50400},
+	{2700,  4096,  54000,  6272,  60000,  6144,  54000},
+	{2702,  4096,  54054,  6267,  60060,  8192,  54054},
+	{2703,  4096,  54054,  6272,  60060,  8192,  54054},
+	{5400,  4096, 108000,  6272, 120000,  6144, 108000},
+	{5405,  4096, 108108,  6272, 120120,  6144, 108108},
+	{7417, 11648, 421875, 17836, 468750, 11648, 281250},
+	{7425,  4096, 148500,  6272, 165000,  6144, 148500},
+	{14835, 11648, 843750,  8918, 468750, 11648, 281250},
+	{14850, 4096, 297000,  6272, 330000,  6144, 297000},
+	{29670, 5824, 843750,  4459, 468750,  5824, 562500},
+	{29700, 3072, 445500,  4704, 495000,  5120, 495000}
+};
+
+static union audio_cea_channels speakers_to_channels(
+	struct audio_speaker_flags speaker_flags)
+{
+	union audio_cea_channels cea_channels = {0};
+
+	/* these are one to one */
+	cea_channels.channels.FL = speaker_flags.FL_FR;
+	cea_channels.channels.FR = speaker_flags.FL_FR;
+	cea_channels.channels.LFE = speaker_flags.LFE;
+	cea_channels.channels.FC = speaker_flags.FC;
+
+	/* if Rear Left and Right exist move RC speaker to channel 7
+	 * otherwise to channel 5
+	 */
+	if (speaker_flags.RL_RR) {
+		cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+		cea_channels.channels.RR = speaker_flags.RL_RR;
+		cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+	} else {
+		cea_channels.channels.RL_RC = speaker_flags.RC;
+	}
+
+	/* FRONT Left Right Center and REAR Left Right Center are exclusive */
+	if (speaker_flags.FLC_FRC) {
+		cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+		cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+	} else {
+		cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+		cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+	}
+
+	return cea_channels;
+}
+
+static uint32_t calc_max_audio_packets_per_line(
+	const struct audio_crtc_info *crtc_info)
+{
+	uint32_t max_packets_per_line;
+
+	max_packets_per_line =
+		crtc_info->h_total - crtc_info->h_active;
+
+	if (crtc_info->pixel_repetition)
+		max_packets_per_line *= crtc_info->pixel_repetition;
+
+	/* for other hdmi features */
+	max_packets_per_line -= 58;
+	/* for Control Period */
+	max_packets_per_line -= 16;
+	/* Number of Audio Packets per Line */
+	max_packets_per_line /= 32;
+
+	return max_packets_per_line;
+}
+
+static void get_audio_clock_info(
+	enum dc_color_depth color_depth,
+	uint32_t crtc_pixel_clock_in_khz,
+	uint32_t actual_pixel_clock_in_khz,
+	struct audio_clock_info *audio_clock_info)
+{
+	const struct audio_clock_info *clock_info;
+	uint32_t index;
+	uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
+	uint32_t audio_array_size;
+
+	switch (color_depth) {
+	case COLOR_DEPTH_161616:
+		clock_info = audio_clock_info_table_48bpc;
+		audio_array_size = ARRAY_SIZE(
+				audio_clock_info_table_48bpc);
+		break;
+	case COLOR_DEPTH_121212:
+		clock_info = audio_clock_info_table_36bpc;
+		audio_array_size = ARRAY_SIZE(
+				audio_clock_info_table_36bpc);
+		break;
+	default:
+		clock_info = audio_clock_info_table;
+		audio_array_size = ARRAY_SIZE(
+				audio_clock_info_table);
+		break;
+	}
+
+	if (clock_info != NULL) {
+		/* search for exact pixel clock in table */
+		for (index = 0; index < audio_array_size; index++) {
+			if (clock_info[index].pixel_clock_in_10khz >
+				crtc_pixel_clock_in_10khz)
+				break;  /* not match */
+			else if (clock_info[index].pixel_clock_in_10khz ==
+					crtc_pixel_clock_in_10khz) {
+				/* match found */
+				*audio_clock_info = clock_info[index];
+				return;
+			}
+		}
+	}
+
+	/* not found */
+	if (actual_pixel_clock_in_khz == 0)
+		actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
+
+	/* See the HDMI spec: the table entry under
+	 * pixel clock of "Other".
+	 */
+	audio_clock_info->pixel_clock_in_10khz =
+			actual_pixel_clock_in_khz / 10;
+	audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
+	audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
+	audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
+
+	audio_clock_info->n_32khz = 4096;
+	audio_clock_info->n_44khz = 6272;
+	audio_clock_info->n_48khz = 6144;
+}
+
+static void enc1_se_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *audio_info)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	uint32_t speakers = 0;
+	uint32_t channels = 0;
+
+	ASSERT(audio_info);
+	if (audio_info == NULL)
+		/* This should not happen; return so we don't crash if it does */
+		return;
+
+	speakers = audio_info->flags.info.ALLSPEAKERS;
+	channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+	/* setup the audio stream source select (audio -> dig mapping) */
+	REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
+
+	/* Channel allocation */
+	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
+}
+
+static void enc1_se_setup_hdmi_audio(
+	struct stream_encoder *enc,
+	const struct audio_crtc_info *crtc_info)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	struct audio_clock_info audio_clock_info = {0};
+	uint32_t max_packets_per_line;
+
+	/* For now still do the calculation, although this field is ignored
+	 * when HDMI_PACKET_GEN_VERSION is set to 1 above
+	 */
+	max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
+
+	/* HDMI_AUDIO_PACKET_CONTROL */
+	REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
+			HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+			HDMI_AUDIO_DELAY_EN, 1);
+
+	/* AFMT_AUDIO_PACKET_CONTROL */
+	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+	/* AFMT_AUDIO_PACKET_CONTROL2 */
+	REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+			AFMT_AUDIO_LAYOUT_OVRD, 0,
+			AFMT_60958_OSF_OVRD, 0);
+
+	/* HDMI_ACR_PACKET_CONTROL */
+	REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
+			HDMI_ACR_AUTO_SEND, 1,
+			HDMI_ACR_SOURCE, 0,
+			HDMI_ACR_AUDIO_PRIORITY, 0);
+
+	/* Program audio clock sample/regeneration parameters */
+	get_audio_clock_info(crtc_info->color_depth,
+			     crtc_info->requested_pixel_clock,
+			     crtc_info->calculated_pixel_clock,
+			     &audio_clock_info);
+	DC_LOG_HW_AUDIO(
+			"\n%s:Input::requested_pixel_clock = %d "
+			"calculated_pixel_clock = %d\n", __func__,
+			crtc_info->requested_pixel_clock,
+			crtc_info->calculated_pixel_clock);
+
+	/* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
+	REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
+
+	/* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
+	REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
+
+	/* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
+	REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
+
+	/* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
+	REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
+
+	/* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
+	REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
+
+	/* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
+	REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
+
+	/* The video driver cannot know in advance which sample rate will
+	 * be used by the HD audio driver.
+	 * The HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
+	 * programmed later in the interrupt callback.
+	 */
+
+	/* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
+	 * AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK
+	 */
+	REG_UPDATE_2(AFMT_60958_0,
+			AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
+			AFMT_60958_CS_CLOCK_ACCURACY, 0);
+
+	/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
+	REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+
+	/* AFMT_60958_2: keep these settings until the
+	 * programming guide comes out
+	 */
+	REG_UPDATE_6(AFMT_60958_2,
+			AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
+			AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
+			AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
+			AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
+			AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
+			AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+}
+
+static void enc1_se_setup_dp_audio(
+	struct stream_encoder *enc)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	/* --- DP Audio packet configurations --- */
+
+	/* ATP Configuration */
+	REG_SET(DP_SEC_AUD_N, 0,
+			DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
+
+	/* Async/auto-calc timestamp mode */
+	REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
+			DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
+
+	/* --- The following are the registers
+	 *  copied from the SetupHDMI ---
+	 */
+
+	/* AFMT_AUDIO_PACKET_CONTROL */
+	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+	/* AFMT_AUDIO_PACKET_CONTROL2 */
+	/* Program the ATP and AIP next */
+	REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+			AFMT_AUDIO_LAYOUT_OVRD, 0,
+			AFMT_60958_OSF_OVRD, 0);
+
+	/* AFMT_INFOFRAME_CONTROL0 */
+	REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+	/* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+	REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
+}
+
+static void enc1_se_enable_audio_clock(
+	struct stream_encoder *enc,
+	bool enable)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	if (REG(AFMT_CNTL) == 0)
+		return;   /* DCE8/10 does not have this register */
+
+	REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
+
+	/* wait for AFMT clock to turn on,
+	 * expectation: this should complete in 1-2 reads
+	 *
+	 * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
+	 *
+	 * TODO: waiting for clock_on does not work well. May need a HW
+	 * programming sequence. But audio seems to work normally even
+	 * without waiting for the clock_on status change
+	 */
+}
+
+static void enc1_se_enable_dp_audio(
+	struct stream_encoder *enc)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	/* Enable Audio packets */
+	REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
+
+	/* Program the ATP and AIP next */
+	REG_UPDATE_2(DP_SEC_CNTL,
+			DP_SEC_ATP_ENABLE, 1,
+			DP_SEC_AIP_ENABLE, 1);
+
+	/* Program STREAM_ENABLE after all the other enables. */
+	REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void enc1_se_disable_dp_audio(
+	struct stream_encoder *enc)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+	uint32_t value = REG_READ(DP_SEC_CNTL);
+
+	/* Disable Audio packets */
+	REG_UPDATE_5(DP_SEC_CNTL,
+			DP_SEC_ASP_ENABLE, 0,
+			DP_SEC_ATP_ENABLE, 0,
+			DP_SEC_AIP_ENABLE, 0,
+			DP_SEC_ACM_ENABLE, 0,
+			DP_SEC_STREAM_ENABLE, 0);
+
+	/* This register is shared with the encoder info frame. Therefore we
+	 * need to keep the master enable bit set if at least one field is not 0
+	 */
+	if (value != 0)
+		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+void enc1_se_audio_mute_control(
+	struct stream_encoder *enc,
+	bool mute)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
+}
+
+void enc1_se_dp_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *info)
+{
+	enc1_se_audio_setup(enc, az_inst, info);
+}
+
+void enc1_se_dp_audio_enable(
+	struct stream_encoder *enc)
+{
+	enc1_se_enable_audio_clock(enc, true);
+	enc1_se_setup_dp_audio(enc);
+	enc1_se_enable_dp_audio(enc);
+}
+
+void enc1_se_dp_audio_disable(
+	struct stream_encoder *enc)
+{
+	enc1_se_disable_dp_audio(enc);
+	enc1_se_enable_audio_clock(enc, false);
+}
+
+void enc1_se_hdmi_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *info,
+	struct audio_crtc_info *audio_crtc_info)
+{
+	enc1_se_enable_audio_clock(enc, true);
+	enc1_se_setup_hdmi_audio(enc, audio_crtc_info);
+	enc1_se_audio_setup(enc, az_inst, info);
+}
+
+void enc1_se_hdmi_audio_disable(
+	struct stream_encoder *enc)
+{
+	enc1_se_enable_audio_clock(enc, false);
+}
+
+static void enc1_setup_stereo_sync(
+	struct stream_encoder *enc,
+	int tg_inst, bool enable)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst);
+	REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable);
+}
+
+static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
+	.dp_set_stream_attribute =
+		enc1_stream_encoder_dp_set_stream_attribute,
+	.hdmi_set_stream_attribute =
+		enc1_stream_encoder_hdmi_set_stream_attribute,
+	.dvi_set_stream_attribute =
+		enc1_stream_encoder_dvi_set_stream_attribute,
+	.set_mst_bandwidth =
+		enc1_stream_encoder_set_mst_bandwidth,
+	.update_hdmi_info_packets =
+		enc1_stream_encoder_update_hdmi_info_packets,
+	.stop_hdmi_info_packets =
+		enc1_stream_encoder_stop_hdmi_info_packets,
+	.update_dp_info_packets =
+		enc1_stream_encoder_update_dp_info_packets,
+	.stop_dp_info_packets =
+		enc1_stream_encoder_stop_dp_info_packets,
+	.dp_blank =
+		enc1_stream_encoder_dp_blank,
+	.dp_unblank =
+		enc1_stream_encoder_dp_unblank,
+	.audio_mute_control = enc1_se_audio_mute_control,
+
+	.dp_audio_setup = enc1_se_dp_audio_setup,
+	.dp_audio_enable = enc1_se_dp_audio_enable,
+	.dp_audio_disable = enc1_se_dp_audio_disable,
+
+	.hdmi_audio_setup = enc1_se_hdmi_audio_setup,
+	.hdmi_audio_disable = enc1_se_hdmi_audio_disable,
+	.setup_stereo_sync  = enc1_setup_stereo_sync,
+	.set_avmute = enc1_stream_encoder_set_avmute,
+};
+
+void dcn10_stream_encoder_construct(
+	struct dcn10_stream_encoder *enc1,
+	struct dc_context *ctx,
+	struct dc_bios *bp,
+	enum engine_id eng_id,
+	const struct dcn10_stream_enc_registers *regs,
+	const struct dcn10_stream_encoder_shift *se_shift,
+	const struct dcn10_stream_encoder_mask *se_mask)
+{
+	enc1->base.funcs = &dcn10_str_enc_funcs;
+	enc1->base.ctx = ctx;
+	enc1->base.id = eng_id;
+	enc1->base.bp = bp;
+	enc1->regs = regs;
+	enc1->se_shift = se_shift;
+	enc1->se_mask = se_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
new file mode 100644
index 000000000000..86f8ee5ed8b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -0,0 +1,584 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_STREAM_ENCODER_DCN10_H__
+#define __DC_STREAM_ENCODER_DCN10_H__
+
+#include "stream_encoder.h"
+
+#define DCN10STRENC_FROM_STRENC(stream_encoder)\
+	container_of(stream_encoder, struct dcn10_stream_encoder, base)
+
+#define SE_COMMON_REG_LIST_BASE(id) \
+	SRI(AFMT_GENERIC_0, DIG, id), \
+	SRI(AFMT_GENERIC_1, DIG, id), \
+	SRI(AFMT_GENERIC_2, DIG, id), \
+	SRI(AFMT_GENERIC_3, DIG, id), \
+	SRI(AFMT_GENERIC_4, DIG, id), \
+	SRI(AFMT_GENERIC_5, DIG, id), \
+	SRI(AFMT_GENERIC_6, DIG, id), \
+	SRI(AFMT_GENERIC_7, DIG, id), \
+	SRI(AFMT_GENERIC_HDR, DIG, id), \
+	SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
+	SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
+	SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
+	SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
+	SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
+	SRI(AFMT_60958_0, DIG, id), \
+	SRI(AFMT_60958_1, DIG, id), \
+	SRI(AFMT_60958_2, DIG, id), \
+	SRI(DIG_FE_CNTL, DIG, id), \
+	SRI(HDMI_CONTROL, DIG, id), \
+	SRI(HDMI_GC, DIG, id), \
+	SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+	SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+	SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+	SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+	SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+	SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
+	SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
+	SRI(HDMI_ACR_32_0, DIG, id),\
+	SRI(HDMI_ACR_32_1, DIG, id),\
+	SRI(HDMI_ACR_44_0, DIG, id),\
+	SRI(HDMI_ACR_44_1, DIG, id),\
+	SRI(HDMI_ACR_48_0, DIG, id),\
+	SRI(HDMI_ACR_48_1, DIG, id),\
+	SRI(TMDS_CNTL, DIG, id), \
+	SRI(DP_MSE_RATE_CNTL, DP, id), \
+	SRI(DP_MSE_RATE_UPDATE, DP, id), \
+	SRI(DP_PIXEL_FORMAT, DP, id), \
+	SRI(DP_SEC_CNTL, DP, id), \
+	SRI(DP_STEER_FIFO, DP, id), \
+	SRI(DP_VID_M, DP, id), \
+	SRI(DP_VID_N, DP, id), \
+	SRI(DP_VID_STREAM_CNTL, DP, id), \
+	SRI(DP_VID_TIMING, DP, id), \
+	SRI(DP_SEC_AUD_N, DP, id), \
+	SRI(DP_SEC_TIMESTAMP, DP, id)
+
+#define SE_DCN_REG_LIST(id)\
+	SE_COMMON_REG_LIST_BASE(id),\
+	SRI(AFMT_CNTL, DIG, id),\
+	SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id),\
+	SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+	SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+	SRI(DP_DB_CNTL, DP, id), \
+	SRI(DP_MSA_MISC, DP, id), \
+	SRI(DP_MSA_COLORIMETRY, DP, id), \
+	SRI(DP_MSA_TIMING_PARAM1, DP, id), \
+	SRI(DP_MSA_TIMING_PARAM2, DP, id), \
+	SRI(DP_MSA_TIMING_PARAM3, DP, id), \
+	SRI(DP_MSA_TIMING_PARAM4, DP, id), \
+	SRI(HDMI_DB_CONTROL, DIG, id)
+
+#define SE_SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
+
+#define SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+	SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+	SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+	SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+	SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+	SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+	SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+	SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+	SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+	SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+	SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+	SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+	SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+	SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+	SE_SF(DIG0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+	SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+	SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+	SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+	SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+	SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+	SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+	SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+	SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+	SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
+	SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
+	SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\
+	SE_SF(DIG0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+	SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+	SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+	SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+	SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+	SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+	SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+	SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+	SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+	SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+	SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+	SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+	SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+	SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+	SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+	SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+	SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+	SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE_PENDING, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE_PENDING, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE_PENDING, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE_PENDING, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE_PENDING, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE_PENDING, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE_PENDING, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE_PENDING, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
+	SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
+	SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
+	SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
+	SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
+	SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
+	SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
+	SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCN10(mask_sh)\
+	SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh)
+
+struct dcn10_stream_encoder_shift {
+	uint8_t AFMT_GENERIC_INDEX;
+	uint8_t AFMT_GENERIC_HB0;
+	uint8_t AFMT_GENERIC_HB1;
+	uint8_t AFMT_GENERIC_HB2;
+	uint8_t AFMT_GENERIC_HB3;
+	uint8_t AFMT_GENERIC_LOCK_STATUS;
+	uint8_t AFMT_GENERIC_CONFLICT;
+	uint8_t AFMT_GENERIC_CONFLICT_CLR;
+	uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+	uint8_t AFMT_GENERIC0_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC1_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC2_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC3_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC4_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC5_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC6_FRAME_UPDATE;
+	uint8_t AFMT_GENERIC7_FRAME_UPDATE;
+	uint8_t HDMI_GENERIC0_CONT;
+	uint8_t HDMI_GENERIC0_SEND;
+	uint8_t HDMI_GENERIC0_LINE;
+	uint8_t HDMI_GENERIC1_CONT;
+	uint8_t HDMI_GENERIC1_SEND;
+	uint8_t HDMI_GENERIC1_LINE;
+	uint8_t HDMI_GENERIC2_CONT;
+	uint8_t HDMI_GENERIC2_SEND;
+	uint8_t HDMI_GENERIC2_LINE;
+	uint8_t HDMI_GENERIC3_CONT;
+	uint8_t HDMI_GENERIC3_SEND;
+	uint8_t HDMI_GENERIC3_LINE;
+	uint8_t HDMI_GENERIC4_CONT;
+	uint8_t HDMI_GENERIC4_SEND;
+	uint8_t HDMI_GENERIC4_LINE;
+	uint8_t HDMI_GENERIC5_CONT;
+	uint8_t HDMI_GENERIC5_SEND;
+	uint8_t HDMI_GENERIC5_LINE;
+	uint8_t HDMI_GENERIC6_CONT;
+	uint8_t HDMI_GENERIC6_SEND;
+	uint8_t HDMI_GENERIC6_LINE;
+	uint8_t HDMI_GENERIC7_CONT;
+	uint8_t HDMI_GENERIC7_SEND;
+	uint8_t HDMI_GENERIC7_LINE;
+	uint8_t DP_PIXEL_ENCODING;
+	uint8_t DP_COMPONENT_DEPTH;
+	uint8_t HDMI_PACKET_GEN_VERSION;
+	uint8_t HDMI_KEEPOUT_MODE;
+	uint8_t HDMI_DEEP_COLOR_ENABLE;
+	uint8_t HDMI_CLOCK_CHANNEL_RATE;
+	uint8_t HDMI_DEEP_COLOR_DEPTH;
+	uint8_t HDMI_GC_CONT;
+	uint8_t HDMI_GC_SEND;
+	uint8_t HDMI_NULL_SEND;
+	uint8_t HDMI_DATA_SCRAMBLE_EN;
+	uint8_t HDMI_AUDIO_INFO_SEND;
+	uint8_t AFMT_AUDIO_INFO_UPDATE;
+	uint8_t HDMI_AUDIO_INFO_LINE;
+	uint8_t HDMI_GC_AVMUTE;
+	uint8_t DP_MSE_RATE_X;
+	uint8_t DP_MSE_RATE_Y;
+	uint8_t DP_MSE_RATE_UPDATE_PENDING;
+	uint8_t DP_SEC_GSP0_ENABLE;
+	uint8_t DP_SEC_STREAM_ENABLE;
+	uint8_t DP_SEC_GSP1_ENABLE;
+	uint8_t DP_SEC_GSP2_ENABLE;
+	uint8_t DP_SEC_GSP3_ENABLE;
+	uint8_t DP_SEC_GSP4_ENABLE;
+	uint8_t DP_SEC_GSP5_ENABLE;
+	uint8_t DP_SEC_GSP6_ENABLE;
+	uint8_t DP_SEC_GSP7_ENABLE;
+	uint8_t DP_SEC_MPG_ENABLE;
+	uint8_t DP_VID_STREAM_DIS_DEFER;
+	uint8_t DP_VID_STREAM_ENABLE;
+	uint8_t DP_VID_STREAM_STATUS;
+	uint8_t DP_STEER_FIFO_RESET;
+	uint8_t DP_VID_M_N_GEN_EN;
+	uint8_t DP_VID_N;
+	uint8_t DP_VID_M;
+	uint8_t DIG_START;
+	uint8_t AFMT_AUDIO_SRC_SELECT;
+	uint8_t AFMT_AUDIO_CHANNEL_ENABLE;
+	uint8_t HDMI_AUDIO_PACKETS_PER_LINE;
+	uint8_t HDMI_AUDIO_DELAY_EN;
+	uint8_t AFMT_60958_CS_UPDATE;
+	uint8_t AFMT_AUDIO_LAYOUT_OVRD;
+	uint8_t AFMT_60958_OSF_OVRD;
+	uint8_t HDMI_ACR_AUTO_SEND;
+	uint8_t HDMI_ACR_SOURCE;
+	uint8_t HDMI_ACR_AUDIO_PRIORITY;
+	uint8_t HDMI_ACR_CTS_32;
+	uint8_t HDMI_ACR_N_32;
+	uint8_t HDMI_ACR_CTS_44;
+	uint8_t HDMI_ACR_N_44;
+	uint8_t HDMI_ACR_CTS_48;
+	uint8_t HDMI_ACR_N_48;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+	uint8_t AFMT_60958_CS_CLOCK_ACCURACY;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+	uint8_t DP_SEC_AUD_N;
+	uint8_t DP_SEC_TIMESTAMP_MODE;
+	uint8_t DP_SEC_ASP_ENABLE;
+	uint8_t DP_SEC_ATP_ENABLE;
+	uint8_t DP_SEC_AIP_ENABLE;
+	uint8_t DP_SEC_ACM_ENABLE;
+	uint8_t AFMT_AUDIO_SAMPLE_SEND;
+	uint8_t AFMT_AUDIO_CLOCK_EN;
+	uint8_t TMDS_PIXEL_ENCODING;
+	uint8_t TMDS_COLOR_FORMAT;
+	uint8_t DIG_STEREOSYNC_SELECT;
+	uint8_t DIG_STEREOSYNC_GATE_EN;
+	uint8_t DP_DB_DISABLE;
+	uint8_t DP_MSA_MISC0;
+	uint8_t DP_MSA_HTOTAL;
+	uint8_t DP_MSA_VTOTAL;
+	uint8_t DP_MSA_HSTART;
+	uint8_t DP_MSA_VSTART;
+	uint8_t DP_MSA_HSYNCWIDTH;
+	uint8_t DP_MSA_HSYNCPOLARITY;
+	uint8_t DP_MSA_VSYNCWIDTH;
+	uint8_t DP_MSA_VSYNCPOLARITY;
+	uint8_t DP_MSA_HWIDTH;
+	uint8_t DP_MSA_VHEIGHT;
+	uint8_t HDMI_DB_DISABLE;
+	uint8_t DP_VID_N_MUL;
+	uint8_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dcn10_stream_encoder_mask {
+	uint32_t AFMT_GENERIC_INDEX;
+	uint32_t AFMT_GENERIC_HB0;
+	uint32_t AFMT_GENERIC_HB1;
+	uint32_t AFMT_GENERIC_HB2;
+	uint32_t AFMT_GENERIC_HB3;
+	uint32_t AFMT_GENERIC_LOCK_STATUS;
+	uint32_t AFMT_GENERIC_CONFLICT;
+	uint32_t AFMT_GENERIC_CONFLICT_CLR;
+	uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+	uint32_t AFMT_GENERIC0_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC1_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC2_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC3_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC4_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC5_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC6_FRAME_UPDATE;
+	uint32_t AFMT_GENERIC7_FRAME_UPDATE;
+	uint32_t HDMI_GENERIC0_CONT;
+	uint32_t HDMI_GENERIC0_SEND;
+	uint32_t HDMI_GENERIC0_LINE;
+	uint32_t HDMI_GENERIC1_CONT;
+	uint32_t HDMI_GENERIC1_SEND;
+	uint32_t HDMI_GENERIC1_LINE;
+	uint32_t HDMI_GENERIC2_CONT;
+	uint32_t HDMI_GENERIC2_SEND;
+	uint32_t HDMI_GENERIC2_LINE;
+	uint32_t HDMI_GENERIC3_CONT;
+	uint32_t HDMI_GENERIC3_SEND;
+	uint32_t HDMI_GENERIC3_LINE;
+	uint32_t HDMI_GENERIC4_CONT;
+	uint32_t HDMI_GENERIC4_SEND;
+	uint32_t HDMI_GENERIC4_LINE;
+	uint32_t HDMI_GENERIC5_CONT;
+	uint32_t HDMI_GENERIC5_SEND;
+	uint32_t HDMI_GENERIC5_LINE;
+	uint32_t HDMI_GENERIC6_CONT;
+	uint32_t HDMI_GENERIC6_SEND;
+	uint32_t HDMI_GENERIC6_LINE;
+	uint32_t HDMI_GENERIC7_CONT;
+	uint32_t HDMI_GENERIC7_SEND;
+	uint32_t HDMI_GENERIC7_LINE;
+	uint32_t DP_PIXEL_ENCODING;
+	uint32_t DP_COMPONENT_DEPTH;
+	uint32_t HDMI_PACKET_GEN_VERSION;
+	uint32_t HDMI_KEEPOUT_MODE;
+	uint32_t HDMI_DEEP_COLOR_ENABLE;
+	uint32_t HDMI_CLOCK_CHANNEL_RATE;
+	uint32_t HDMI_DEEP_COLOR_DEPTH;
+	uint32_t HDMI_GC_CONT;
+	uint32_t HDMI_GC_SEND;
+	uint32_t HDMI_NULL_SEND;
+	uint32_t HDMI_DATA_SCRAMBLE_EN;
+	uint32_t HDMI_AUDIO_INFO_SEND;
+	uint32_t AFMT_AUDIO_INFO_UPDATE;
+	uint32_t HDMI_AUDIO_INFO_LINE;
+	uint32_t HDMI_GC_AVMUTE;
+	uint32_t DP_MSE_RATE_X;
+	uint32_t DP_MSE_RATE_Y;
+	uint32_t DP_MSE_RATE_UPDATE_PENDING;
+	uint32_t DP_SEC_GSP0_ENABLE;
+	uint32_t DP_SEC_STREAM_ENABLE;
+	uint32_t DP_SEC_GSP1_ENABLE;
+	uint32_t DP_SEC_GSP2_ENABLE;
+	uint32_t DP_SEC_GSP3_ENABLE;
+	uint32_t DP_SEC_GSP4_ENABLE;
+	uint32_t DP_SEC_GSP5_ENABLE;
+	uint32_t DP_SEC_GSP6_ENABLE;
+	uint32_t DP_SEC_GSP7_ENABLE;
+	uint32_t DP_SEC_MPG_ENABLE;
+	uint32_t DP_VID_STREAM_DIS_DEFER;
+	uint32_t DP_VID_STREAM_ENABLE;
+	uint32_t DP_VID_STREAM_STATUS;
+	uint32_t DP_STEER_FIFO_RESET;
+	uint32_t DP_VID_M_N_GEN_EN;
+	uint32_t DP_VID_N;
+	uint32_t DP_VID_M;
+	uint32_t DIG_START;
+	uint32_t AFMT_AUDIO_SRC_SELECT;
+	uint32_t AFMT_AUDIO_CHANNEL_ENABLE;
+	uint32_t HDMI_AUDIO_PACKETS_PER_LINE;
+	uint32_t HDMI_AUDIO_DELAY_EN;
+	uint32_t AFMT_60958_CS_UPDATE;
+	uint32_t AFMT_AUDIO_LAYOUT_OVRD;
+	uint32_t AFMT_60958_OSF_OVRD;
+	uint32_t HDMI_ACR_AUTO_SEND;
+	uint32_t HDMI_ACR_SOURCE;
+	uint32_t HDMI_ACR_AUDIO_PRIORITY;
+	uint32_t HDMI_ACR_CTS_32;
+	uint32_t HDMI_ACR_N_32;
+	uint32_t HDMI_ACR_CTS_44;
+	uint32_t HDMI_ACR_N_44;
+	uint32_t HDMI_ACR_CTS_48;
+	uint32_t HDMI_ACR_N_48;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+	uint32_t AFMT_60958_CS_CLOCK_ACCURACY;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+	uint32_t DP_SEC_AUD_N;
+	uint32_t DP_SEC_TIMESTAMP_MODE;
+	uint32_t DP_SEC_ASP_ENABLE;
+	uint32_t DP_SEC_ATP_ENABLE;
+	uint32_t DP_SEC_AIP_ENABLE;
+	uint32_t DP_SEC_ACM_ENABLE;
+	uint32_t AFMT_AUDIO_SAMPLE_SEND;
+	uint32_t AFMT_AUDIO_CLOCK_EN;
+	uint32_t TMDS_PIXEL_ENCODING;
+	uint32_t DIG_STEREOSYNC_SELECT;
+	uint32_t DIG_STEREOSYNC_GATE_EN;
+	uint32_t TMDS_COLOR_FORMAT;
+	uint32_t DP_DB_DISABLE;
+	uint32_t DP_MSA_MISC0;
+	uint32_t DP_MSA_HTOTAL;
+	uint32_t DP_MSA_VTOTAL;
+	uint32_t DP_MSA_HSTART;
+	uint32_t DP_MSA_VSTART;
+	uint32_t DP_MSA_HSYNCWIDTH;
+	uint32_t DP_MSA_HSYNCPOLARITY;
+	uint32_t DP_MSA_VSYNCWIDTH;
+	uint32_t DP_MSA_VSYNCPOLARITY;
+	uint32_t DP_MSA_HWIDTH;
+	uint32_t DP_MSA_VHEIGHT;
+	uint32_t HDMI_DB_DISABLE;
+	uint32_t DP_VID_N_MUL;
+	uint32_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dcn10_stream_enc_registers {
+	uint32_t AFMT_CNTL;
+	uint32_t AFMT_AVI_INFO0;
+	uint32_t AFMT_AVI_INFO1;
+	uint32_t AFMT_AVI_INFO2;
+	uint32_t AFMT_AVI_INFO3;
+	uint32_t AFMT_GENERIC_0;
+	uint32_t AFMT_GENERIC_1;
+	uint32_t AFMT_GENERIC_2;
+	uint32_t AFMT_GENERIC_3;
+	uint32_t AFMT_GENERIC_4;
+	uint32_t AFMT_GENERIC_5;
+	uint32_t AFMT_GENERIC_6;
+	uint32_t AFMT_GENERIC_7;
+	uint32_t AFMT_GENERIC_HDR;
+	uint32_t AFMT_INFOFRAME_CONTROL0;
+	uint32_t AFMT_VBI_PACKET_CONTROL;
+	uint32_t AFMT_VBI_PACKET_CONTROL1;
+	uint32_t AFMT_AUDIO_PACKET_CONTROL;
+	uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+	uint32_t AFMT_AUDIO_SRC_CONTROL;
+	uint32_t AFMT_60958_0;
+	uint32_t AFMT_60958_1;
+	uint32_t AFMT_60958_2;
+	uint32_t DIG_FE_CNTL;
+	uint32_t DP_MSE_RATE_CNTL;
+	uint32_t DP_MSE_RATE_UPDATE;
+	uint32_t DP_PIXEL_FORMAT;
+	uint32_t DP_SEC_CNTL;
+	uint32_t DP_STEER_FIFO;
+	uint32_t DP_VID_M;
+	uint32_t DP_VID_N;
+	uint32_t DP_VID_STREAM_CNTL;
+	uint32_t DP_VID_TIMING;
+	uint32_t DP_SEC_AUD_N;
+	uint32_t DP_SEC_TIMESTAMP;
+	uint32_t HDMI_CONTROL;
+	uint32_t HDMI_GC;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL0;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL1;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL2;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL3;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL4;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL5;
+	uint32_t HDMI_INFOFRAME_CONTROL0;
+	uint32_t HDMI_INFOFRAME_CONTROL1;
+	uint32_t HDMI_VBI_PACKET_CONTROL;
+	uint32_t HDMI_AUDIO_PACKET_CONTROL;
+	uint32_t HDMI_ACR_PACKET_CONTROL;
+	uint32_t HDMI_ACR_32_0;
+	uint32_t HDMI_ACR_32_1;
+	uint32_t HDMI_ACR_44_0;
+	uint32_t HDMI_ACR_44_1;
+	uint32_t HDMI_ACR_48_0;
+	uint32_t HDMI_ACR_48_1;
+	uint32_t TMDS_CNTL;
+	uint32_t DP_DB_CNTL;
+	uint32_t DP_MSA_MISC;
+	uint32_t DP_MSA_COLORIMETRY;
+	uint32_t DP_MSA_TIMING_PARAM1;
+	uint32_t DP_MSA_TIMING_PARAM2;
+	uint32_t DP_MSA_TIMING_PARAM3;
+	uint32_t DP_MSA_TIMING_PARAM4;
+	uint32_t HDMI_DB_CONTROL;
+};
+
+struct dcn10_stream_encoder {
+	struct stream_encoder base;
+	const struct dcn10_stream_enc_registers *regs;
+	const struct dcn10_stream_encoder_shift *se_shift;
+	const struct dcn10_stream_encoder_mask *se_mask;
+};
+
+void dcn10_stream_encoder_construct(
+	struct dcn10_stream_encoder *enc1,
+	struct dc_context *ctx,
+	struct dc_bios *bp,
+	enum engine_id eng_id,
+	const struct dcn10_stream_enc_registers *regs,
+	const struct dcn10_stream_encoder_shift *se_shift,
+	const struct dcn10_stream_encoder_mask *se_mask);
+
+#endif /* __DC_STREAM_ENCODER_DCN10_H__ */

From 3dc8acad23519123bdecaf3184f2ae774c5775fc Mon Sep 17 00:00:00 2001
From: Jun Lei <Jun.Lei@amd.com>
Date: Thu, 1 Mar 2018 08:58:02 -0500
Subject: [PATCH 0363/1286] drm/amd/display: remove unused enum

Signed-off-by: Jun Lei <Jun.Lei@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc_types.h | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index cd324bcc45e8..9defe3b17617 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -370,12 +370,6 @@ struct dc_csc_adjustments {
 	struct fixed31_32 hue;
 };
 
-enum {
-	MAX_LANES = 2,
-	MAX_COFUNC_PATH = 6,
-	LAYER_INDEX_PRIMARY = -1,
-};
-
 enum dpcd_downstream_port_max_bpc {
 	DOWN_STREAM_MAX_8BPC = 0,
 	DOWN_STREAM_MAX_10BPC,

From cf65ebeb687678812eb3ddd5ef253bacf7ef330a Mon Sep 17 00:00:00 2001
From: Eric Yang <Eric.Yang2@amd.com>
Date: Fri, 23 Mar 2018 13:56:16 -0400
Subject: [PATCH 0364/1286] drm/amd/display: fix link bw calculation for 422
 and 420 encoding

The required link bandwidth is reduced when chroma subsampling is in use;
a sketch of the adjustment follows.
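
A minimal standalone sketch of the adjustment (illustrative only, not the
driver helper itself; names and parameters are stand-ins):

	/* Bandwidth scales with the average number of components per pixel:
	 * 3 for RGB/4:4:4, 2 for 4:2:2 and 1.5 for 4:2:0.
	 */
	static unsigned int bw_kbps(unsigned int pix_clk_khz,
				    unsigned int bits_per_channel,
				    int ycbcr420, int ycbcr422)
	{
		unsigned int kbps = pix_clk_khz * bits_per_channel * 3;

		if (ycbcr420)
			kbps /= 2;		/* 1.5 of 3 components */
		else if (ycbcr422)
			kbps = kbps * 2 / 3;	/* 2 of 3 components */

		return kbps;
	}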

Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 53 +++++++++++--------
 1 file changed, 32 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index b86325bb636f..07cc4385a7c1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1378,37 +1378,48 @@ static uint32_t bandwidth_in_kbps_from_timing(
 {
 	uint32_t bits_per_channel = 0;
 	uint32_t kbps;
-	switch (timing->display_color_depth) {
 
-	case COLOR_DEPTH_666:
-		bits_per_channel = 6;
-		break;
-	case COLOR_DEPTH_888:
-		bits_per_channel = 8;
-		break;
-	case COLOR_DEPTH_101010:
-		bits_per_channel = 10;
-		break;
-	case COLOR_DEPTH_121212:
+	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
 		bits_per_channel = 12;
-		break;
-	case COLOR_DEPTH_141414:
-		bits_per_channel = 14;
-		break;
-	case COLOR_DEPTH_161616:
-		bits_per_channel = 16;
-		break;
-	default:
-		break;
+	else {
+
+		switch (timing->display_color_depth) {
+
+		case COLOR_DEPTH_666:
+			bits_per_channel = 6;
+			break;
+		case COLOR_DEPTH_888:
+			bits_per_channel = 8;
+			break;
+		case COLOR_DEPTH_101010:
+			bits_per_channel = 10;
+			break;
+		case COLOR_DEPTH_121212:
+			bits_per_channel = 12;
+			break;
+		case COLOR_DEPTH_141414:
+			bits_per_channel = 14;
+			break;
+		case COLOR_DEPTH_161616:
+			bits_per_channel = 16;
+			break;
+		default:
+			break;
+		}
 	}
 	ASSERT(bits_per_channel != 0);
 
 	kbps = timing->pix_clk_khz;
 	kbps *= bits_per_channel;
 
-	if (timing->flags.Y_ONLY != 1)
+	if (timing->flags.Y_ONLY != 1) {
 		/*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
 		kbps *= 3;
+		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+			kbps /= 2;
+		else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+			kbps = kbps * 2 / 3;
+	}
 
 	return kbps;
 

From 8f121fe281692ce4b7849ee7be9f3c0dcb079742 Mon Sep 17 00:00:00 2001
From: Jun Lei <Jun.Lei@amd.com>
Date: Mon, 26 Mar 2018 14:01:41 -0400
Subject: [PATCH 0365/1286] drm/amd/display: Fill calcs data from stream
 src/dst if available

We would otherwise fall back to the timing, which would always give us
identity (1:1) scaling. A sketch of the preference order is below.
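
A standalone sketch of the preference order (illustrative only; the struct
and helper names below are made up, not the driver's):

	struct rect { unsigned int width, height; };

	/* Prefer a fully specified stream src/dst rectangle; the timing
	 * fallback can only ever describe identity (1:1) scaling.
	 */
	static int use_stream_src_dst(const struct rect *src,
				      const struct rect *dst)
	{
		return src->width && src->height &&
		       dst->width && dst->height;
	}

	/* Equal dimensions mean no scaling, so one filter tap suffices. */
	static unsigned int pick_taps(unsigned int src_dim, unsigned int dst_dim)
	{
		return (src_dim == dst_dim) ? 1 : 2;
	}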

Signed-off-by: Jun Lei <Jun.Lei@amd.com>
Reviewed-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 4b719328afd6..56f46a065a93 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -2933,6 +2933,19 @@ static void populate_initial_data(
 				data->bytes_per_pixel[num_displays + 4] = 4;
 				break;
 			}
+		} else if (pipe[i].stream->dst.width != 0 &&
+					pipe[i].stream->dst.height != 0 &&
+					pipe[i].stream->src.width != 0 &&
+					pipe[i].stream->src.height != 0) {
+			data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width);
+			data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+			data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height);
+			data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
+			data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
+			data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width);
+			data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height);
+			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+			data->bytes_per_pixel[num_displays + 4] = 4;
 		} else {
 			data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
 			data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];

From 5282cbe32ea5b8008f86a743922f018287113e2c Mon Sep 17 00:00:00 2001
From: Yongqiang Sun <yongqiang.sun@amd.com>
Date: Tue, 27 Mar 2018 10:05:10 -0400
Subject: [PATCH 0366/1286] drm/amd/display: Change disable backlight ramp
 change threshold from 0 to maximum value.

Unlike a user, who sets brightness as a percentage, the HLK test sets
the brightness level over its full nominal range, i.e. from 0 to 255.
When the test set brightness to 0, DC treated the stored level of 0 as
the ramp-disable indicator, so the next brightness change was applied
with a ramp of 0, which fails the HLK test.
Fix:
When unblanking the stream and turning on eDP, set the brightness level
in the stream to 0xFFFFFFFF (the actual maximum level is 0xFF) and use
that value as a flag to recognize the resume-from-S3 case.
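
A minimal sketch of the sentinel-value approach (the helper below is
hypothetical and only illustrates how the flag is consumed; the real
check lives in dc_link_set_backlight_level()):

	#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF

	/* Hypothetical helper: choose the frame ramp for a brightness
	 * update. A stored level equal to the sentinel marks the first
	 * update after unblank (e.g. resume from S3), where the ramp
	 * must be skipped.
	 */
	static unsigned int pick_frame_ramp(unsigned int stored_level,
					    unsigned int requested_ramp)
	{
		if (stored_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
			return 0; /* apply the new level immediately */

		return requested_ramp; /* normal user-initiated change */
	}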

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Eric Yang <eric.yang2@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c               | 2 +-
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h           | 2 ++
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index d9efdd926145..0cd286f8eaa0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1982,7 +1982,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
 		return false;
 
 	if (stream) {
-		if (stream->bl_pwm_level == 0)
+		if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
 			frame_ramp = 0;
 
 		((struct dc_stream_state *)stream)->bl_pwm_level = level;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index db2d15dfb831..78bf4fae9e0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1036,7 +1036,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
 
 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
 		link->dc->hwss.edp_backlight_control(link, true);
-		stream->bl_pwm_level = 0;
+		stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
 	}
 }
 void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index e764cbad881b..f54d478ffc5c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,6 +32,8 @@
 #include "inc/hw/link_encoder.h"
 #include "core_status.h"
 
+#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF
+
 enum pipe_gating_control {
 	PIPE_GATING_CONTROL_DISABLE = 0,
 	PIPE_GATING_CONTROL_ENABLE,

From dc002a2e4f6e164fd9e5c1353df795dc65784887 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Mon, 26 Mar 2018 12:33:22 -0400
Subject: [PATCH 0367/1286] drm/amd/display: Update scaler v_active data if
 interlaced

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
Reviewed-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 50b84f69bd25..eb8f4792198c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -844,6 +844,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
 			pipe_ctx->plane_state->format);
 
+	if (pipe_ctx->stream->timing.flags.INTERLACE)
+		pipe_ctx->stream->dst.height *= 2;
+
 	calculate_scaling_ratios(pipe_ctx);
 
 	calculate_viewport(pipe_ctx);
@@ -864,6 +867,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
 	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
 	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+	if (pipe_ctx->stream->timing.flags.INTERLACE)
+		pipe_ctx->plane_res.scl_data.v_active *= 2;
 
 
 	/* Taps calculations */
@@ -909,6 +914,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 				plane_state->dst_rect.x,
 				plane_state->dst_rect.y);
 
+	if (pipe_ctx->stream->timing.flags.INTERLACE)
+		pipe_ctx->stream->dst.height /= 2;
+
 	return res;
 }
 

From c5011872f6ad7fb8700117ae2fbdcd3ebbbe8402 Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Mon, 26 Mar 2018 16:28:03 -0400
Subject: [PATCH 0368/1286] drm/amd/display: Make DCN stream encoder shareable

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/dc/dce/dce_stream_encoder.c   |  39 +-
 .../display/dc/dcn10/dcn10_stream_encoder.c   |  62 +-
 .../display/dc/dcn10/dcn10_stream_encoder.h   | 646 ++++++++----------
 .../gpu/drm/amd/display/dc/inc/hw/hw_shared.h |  17 +
 4 files changed, 343 insertions(+), 421 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index b85fda5f38e8..07c32421c226 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -26,27 +26,10 @@
 #include "dc_bios_types.h"
 #include "dce_stream_encoder.h"
 #include "reg_helper.h"
+#include "hw_shared.h"
+
 #define DC_LOGGER \
 		enc110->base.ctx->logger
-enum DP_PIXEL_ENCODING {
-DP_PIXEL_ENCODING_RGB444                 = 0x00000000,
-DP_PIXEL_ENCODING_YCBCR422               = 0x00000001,
-DP_PIXEL_ENCODING_YCBCR444               = 0x00000002,
-DP_PIXEL_ENCODING_RGB_WIDE_GAMUT         = 0x00000003,
-DP_PIXEL_ENCODING_Y_ONLY                 = 0x00000004,
-DP_PIXEL_ENCODING_YCBCR420               = 0x00000005,
-DP_PIXEL_ENCODING_RESERVED               = 0x00000006,
-};
-
-
-enum DP_COMPONENT_DEPTH {
-DP_COMPONENT_DEPTH_6BPC                  = 0x00000000,
-DP_COMPONENT_DEPTH_8BPC                  = 0x00000001,
-DP_COMPONENT_DEPTH_10BPC                 = 0x00000002,
-DP_COMPONENT_DEPTH_12BPC                 = 0x00000003,
-DP_COMPONENT_DEPTH_16BPC                 = 0x00000004,
-DP_COMPONENT_DEPTH_RESERVED              = 0x00000005,
-};
 
 
 #define REG(reg)\
@@ -314,11 +297,11 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 	switch (crtc_timing->pixel_encoding) {
 	case PIXEL_ENCODING_YCBCR422:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-				DP_PIXEL_ENCODING_YCBCR422);
+				DP_PIXEL_ENCODING_TYPE_YCBCR422);
 		break;
 	case PIXEL_ENCODING_YCBCR444:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-				DP_PIXEL_ENCODING_YCBCR444);
+				DP_PIXEL_ENCODING_TYPE_YCBCR444);
 
 		if (crtc_timing->flags.Y_ONLY)
 			if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -326,7 +309,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 				 * Color depth of Y-only could be
 				 * 8, 10, 12, 16 bits */
 				REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-						DP_PIXEL_ENCODING_Y_ONLY);
+						DP_PIXEL_ENCODING_TYPE_Y_ONLY);
 		/* Note: DP_MSA_MISC1 bit 7 is the indicator
 		 * of Y-only mode.
 		 * This bit is set in HW if register
@@ -334,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 		break;
 	case PIXEL_ENCODING_YCBCR420:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-				DP_PIXEL_ENCODING_YCBCR420);
+				DP_PIXEL_ENCODING_TYPE_YCBCR420);
 		if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
 			REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
 
@@ -345,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 		break;
 	default:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-				DP_PIXEL_ENCODING_RGB444);
+				DP_PIXEL_ENCODING_TYPE_RGB444);
 		break;
 	}
 
@@ -363,20 +346,20 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 		break;
 	case COLOR_DEPTH_888:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-				DP_COMPONENT_DEPTH_8BPC);
+				DP_COMPONENT_PIXEL_DEPTH_8BPC);
 		break;
 	case COLOR_DEPTH_101010:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-				DP_COMPONENT_DEPTH_10BPC);
+				DP_COMPONENT_PIXEL_DEPTH_10BPC);
 
 		break;
 	case COLOR_DEPTH_121212:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-				DP_COMPONENT_DEPTH_12BPC);
+				DP_COMPONENT_PIXEL_DEPTH_12BPC);
 		break;
 	default:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-				DP_COMPONENT_DEPTH_6BPC);
+				DP_COMPONENT_PIXEL_DEPTH_6BPC);
 		break;
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 0413c707b921..9ec46f8fc7cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -26,29 +26,11 @@
 
 #include "dc_bios_types.h"
 #include "dcn10_stream_encoder.h"
-
 #include "reg_helper.h"
+#include "hw_shared.h"
+
 #define DC_LOGGER \
 		enc1->base.ctx->logger
-enum DP_PIXEL_ENCODING {
-DP_PIXEL_ENCODING_RGB444                 = 0x00000000,
-DP_PIXEL_ENCODING_YCBCR422               = 0x00000001,
-DP_PIXEL_ENCODING_YCBCR444               = 0x00000002,
-DP_PIXEL_ENCODING_RGB_WIDE_GAMUT         = 0x00000003,
-DP_PIXEL_ENCODING_Y_ONLY                 = 0x00000004,
-DP_PIXEL_ENCODING_YCBCR420               = 0x00000005,
-DP_PIXEL_ENCODING_RESERVED               = 0x00000006,
-};
-
-
-enum DP_COMPONENT_DEPTH {
-DP_COMPONENT_DEPTH_6BPC                  = 0x00000000,
-DP_COMPONENT_DEPTH_8BPC                  = 0x00000001,
-DP_COMPONENT_DEPTH_10BPC                 = 0x00000002,
-DP_COMPONENT_DEPTH_12BPC                 = 0x00000003,
-DP_COMPONENT_DEPTH_16BPC                 = 0x00000004,
-DP_COMPONENT_DEPTH_RESERVED              = 0x00000005,
-};
 
 
 #define REG(reg)\
@@ -70,7 +52,7 @@ enum {
 #define CTX \
 	enc1->base.ctx
 
-static void enc1_update_generic_info_packet(
+void enc1_update_generic_info_packet(
 	struct dcn10_stream_encoder *enc1,
 	uint32_t packet_index,
 	const struct dc_info_packet *info_packet)
@@ -260,7 +242,7 @@ static void enc1_update_hdmi_info_packet(
 }
 
 /* setup stream encoder in dp mode */
-static void enc1_stream_encoder_dp_set_stream_attribute(
+void enc1_stream_encoder_dp_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	enum dc_color_space output_color_space)
@@ -284,11 +266,11 @@ static void enc1_stream_encoder_dp_set_stream_attribute(
 	switch (crtc_timing->pixel_encoding) {
 	case PIXEL_ENCODING_YCBCR422:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-				DP_PIXEL_ENCODING_YCBCR422);
+				DP_PIXEL_ENCODING_TYPE_YCBCR422);
 		break;
 	case PIXEL_ENCODING_YCBCR444:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-				DP_PIXEL_ENCODING_YCBCR444);
+				DP_PIXEL_ENCODING_TYPE_YCBCR444);
 
 		if (crtc_timing->flags.Y_ONLY)
 			if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -297,7 +279,7 @@ static void enc1_stream_encoder_dp_set_stream_attribute(
 				 * 8, 10, 12, 16 bits
 				 */
 				REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-						DP_PIXEL_ENCODING_Y_ONLY);
+						DP_PIXEL_ENCODING_TYPE_Y_ONLY);
 		/* Note: DP_MSA_MISC1 bit 7 is the indicator
 		 * of Y-only mode.
 		 * This bit is set in HW if register
@@ -306,12 +288,12 @@ static void enc1_stream_encoder_dp_set_stream_attribute(
 		break;
 	case PIXEL_ENCODING_YCBCR420:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-				DP_PIXEL_ENCODING_YCBCR420);
+				DP_PIXEL_ENCODING_TYPE_YCBCR420);
 		REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
 		break;
 	default:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-				DP_PIXEL_ENCODING_RGB444);
+				DP_PIXEL_ENCODING_TYPE_RGB444);
 		break;
 	}
 
@@ -326,20 +308,20 @@ static void enc1_stream_encoder_dp_set_stream_attribute(
 		break;
 	case COLOR_DEPTH_888:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-				DP_COMPONENT_DEPTH_8BPC);
+				DP_COMPONENT_PIXEL_DEPTH_8BPC);
 		break;
 	case COLOR_DEPTH_101010:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-				DP_COMPONENT_DEPTH_10BPC);
+				DP_COMPONENT_PIXEL_DEPTH_10BPC);
 
 		break;
 	case COLOR_DEPTH_121212:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-				DP_COMPONENT_DEPTH_12BPC);
+				DP_COMPONENT_PIXEL_DEPTH_12BPC);
 		break;
 	default:
 		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-				DP_COMPONENT_DEPTH_6BPC);
+				DP_COMPONENT_PIXEL_DEPTH_6BPC);
 		break;
 	}
 
@@ -485,7 +467,7 @@ static void enc1_stream_encoder_set_stream_attribute_helper(
 }
 
 /* setup stream encoder in hdmi mode */
-static void enc1_stream_encoder_hdmi_set_stream_attribute(
+void enc1_stream_encoder_hdmi_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	int actual_pix_clk_khz,
@@ -591,7 +573,7 @@ static void enc1_stream_encoder_hdmi_set_stream_attribute(
 }
 
 /* setup stream encoder in dvi mode */
-static void enc1_stream_encoder_dvi_set_stream_attribute(
+void enc1_stream_encoder_dvi_set_stream_attribute(
 	struct stream_encoder *enc,
 	struct dc_crtc_timing *crtc_timing,
 	bool is_dual_link)
@@ -616,7 +598,7 @@ static void enc1_stream_encoder_dvi_set_stream_attribute(
 	enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
 }
 
-static void enc1_stream_encoder_set_mst_bandwidth(
+void enc1_stream_encoder_set_mst_bandwidth(
 	struct stream_encoder *enc,
 	struct fixed31_32 avg_time_slots_per_mtp)
 {
@@ -699,7 +681,7 @@ static void enc1_stream_encoder_stop_hdmi_info_packets(
 		HDMI_GENERIC1_SEND, 0);
 }
 
-static void enc1_stream_encoder_update_dp_info_packets(
+void enc1_stream_encoder_update_dp_info_packets(
 	struct stream_encoder *enc,
 	const struct encoder_info_frame *info_frame)
 {
@@ -742,7 +724,7 @@ static void enc1_stream_encoder_update_dp_info_packets(
 		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 }
 
-static void enc1_stream_encoder_stop_dp_info_packets(
+void enc1_stream_encoder_stop_dp_info_packets(
 	struct stream_encoder *enc)
 {
 	/* stop generic packets on DP */
@@ -770,7 +752,7 @@ static void enc1_stream_encoder_stop_dp_info_packets(
 
 }
 
-static void enc1_stream_encoder_dp_blank(
+void enc1_stream_encoder_dp_blank(
 	struct stream_encoder *enc)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -823,7 +805,7 @@ static void enc1_stream_encoder_dp_blank(
 }
 
 /* output video stream to link encoder */
-static void enc1_stream_encoder_dp_unblank(
+void enc1_stream_encoder_dp_unblank(
 	struct stream_encoder *enc,
 	const struct encoder_unblank_param *param)
 {
@@ -885,7 +867,7 @@ static void enc1_stream_encoder_dp_unblank(
 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
 }
 
-static void enc1_stream_encoder_set_avmute(
+void enc1_stream_encoder_set_avmute(
 	struct stream_encoder *enc,
 	bool enable)
 {
@@ -1442,7 +1424,7 @@ void enc1_se_hdmi_audio_disable(
 }
 
 
-static void enc1_setup_stereo_sync(
+void enc1_setup_stereo_sync(
 	struct stream_encoder *enc,
 	int tg_inst, bool enable)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 86f8ee5ed8b8..6b3e4ded155b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -31,7 +31,8 @@
 #define DCN10STRENC_FROM_STRENC(stream_encoder)\
 	container_of(stream_encoder, struct dcn10_stream_encoder, base)
 
-#define SE_COMMON_REG_LIST_BASE(id) \
+#define SE_COMMON_DCN_REG_LIST(id) \
+	SRI(AFMT_CNTL, DIG, id), \
 	SRI(AFMT_GENERIC_0, DIG, id), \
 	SRI(AFMT_GENERIC_1, DIG, id), \
 	SRI(AFMT_GENERIC_2, DIG, id), \
@@ -43,6 +44,7 @@
 	SRI(AFMT_GENERIC_HDR, DIG, id), \
 	SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
 	SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
+	SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id), \
 	SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
 	SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
 	SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
@@ -51,9 +53,12 @@
 	SRI(AFMT_60958_2, DIG, id), \
 	SRI(DIG_FE_CNTL, DIG, id), \
 	SRI(HDMI_CONTROL, DIG, id), \
+	SRI(HDMI_DB_CONTROL, DIG, id), \
 	SRI(HDMI_GC, DIG, id), \
 	SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
 	SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+	SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+	SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
 	SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
 	SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
 	SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
@@ -65,7 +70,13 @@
 	SRI(HDMI_ACR_44_1, DIG, id),\
 	SRI(HDMI_ACR_48_0, DIG, id),\
 	SRI(HDMI_ACR_48_1, DIG, id),\
-	SRI(TMDS_CNTL, DIG, id), \
+	SRI(DP_DB_CNTL, DP, id), \
+	SRI(DP_MSA_MISC, DP, id), \
+	SRI(DP_MSA_COLORIMETRY, DP, id), \
+	SRI(DP_MSA_TIMING_PARAM1, DP, id), \
+	SRI(DP_MSA_TIMING_PARAM2, DP, id), \
+	SRI(DP_MSA_TIMING_PARAM3, DP, id), \
+	SRI(DP_MSA_TIMING_PARAM4, DP, id), \
 	SRI(DP_MSE_RATE_CNTL, DP, id), \
 	SRI(DP_MSE_RATE_UPDATE, DP, id), \
 	SRI(DP_PIXEL_FORMAT, DP, id), \
@@ -79,19 +90,74 @@
 	SRI(DP_SEC_TIMESTAMP, DP, id)
 
 #define SE_DCN_REG_LIST(id)\
-	SE_COMMON_REG_LIST_BASE(id),\
-	SRI(AFMT_CNTL, DIG, id),\
-	SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id),\
-	SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
-	SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
-	SRI(DP_DB_CNTL, DP, id), \
-	SRI(DP_MSA_MISC, DP, id), \
-	SRI(DP_MSA_COLORIMETRY, DP, id), \
-	SRI(DP_MSA_TIMING_PARAM1, DP, id), \
-	SRI(DP_MSA_TIMING_PARAM2, DP, id), \
-	SRI(DP_MSA_TIMING_PARAM3, DP, id), \
-	SRI(DP_MSA_TIMING_PARAM4, DP, id), \
-	SRI(HDMI_DB_CONTROL, DIG, id)
+	SE_COMMON_DCN_REG_LIST(id)
+
+
+struct dcn10_stream_enc_registers {
+	uint32_t AFMT_CNTL;
+	uint32_t AFMT_AVI_INFO0;
+	uint32_t AFMT_AVI_INFO1;
+	uint32_t AFMT_AVI_INFO2;
+	uint32_t AFMT_AVI_INFO3;
+	uint32_t AFMT_GENERIC_0;
+	uint32_t AFMT_GENERIC_1;
+	uint32_t AFMT_GENERIC_2;
+	uint32_t AFMT_GENERIC_3;
+	uint32_t AFMT_GENERIC_4;
+	uint32_t AFMT_GENERIC_5;
+	uint32_t AFMT_GENERIC_6;
+	uint32_t AFMT_GENERIC_7;
+	uint32_t AFMT_GENERIC_HDR;
+	uint32_t AFMT_INFOFRAME_CONTROL0;
+	uint32_t AFMT_VBI_PACKET_CONTROL;
+	uint32_t AFMT_VBI_PACKET_CONTROL1;
+	uint32_t AFMT_AUDIO_PACKET_CONTROL;
+	uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+	uint32_t AFMT_AUDIO_SRC_CONTROL;
+	uint32_t AFMT_60958_0;
+	uint32_t AFMT_60958_1;
+	uint32_t AFMT_60958_2;
+	uint32_t DIG_FE_CNTL;
+	uint32_t DP_MSE_RATE_CNTL;
+	uint32_t DP_MSE_RATE_UPDATE;
+	uint32_t DP_PIXEL_FORMAT;
+	uint32_t DP_SEC_CNTL;
+	uint32_t DP_STEER_FIFO;
+	uint32_t DP_VID_M;
+	uint32_t DP_VID_N;
+	uint32_t DP_VID_STREAM_CNTL;
+	uint32_t DP_VID_TIMING;
+	uint32_t DP_SEC_AUD_N;
+	uint32_t DP_SEC_TIMESTAMP;
+	uint32_t HDMI_CONTROL;
+	uint32_t HDMI_GC;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL0;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL1;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL2;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL3;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL4;
+	uint32_t HDMI_GENERIC_PACKET_CONTROL5;
+	uint32_t HDMI_INFOFRAME_CONTROL0;
+	uint32_t HDMI_INFOFRAME_CONTROL1;
+	uint32_t HDMI_VBI_PACKET_CONTROL;
+	uint32_t HDMI_AUDIO_PACKET_CONTROL;
+	uint32_t HDMI_ACR_PACKET_CONTROL;
+	uint32_t HDMI_ACR_32_0;
+	uint32_t HDMI_ACR_32_1;
+	uint32_t HDMI_ACR_44_0;
+	uint32_t HDMI_ACR_44_1;
+	uint32_t HDMI_ACR_48_0;
+	uint32_t HDMI_ACR_48_1;
+	uint32_t DP_DB_CNTL;
+	uint32_t DP_MSA_MISC;
+	uint32_t DP_MSA_COLORIMETRY;
+	uint32_t DP_MSA_TIMING_PARAM1;
+	uint32_t DP_MSA_TIMING_PARAM2;
+	uint32_t DP_MSA_TIMING_PARAM3;
+	uint32_t DP_MSA_TIMING_PARAM4;
+	uint32_t HDMI_DB_CONTROL;
+};
+
 
 #define SE_SF(reg_name, field_name, post_fix)\
 	.field_name = reg_name ## __ ## field_name ## post_fix
@@ -221,348 +287,151 @@
 	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
 	SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh)
 
+
+#define SE_REG_FIELD_LIST_DCN1_0(type) \
+	type AFMT_GENERIC_INDEX;\
+	type AFMT_GENERIC_HB0;\
+	type AFMT_GENERIC_HB1;\
+	type AFMT_GENERIC_HB2;\
+	type AFMT_GENERIC_HB3;\
+	type AFMT_GENERIC_LOCK_STATUS;\
+	type AFMT_GENERIC_CONFLICT;\
+	type AFMT_GENERIC_CONFLICT_CLR;\
+	type AFMT_GENERIC0_FRAME_UPDATE_PENDING;\
+	type AFMT_GENERIC1_FRAME_UPDATE_PENDING;\
+	type AFMT_GENERIC2_FRAME_UPDATE_PENDING;\
+	type AFMT_GENERIC3_FRAME_UPDATE_PENDING;\
+	type AFMT_GENERIC4_FRAME_UPDATE_PENDING;\
+	type AFMT_GENERIC5_FRAME_UPDATE_PENDING;\
+	type AFMT_GENERIC6_FRAME_UPDATE_PENDING;\
+	type AFMT_GENERIC7_FRAME_UPDATE_PENDING;\
+	type AFMT_GENERIC0_FRAME_UPDATE;\
+	type AFMT_GENERIC1_FRAME_UPDATE;\
+	type AFMT_GENERIC2_FRAME_UPDATE;\
+	type AFMT_GENERIC3_FRAME_UPDATE;\
+	type AFMT_GENERIC4_FRAME_UPDATE;\
+	type AFMT_GENERIC5_FRAME_UPDATE;\
+	type AFMT_GENERIC6_FRAME_UPDATE;\
+	type AFMT_GENERIC7_FRAME_UPDATE;\
+	type HDMI_GENERIC0_CONT;\
+	type HDMI_GENERIC0_SEND;\
+	type HDMI_GENERIC0_LINE;\
+	type HDMI_GENERIC1_CONT;\
+	type HDMI_GENERIC1_SEND;\
+	type HDMI_GENERIC1_LINE;\
+	type HDMI_GENERIC2_CONT;\
+	type HDMI_GENERIC2_SEND;\
+	type HDMI_GENERIC2_LINE;\
+	type HDMI_GENERIC3_CONT;\
+	type HDMI_GENERIC3_SEND;\
+	type HDMI_GENERIC3_LINE;\
+	type HDMI_GENERIC4_CONT;\
+	type HDMI_GENERIC4_SEND;\
+	type HDMI_GENERIC4_LINE;\
+	type HDMI_GENERIC5_CONT;\
+	type HDMI_GENERIC5_SEND;\
+	type HDMI_GENERIC5_LINE;\
+	type HDMI_GENERIC6_CONT;\
+	type HDMI_GENERIC6_SEND;\
+	type HDMI_GENERIC6_LINE;\
+	type HDMI_GENERIC7_CONT;\
+	type HDMI_GENERIC7_SEND;\
+	type HDMI_GENERIC7_LINE;\
+	type DP_PIXEL_ENCODING;\
+	type DP_COMPONENT_DEPTH;\
+	type HDMI_PACKET_GEN_VERSION;\
+	type HDMI_KEEPOUT_MODE;\
+	type HDMI_DEEP_COLOR_ENABLE;\
+	type HDMI_CLOCK_CHANNEL_RATE;\
+	type HDMI_DEEP_COLOR_DEPTH;\
+	type HDMI_GC_CONT;\
+	type HDMI_GC_SEND;\
+	type HDMI_NULL_SEND;\
+	type HDMI_DATA_SCRAMBLE_EN;\
+	type HDMI_AUDIO_INFO_SEND;\
+	type AFMT_AUDIO_INFO_UPDATE;\
+	type HDMI_AUDIO_INFO_LINE;\
+	type HDMI_GC_AVMUTE;\
+	type DP_MSE_RATE_X;\
+	type DP_MSE_RATE_Y;\
+	type DP_MSE_RATE_UPDATE_PENDING;\
+	type DP_SEC_GSP0_ENABLE;\
+	type DP_SEC_STREAM_ENABLE;\
+	type DP_SEC_GSP1_ENABLE;\
+	type DP_SEC_GSP2_ENABLE;\
+	type DP_SEC_GSP3_ENABLE;\
+	type DP_SEC_GSP4_ENABLE;\
+	type DP_SEC_GSP5_ENABLE;\
+	type DP_SEC_GSP6_ENABLE;\
+	type DP_SEC_GSP7_ENABLE;\
+	type DP_SEC_MPG_ENABLE;\
+	type DP_VID_STREAM_DIS_DEFER;\
+	type DP_VID_STREAM_ENABLE;\
+	type DP_VID_STREAM_STATUS;\
+	type DP_STEER_FIFO_RESET;\
+	type DP_VID_M_N_GEN_EN;\
+	type DP_VID_N;\
+	type DP_VID_M;\
+	type DIG_START;\
+	type AFMT_AUDIO_SRC_SELECT;\
+	type AFMT_AUDIO_CHANNEL_ENABLE;\
+	type HDMI_AUDIO_PACKETS_PER_LINE;\
+	type HDMI_AUDIO_DELAY_EN;\
+	type AFMT_60958_CS_UPDATE;\
+	type AFMT_AUDIO_LAYOUT_OVRD;\
+	type AFMT_60958_OSF_OVRD;\
+	type HDMI_ACR_AUTO_SEND;\
+	type HDMI_ACR_SOURCE;\
+	type HDMI_ACR_AUDIO_PRIORITY;\
+	type HDMI_ACR_CTS_32;\
+	type HDMI_ACR_N_32;\
+	type HDMI_ACR_CTS_44;\
+	type HDMI_ACR_N_44;\
+	type HDMI_ACR_CTS_48;\
+	type HDMI_ACR_N_48;\
+	type AFMT_60958_CS_CHANNEL_NUMBER_L;\
+	type AFMT_60958_CS_CLOCK_ACCURACY;\
+	type AFMT_60958_CS_CHANNEL_NUMBER_R;\
+	type AFMT_60958_CS_CHANNEL_NUMBER_2;\
+	type AFMT_60958_CS_CHANNEL_NUMBER_3;\
+	type AFMT_60958_CS_CHANNEL_NUMBER_4;\
+	type AFMT_60958_CS_CHANNEL_NUMBER_5;\
+	type AFMT_60958_CS_CHANNEL_NUMBER_6;\
+	type AFMT_60958_CS_CHANNEL_NUMBER_7;\
+	type DP_SEC_AUD_N;\
+	type DP_SEC_TIMESTAMP_MODE;\
+	type DP_SEC_ASP_ENABLE;\
+	type DP_SEC_ATP_ENABLE;\
+	type DP_SEC_AIP_ENABLE;\
+	type DP_SEC_ACM_ENABLE;\
+	type AFMT_AUDIO_SAMPLE_SEND;\
+	type AFMT_AUDIO_CLOCK_EN;\
+	type TMDS_PIXEL_ENCODING;\
+	type TMDS_COLOR_FORMAT;\
+	type DIG_STEREOSYNC_SELECT;\
+	type DIG_STEREOSYNC_GATE_EN;\
+	type DP_DB_DISABLE;\
+	type DP_MSA_MISC0;\
+	type DP_MSA_HTOTAL;\
+	type DP_MSA_VTOTAL;\
+	type DP_MSA_HSTART;\
+	type DP_MSA_VSTART;\
+	type DP_MSA_HSYNCWIDTH;\
+	type DP_MSA_HSYNCPOLARITY;\
+	type DP_MSA_VSYNCWIDTH;\
+	type DP_MSA_VSYNCPOLARITY;\
+	type DP_MSA_HWIDTH;\
+	type DP_MSA_VHEIGHT;\
+	type HDMI_DB_DISABLE;\
+	type DP_VID_N_MUL;\
+	type DP_VID_M_DOUBLE_VALUE_EN
+
 struct dcn10_stream_encoder_shift {
-	uint8_t AFMT_GENERIC_INDEX;
-	uint8_t AFMT_GENERIC_HB0;
-	uint8_t AFMT_GENERIC_HB1;
-	uint8_t AFMT_GENERIC_HB2;
-	uint8_t AFMT_GENERIC_HB3;
-	uint8_t AFMT_GENERIC_LOCK_STATUS;
-	uint8_t AFMT_GENERIC_CONFLICT;
-	uint8_t AFMT_GENERIC_CONFLICT_CLR;
-	uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
-	uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
-	uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
-	uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
-	uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
-	uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
-	uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
-	uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
-	uint8_t AFMT_GENERIC0_FRAME_UPDATE;
-	uint8_t AFMT_GENERIC1_FRAME_UPDATE;
-	uint8_t AFMT_GENERIC2_FRAME_UPDATE;
-	uint8_t AFMT_GENERIC3_FRAME_UPDATE;
-	uint8_t AFMT_GENERIC4_FRAME_UPDATE;
-	uint8_t AFMT_GENERIC5_FRAME_UPDATE;
-	uint8_t AFMT_GENERIC6_FRAME_UPDATE;
-	uint8_t AFMT_GENERIC7_FRAME_UPDATE;
-	uint8_t HDMI_GENERIC0_CONT;
-	uint8_t HDMI_GENERIC0_SEND;
-	uint8_t HDMI_GENERIC0_LINE;
-	uint8_t HDMI_GENERIC1_CONT;
-	uint8_t HDMI_GENERIC1_SEND;
-	uint8_t HDMI_GENERIC1_LINE;
-	uint8_t HDMI_GENERIC2_CONT;
-	uint8_t HDMI_GENERIC2_SEND;
-	uint8_t HDMI_GENERIC2_LINE;
-	uint8_t HDMI_GENERIC3_CONT;
-	uint8_t HDMI_GENERIC3_SEND;
-	uint8_t HDMI_GENERIC3_LINE;
-	uint8_t HDMI_GENERIC4_CONT;
-	uint8_t HDMI_GENERIC4_SEND;
-	uint8_t HDMI_GENERIC4_LINE;
-	uint8_t HDMI_GENERIC5_CONT;
-	uint8_t HDMI_GENERIC5_SEND;
-	uint8_t HDMI_GENERIC5_LINE;
-	uint8_t HDMI_GENERIC6_CONT;
-	uint8_t HDMI_GENERIC6_SEND;
-	uint8_t HDMI_GENERIC6_LINE;
-	uint8_t HDMI_GENERIC7_CONT;
-	uint8_t HDMI_GENERIC7_SEND;
-	uint8_t HDMI_GENERIC7_LINE;
-	uint8_t DP_PIXEL_ENCODING;
-	uint8_t DP_COMPONENT_DEPTH;
-	uint8_t HDMI_PACKET_GEN_VERSION;
-	uint8_t HDMI_KEEPOUT_MODE;
-	uint8_t HDMI_DEEP_COLOR_ENABLE;
-	uint8_t HDMI_CLOCK_CHANNEL_RATE;
-	uint8_t HDMI_DEEP_COLOR_DEPTH;
-	uint8_t HDMI_GC_CONT;
-	uint8_t HDMI_GC_SEND;
-	uint8_t HDMI_NULL_SEND;
-	uint8_t HDMI_DATA_SCRAMBLE_EN;
-	uint8_t HDMI_AUDIO_INFO_SEND;
-	uint8_t AFMT_AUDIO_INFO_UPDATE;
-	uint8_t HDMI_AUDIO_INFO_LINE;
-	uint8_t HDMI_GC_AVMUTE;
-	uint8_t DP_MSE_RATE_X;
-	uint8_t DP_MSE_RATE_Y;
-	uint8_t DP_MSE_RATE_UPDATE_PENDING;
-	uint8_t DP_SEC_GSP0_ENABLE;
-	uint8_t DP_SEC_STREAM_ENABLE;
-	uint8_t DP_SEC_GSP1_ENABLE;
-	uint8_t DP_SEC_GSP2_ENABLE;
-	uint8_t DP_SEC_GSP3_ENABLE;
-	uint8_t DP_SEC_GSP4_ENABLE;
-	uint8_t DP_SEC_GSP5_ENABLE;
-	uint8_t DP_SEC_GSP6_ENABLE;
-	uint8_t DP_SEC_GSP7_ENABLE;
-	uint8_t DP_SEC_MPG_ENABLE;
-	uint8_t DP_VID_STREAM_DIS_DEFER;
-	uint8_t DP_VID_STREAM_ENABLE;
-	uint8_t DP_VID_STREAM_STATUS;
-	uint8_t DP_STEER_FIFO_RESET;
-	uint8_t DP_VID_M_N_GEN_EN;
-	uint8_t DP_VID_N;
-	uint8_t DP_VID_M;
-	uint8_t DIG_START;
-	uint8_t AFMT_AUDIO_SRC_SELECT;
-	uint8_t AFMT_AUDIO_CHANNEL_ENABLE;
-	uint8_t HDMI_AUDIO_PACKETS_PER_LINE;
-	uint8_t HDMI_AUDIO_DELAY_EN;
-	uint8_t AFMT_60958_CS_UPDATE;
-	uint8_t AFMT_AUDIO_LAYOUT_OVRD;
-	uint8_t AFMT_60958_OSF_OVRD;
-	uint8_t HDMI_ACR_AUTO_SEND;
-	uint8_t HDMI_ACR_SOURCE;
-	uint8_t HDMI_ACR_AUDIO_PRIORITY;
-	uint8_t HDMI_ACR_CTS_32;
-	uint8_t HDMI_ACR_N_32;
-	uint8_t HDMI_ACR_CTS_44;
-	uint8_t HDMI_ACR_N_44;
-	uint8_t HDMI_ACR_CTS_48;
-	uint8_t HDMI_ACR_N_48;
-	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L;
-	uint8_t AFMT_60958_CS_CLOCK_ACCURACY;
-	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R;
-	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2;
-	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3;
-	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4;
-	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5;
-	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6;
-	uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7;
-	uint8_t DP_SEC_AUD_N;
-	uint8_t DP_SEC_TIMESTAMP_MODE;
-	uint8_t DP_SEC_ASP_ENABLE;
-	uint8_t DP_SEC_ATP_ENABLE;
-	uint8_t DP_SEC_AIP_ENABLE;
-	uint8_t DP_SEC_ACM_ENABLE;
-	uint8_t AFMT_AUDIO_SAMPLE_SEND;
-	uint8_t AFMT_AUDIO_CLOCK_EN;
-	uint8_t TMDS_PIXEL_ENCODING;
-	uint8_t TMDS_COLOR_FORMAT;
-	uint8_t DIG_STEREOSYNC_SELECT;
-	uint8_t DIG_STEREOSYNC_GATE_EN;
-	uint8_t DP_DB_DISABLE;
-	uint8_t DP_MSA_MISC0;
-	uint8_t DP_MSA_HTOTAL;
-	uint8_t DP_MSA_VTOTAL;
-	uint8_t DP_MSA_HSTART;
-	uint8_t DP_MSA_VSTART;
-	uint8_t DP_MSA_HSYNCWIDTH;
-	uint8_t DP_MSA_HSYNCPOLARITY;
-	uint8_t DP_MSA_VSYNCWIDTH;
-	uint8_t DP_MSA_VSYNCPOLARITY;
-	uint8_t DP_MSA_HWIDTH;
-	uint8_t DP_MSA_VHEIGHT;
-	uint8_t HDMI_DB_DISABLE;
-	uint8_t DP_VID_N_MUL;
-	uint8_t DP_VID_M_DOUBLE_VALUE_EN;
+	SE_REG_FIELD_LIST_DCN1_0(uint8_t);
 };
 
 struct dcn10_stream_encoder_mask {
-	uint32_t AFMT_GENERIC_INDEX;
-	uint32_t AFMT_GENERIC_HB0;
-	uint32_t AFMT_GENERIC_HB1;
-	uint32_t AFMT_GENERIC_HB2;
-	uint32_t AFMT_GENERIC_HB3;
-	uint32_t AFMT_GENERIC_LOCK_STATUS;
-	uint32_t AFMT_GENERIC_CONFLICT;
-	uint32_t AFMT_GENERIC_CONFLICT_CLR;
-	uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
-	uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
-	uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
-	uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
-	uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
-	uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
-	uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
-	uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
-	uint32_t AFMT_GENERIC0_FRAME_UPDATE;
-	uint32_t AFMT_GENERIC1_FRAME_UPDATE;
-	uint32_t AFMT_GENERIC2_FRAME_UPDATE;
-	uint32_t AFMT_GENERIC3_FRAME_UPDATE;
-	uint32_t AFMT_GENERIC4_FRAME_UPDATE;
-	uint32_t AFMT_GENERIC5_FRAME_UPDATE;
-	uint32_t AFMT_GENERIC6_FRAME_UPDATE;
-	uint32_t AFMT_GENERIC7_FRAME_UPDATE;
-	uint32_t HDMI_GENERIC0_CONT;
-	uint32_t HDMI_GENERIC0_SEND;
-	uint32_t HDMI_GENERIC0_LINE;
-	uint32_t HDMI_GENERIC1_CONT;
-	uint32_t HDMI_GENERIC1_SEND;
-	uint32_t HDMI_GENERIC1_LINE;
-	uint32_t HDMI_GENERIC2_CONT;
-	uint32_t HDMI_GENERIC2_SEND;
-	uint32_t HDMI_GENERIC2_LINE;
-	uint32_t HDMI_GENERIC3_CONT;
-	uint32_t HDMI_GENERIC3_SEND;
-	uint32_t HDMI_GENERIC3_LINE;
-	uint32_t HDMI_GENERIC4_CONT;
-	uint32_t HDMI_GENERIC4_SEND;
-	uint32_t HDMI_GENERIC4_LINE;
-	uint32_t HDMI_GENERIC5_CONT;
-	uint32_t HDMI_GENERIC5_SEND;
-	uint32_t HDMI_GENERIC5_LINE;
-	uint32_t HDMI_GENERIC6_CONT;
-	uint32_t HDMI_GENERIC6_SEND;
-	uint32_t HDMI_GENERIC6_LINE;
-	uint32_t HDMI_GENERIC7_CONT;
-	uint32_t HDMI_GENERIC7_SEND;
-	uint32_t HDMI_GENERIC7_LINE;
-	uint32_t DP_PIXEL_ENCODING;
-	uint32_t DP_COMPONENT_DEPTH;
-	uint32_t HDMI_PACKET_GEN_VERSION;
-	uint32_t HDMI_KEEPOUT_MODE;
-	uint32_t HDMI_DEEP_COLOR_ENABLE;
-	uint32_t HDMI_CLOCK_CHANNEL_RATE;
-	uint32_t HDMI_DEEP_COLOR_DEPTH;
-	uint32_t HDMI_GC_CONT;
-	uint32_t HDMI_GC_SEND;
-	uint32_t HDMI_NULL_SEND;
-	uint32_t HDMI_DATA_SCRAMBLE_EN;
-	uint32_t HDMI_AUDIO_INFO_SEND;
-	uint32_t AFMT_AUDIO_INFO_UPDATE;
-	uint32_t HDMI_AUDIO_INFO_LINE;
-	uint32_t HDMI_GC_AVMUTE;
-	uint32_t DP_MSE_RATE_X;
-	uint32_t DP_MSE_RATE_Y;
-	uint32_t DP_MSE_RATE_UPDATE_PENDING;
-	uint32_t DP_SEC_GSP0_ENABLE;
-	uint32_t DP_SEC_STREAM_ENABLE;
-	uint32_t DP_SEC_GSP1_ENABLE;
-	uint32_t DP_SEC_GSP2_ENABLE;
-	uint32_t DP_SEC_GSP3_ENABLE;
-	uint32_t DP_SEC_GSP4_ENABLE;
-	uint32_t DP_SEC_GSP5_ENABLE;
-	uint32_t DP_SEC_GSP6_ENABLE;
-	uint32_t DP_SEC_GSP7_ENABLE;
-	uint32_t DP_SEC_MPG_ENABLE;
-	uint32_t DP_VID_STREAM_DIS_DEFER;
-	uint32_t DP_VID_STREAM_ENABLE;
-	uint32_t DP_VID_STREAM_STATUS;
-	uint32_t DP_STEER_FIFO_RESET;
-	uint32_t DP_VID_M_N_GEN_EN;
-	uint32_t DP_VID_N;
-	uint32_t DP_VID_M;
-	uint32_t DIG_START;
-	uint32_t AFMT_AUDIO_SRC_SELECT;
-	uint32_t AFMT_AUDIO_CHANNEL_ENABLE;
-	uint32_t HDMI_AUDIO_PACKETS_PER_LINE;
-	uint32_t HDMI_AUDIO_DELAY_EN;
-	uint32_t AFMT_60958_CS_UPDATE;
-	uint32_t AFMT_AUDIO_LAYOUT_OVRD;
-	uint32_t AFMT_60958_OSF_OVRD;
-	uint32_t HDMI_ACR_AUTO_SEND;
-	uint32_t HDMI_ACR_SOURCE;
-	uint32_t HDMI_ACR_AUDIO_PRIORITY;
-	uint32_t HDMI_ACR_CTS_32;
-	uint32_t HDMI_ACR_N_32;
-	uint32_t HDMI_ACR_CTS_44;
-	uint32_t HDMI_ACR_N_44;
-	uint32_t HDMI_ACR_CTS_48;
-	uint32_t HDMI_ACR_N_48;
-	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L;
-	uint32_t AFMT_60958_CS_CLOCK_ACCURACY;
-	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R;
-	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2;
-	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3;
-	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4;
-	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5;
-	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6;
-	uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7;
-	uint32_t DP_SEC_AUD_N;
-	uint32_t DP_SEC_TIMESTAMP_MODE;
-	uint32_t DP_SEC_ASP_ENABLE;
-	uint32_t DP_SEC_ATP_ENABLE;
-	uint32_t DP_SEC_AIP_ENABLE;
-	uint32_t DP_SEC_ACM_ENABLE;
-	uint32_t AFMT_AUDIO_SAMPLE_SEND;
-	uint32_t AFMT_AUDIO_CLOCK_EN;
-	uint32_t TMDS_PIXEL_ENCODING;
-	uint32_t DIG_STEREOSYNC_SELECT;
-	uint32_t DIG_STEREOSYNC_GATE_EN;
-	uint32_t TMDS_COLOR_FORMAT;
-	uint32_t DP_DB_DISABLE;
-	uint32_t DP_MSA_MISC0;
-	uint32_t DP_MSA_HTOTAL;
-	uint32_t DP_MSA_VTOTAL;
-	uint32_t DP_MSA_HSTART;
-	uint32_t DP_MSA_VSTART;
-	uint32_t DP_MSA_HSYNCWIDTH;
-	uint32_t DP_MSA_HSYNCPOLARITY;
-	uint32_t DP_MSA_VSYNCWIDTH;
-	uint32_t DP_MSA_VSYNCPOLARITY;
-	uint32_t DP_MSA_HWIDTH;
-	uint32_t DP_MSA_VHEIGHT;
-	uint32_t HDMI_DB_DISABLE;
-	uint32_t DP_VID_N_MUL;
-	uint32_t DP_VID_M_DOUBLE_VALUE_EN;
-};
-
-struct dcn10_stream_enc_registers {
-	uint32_t AFMT_CNTL;
-	uint32_t AFMT_AVI_INFO0;
-	uint32_t AFMT_AVI_INFO1;
-	uint32_t AFMT_AVI_INFO2;
-	uint32_t AFMT_AVI_INFO3;
-	uint32_t AFMT_GENERIC_0;
-	uint32_t AFMT_GENERIC_1;
-	uint32_t AFMT_GENERIC_2;
-	uint32_t AFMT_GENERIC_3;
-	uint32_t AFMT_GENERIC_4;
-	uint32_t AFMT_GENERIC_5;
-	uint32_t AFMT_GENERIC_6;
-	uint32_t AFMT_GENERIC_7;
-	uint32_t AFMT_GENERIC_HDR;
-	uint32_t AFMT_INFOFRAME_CONTROL0;
-	uint32_t AFMT_VBI_PACKET_CONTROL;
-	uint32_t AFMT_VBI_PACKET_CONTROL1;
-	uint32_t AFMT_AUDIO_PACKET_CONTROL;
-	uint32_t AFMT_AUDIO_PACKET_CONTROL2;
-	uint32_t AFMT_AUDIO_SRC_CONTROL;
-	uint32_t AFMT_60958_0;
-	uint32_t AFMT_60958_1;
-	uint32_t AFMT_60958_2;
-	uint32_t DIG_FE_CNTL;
-	uint32_t DP_MSE_RATE_CNTL;
-	uint32_t DP_MSE_RATE_UPDATE;
-	uint32_t DP_PIXEL_FORMAT;
-	uint32_t DP_SEC_CNTL;
-	uint32_t DP_STEER_FIFO;
-	uint32_t DP_VID_M;
-	uint32_t DP_VID_N;
-	uint32_t DP_VID_STREAM_CNTL;
-	uint32_t DP_VID_TIMING;
-	uint32_t DP_SEC_AUD_N;
-	uint32_t DP_SEC_TIMESTAMP;
-	uint32_t HDMI_CONTROL;
-	uint32_t HDMI_GC;
-	uint32_t HDMI_GENERIC_PACKET_CONTROL0;
-	uint32_t HDMI_GENERIC_PACKET_CONTROL1;
-	uint32_t HDMI_GENERIC_PACKET_CONTROL2;
-	uint32_t HDMI_GENERIC_PACKET_CONTROL3;
-	uint32_t HDMI_GENERIC_PACKET_CONTROL4;
-	uint32_t HDMI_GENERIC_PACKET_CONTROL5;
-	uint32_t HDMI_INFOFRAME_CONTROL0;
-	uint32_t HDMI_INFOFRAME_CONTROL1;
-	uint32_t HDMI_VBI_PACKET_CONTROL;
-	uint32_t HDMI_AUDIO_PACKET_CONTROL;
-	uint32_t HDMI_ACR_PACKET_CONTROL;
-	uint32_t HDMI_ACR_32_0;
-	uint32_t HDMI_ACR_32_1;
-	uint32_t HDMI_ACR_44_0;
-	uint32_t HDMI_ACR_44_1;
-	uint32_t HDMI_ACR_48_0;
-	uint32_t HDMI_ACR_48_1;
-	uint32_t TMDS_CNTL;
-	uint32_t DP_DB_CNTL;
-	uint32_t DP_MSA_MISC;
-	uint32_t DP_MSA_COLORIMETRY;
-	uint32_t DP_MSA_TIMING_PARAM1;
-	uint32_t DP_MSA_TIMING_PARAM2;
-	uint32_t DP_MSA_TIMING_PARAM3;
-	uint32_t DP_MSA_TIMING_PARAM4;
-	uint32_t HDMI_DB_CONTROL;
+	SE_REG_FIELD_LIST_DCN1_0(uint32_t);
 };
 
 struct dcn10_stream_encoder {
@@ -581,4 +450,75 @@ void dcn10_stream_encoder_construct(
 	const struct dcn10_stream_encoder_shift *se_shift,
 	const struct dcn10_stream_encoder_mask *se_mask);
 
+void enc1_update_generic_info_packet(
+	struct dcn10_stream_encoder *enc1,
+	uint32_t packet_index,
+	const struct dc_info_packet *info_packet);
+
+void enc1_stream_encoder_dp_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	enum dc_color_space output_color_space);
+
+void enc1_stream_encoder_hdmi_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	int actual_pix_clk_khz,
+	bool enable_audio);
+
+void enc1_stream_encoder_dvi_set_stream_attribute(
+	struct stream_encoder *enc,
+	struct dc_crtc_timing *crtc_timing,
+	bool is_dual_link);
+
+void enc1_stream_encoder_set_mst_bandwidth(
+	struct stream_encoder *enc,
+	struct fixed31_32 avg_time_slots_per_mtp);
+
+void enc1_stream_encoder_update_dp_info_packets(
+	struct stream_encoder *enc,
+	const struct encoder_info_frame *info_frame);
+
+void enc1_stream_encoder_stop_dp_info_packets(
+	struct stream_encoder *enc);
+
+void enc1_stream_encoder_dp_blank(
+	struct stream_encoder *enc);
+
+void enc1_stream_encoder_dp_unblank(
+	struct stream_encoder *enc,
+	const struct encoder_unblank_param *param);
+
+void enc1_setup_stereo_sync(
+	struct stream_encoder *enc,
+	int tg_inst, bool enable);
+
+void enc1_stream_encoder_set_avmute(
+	struct stream_encoder *enc,
+	bool enable);
+
+void enc1_se_audio_mute_control(
+	struct stream_encoder *enc,
+	bool mute);
+
+void enc1_se_dp_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *info);
+
+void enc1_se_dp_audio_enable(
+	struct stream_encoder *enc);
+
+void enc1_se_dp_audio_disable(
+	struct stream_encoder *enc);
+
+void enc1_se_hdmi_audio_setup(
+	struct stream_encoder *enc,
+	unsigned int az_inst,
+	struct audio_info *info,
+	struct audio_crtc_info *audio_crtc_info);
+
+void enc1_se_hdmi_audio_disable(
+	struct stream_encoder *enc);
+
 #endif /* __DC_STREAM_ENCODER_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 015e209e58bc..93da44527d2e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -191,6 +191,23 @@ enum controller_dp_test_pattern {
 	CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
 };
 
+enum dp_pixel_encoding_type {
+	DP_PIXEL_ENCODING_TYPE_RGB444		= 0x00000000,
+	DP_PIXEL_ENCODING_TYPE_YCBCR422		= 0x00000001,
+	DP_PIXEL_ENCODING_TYPE_YCBCR444		= 0x00000002,
+	DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT	= 0x00000003,
+	DP_PIXEL_ENCODING_TYPE_Y_ONLY		= 0x00000004,
+	DP_PIXEL_ENCODING_TYPE_YCBCR420		= 0x00000005
+};
+
+enum dp_component_depth {
+	DP_COMPONENT_PIXEL_DEPTH_6BPC		= 0x00000000,
+	DP_COMPONENT_PIXEL_DEPTH_8BPC		= 0x00000001,
+	DP_COMPONENT_PIXEL_DEPTH_10BPC		= 0x00000002,
+	DP_COMPONENT_PIXEL_DEPTH_12BPC		= 0x00000003,
+	DP_COMPONENT_PIXEL_DEPTH_16BPC		= 0x00000004
+};
+
 enum dc_lut_mode {
 	LUT_BYPASS,
 	LUT_RAM_A,

From 35ad2254cb7d0a46f135eb57990ca6618f79510b Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 29 Mar 2018 11:23:37 -0400
Subject: [PATCH 0369/1286] drm/amd/display: csc updates require FULL update

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 12 ++++++++++--
 drivers/gpu/drm/amd/display/dc/dc.h      |  1 +
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index b331d9e78cdb..8f09f3ab0c29 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1154,12 +1154,20 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
 	if (u->input_csc_color_matrix)
 		update_flags->bits.input_csc_change = 1;
 
-	if (update_flags->bits.in_transfer_func_change
-			|| update_flags->bits.input_csc_change) {
+	if (u->coeff_reduction_factor)
+		update_flags->bits.coeff_reduction_change = 1;
+
+	if (update_flags->bits.in_transfer_func_change) {
 		type = UPDATE_TYPE_MED;
 		elevate_update_type(&overall_type, type);
 	}
 
+	if (update_flags->bits.input_csc_change
+			|| update_flags->bits.coeff_reduction_change) {
+		type = UPDATE_TYPE_FULL;
+		elevate_update_type(&overall_type, type);
+	}
+
 	return overall_type;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 63817ed56c11..7d1a3c5d1b10 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -429,6 +429,7 @@ union surface_update_flags {
 		uint32_t position_change:1;
 		uint32_t in_transfer_func_change:1;
 		uint32_t input_csc_change:1;
+		uint32_t coeff_reduction_change:1;
 		uint32_t output_tf_change:1;
 		uint32_t pixel_format_change:1;
 

From bb33b1842c3f2592a9be4e80c9d4afe6251a5da6 Mon Sep 17 00:00:00 2001
From: Roman Li <roman.li@amd.com>
Date: Thu, 29 Mar 2018 11:14:25 -0400
Subject: [PATCH 0370/1286] drm/amd/display: Fix FBC text console corruption

Signed-off-by: Roman Li <roman.li@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/dc/dce110/dce110_compressor.c | 63 +++++++++++++++----
 1 file changed, 52 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 775d3bf0bd39..9150d2694450 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -102,6 +102,43 @@ static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
 	return 256 * ((pixels + 255) / 256);
 }
 
+static void reset_lb_on_vblank(struct dc_context *ctx)
+{
+	uint32_t value, frame_count;
+	uint32_t retry = 0;
+	uint32_t status_pos =
+			dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+
+
+	/* Only if CRTC is enabled and counter is moving we wait for one frame. */
+	if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+		/* Resetting LB on VBlank */
+		value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+		set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
+		set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
+		dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+
+		frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+
+
+		for (retry = 100; retry > 0; retry--) {
+			if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+				break;
+			msleep(1);
+		}
+		if (!retry)
+			dm_error("Frame count did not increase for 100ms.\n");
+
+		/* Resetting LB on VBlank */
+		value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+		set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
+		set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
+		dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+
+	}
+
+}
+
 static void wait_for_fbc_state_changed(
 	struct dce110_compressor *cp110,
 	bool enabled)
@@ -232,19 +269,23 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
 {
 	struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
 
-	if (compressor->options.bits.FBC_SUPPORT &&
-		dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
-		uint32_t reg_data;
-		/* Turn off compression */
-		reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
-		set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
-		dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+	if (compressor->options.bits.FBC_SUPPORT) {
+		if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+			uint32_t reg_data;
+			/* Turn off compression */
+			reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+			set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+			dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
 
-		/* Reset enum controller_id to undefined */
-		compressor->attached_inst = 0;
-		compressor->is_enabled = false;
+			/* Reset enum controller_id to undefined */
+			compressor->attached_inst = 0;
+			compressor->is_enabled = false;
 
-		wait_for_fbc_state_changed(cp110, false);
+			wait_for_fbc_state_changed(cp110, false);
+		}
+
+		/* Sync line buffer  - dce100/110 only*/
+		reset_lb_on_vblank(compressor->ctx);
 	}
 }
 

From 144de8944805aef45964a904fdfd537486b1ce82 Mon Sep 17 00:00:00 2001
From: Yongqiang Sun <yongqiang.sun@amd.com>
Date: Thu, 29 Mar 2018 13:11:10 -0400
Subject: [PATCH 0371/1286] drm/amd/display: dal 3.1.41

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Yongqiang Sun <yongqiang.sun@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7d1a3c5d1b10..23349148c7a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.40"
+#define DC_VER "3.1.41"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6

From 7d3c425fefb91da7e984a43ba27dff6cdd53758a Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 10 Apr 2018 09:12:46 -0700
Subject: [PATCH 0372/1286] drm/i915: Move a bunch of workaround-related code
 to its own file

This has grown to be a sizable amount of code, so move it to
its own file before we try to refactor anything. For the moment,
we are leaving behind the WA BB code and the WAs that get applied
(incorrectly) in init_clock_gating, but we will deal with it later.

v2: Use intel_ prefix for code that deals with the hardware (Chris)
v3: Rebased
v4:
  - Rebased
  - New license header
v5:
  - Rebased
  - Added some organisational notes to the file (Chris)
v6: Include DOC section in the documentation build (Jani)

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
[ickle: appease checkpatch, mostly]
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1523376767-18480-1-git-send-email-oscar.mateo@intel.com
---
 Documentation/gpu/i915.rst               |   6 +
 drivers/gpu/drm/i915/Makefile            |   3 +-
 drivers/gpu/drm/i915/intel_engine_cs.c   | 634 ---------------------
 drivers/gpu/drm/i915/intel_lrc.c         |   1 +
 drivers/gpu/drm/i915/intel_ringbuffer.c  |   1 +
 drivers/gpu/drm/i915/intel_ringbuffer.h  |   3 -
 drivers/gpu/drm/i915/intel_workarounds.c | 686 +++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_workarounds.h |  13 +
 8 files changed, 709 insertions(+), 638 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/intel_workarounds.c
 create mode 100644 drivers/gpu/drm/i915/intel_workarounds.h

diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 34d22f275708..055df45596c1 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -58,6 +58,12 @@ Intel GVT-g Host Support(vGPU device model)
 .. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
    :internal:
 
+Workarounds
+-----------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_workarounds.c
+   :doc: Hardware workarounds
+
 Display Hardware Handling
 =========================
 
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0c79c19223af..9bee52a949a9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -43,7 +43,8 @@ i915-y := i915_drv.o \
 	  intel_csr.o \
 	  intel_device_info.o \
 	  intel_pm.o \
-	  intel_runtime_pm.o
+	  intel_runtime_pm.o \
+	  intel_workarounds.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a217b3fe5f0b..68898d58dd1e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -903,640 +903,6 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
 	}
 }
 
-static int wa_add(struct drm_i915_private *dev_priv,
-		  i915_reg_t addr,
-		  const u32 mask, const u32 val)
-{
-	const u32 idx = dev_priv->workarounds.count;
-
-	if (WARN_ON(idx >= I915_MAX_WA_REGS))
-		return -ENOSPC;
-
-	dev_priv->workarounds.reg[idx].addr = addr;
-	dev_priv->workarounds.reg[idx].value = val;
-	dev_priv->workarounds.reg[idx].mask = mask;
-
-	dev_priv->workarounds.count++;
-
-	return 0;
-}
-
-#define WA_REG(addr, mask, val) do { \
-		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
-		if (r) \
-			return r; \
-	} while (0)
-
-#define WA_SET_BIT_MASKED(addr, mask) \
-	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
-	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
-	WA_REG(addr, mask, _MASKED_FIELD(mask, value))
-
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
-				 i915_reg_t reg)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct i915_workarounds *wa = &dev_priv->workarounds;
-	const uint32_t index = wa->hw_whitelist_count[engine->id];
-
-	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
-		return -EINVAL;
-
-	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
-		   i915_mmio_reg_offset(reg));
-	wa->hw_whitelist_count[engine->id]++;
-
-	return 0;
-}
-
-static int gen8_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
-
-	/* WaDisableAsyncFlipPerfMode:bdw,chv */
-	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
-
-	/* WaDisablePartialInstShootdown:bdw,chv */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
-	/* Use Force Non-Coherent whenever executing a 3D context. This is a
-	 * workaround for for a possible hang in the unlikely event a TLB
-	 * invalidation occurs during a PSD flush.
-	 */
-	/* WaForceEnableNonCoherent:bdw,chv */
-	/* WaHdcDisableFetchWhenMasked:bdw,chv */
-	WA_SET_BIT_MASKED(HDC_CHICKEN0,
-			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
-			  HDC_FORCE_NON_COHERENT);
-
-	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
-	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
-	 *  polygons in the same 8x4 pixel/sample area to be processed without
-	 *  stalling waiting for the earlier ones to write to Hierarchical Z
-	 *  buffer."
-	 *
-	 * This optimization is off by default for BDW and CHV; turn it on.
-	 */
-	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
-
-	/* Wa4x4STCOptimizationDisable:bdw,chv */
-	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
-
-	/*
-	 * BSpec recommends 8x4 when MSAA is used,
-	 * however in practice 16x4 seems fastest.
-	 *
-	 * Note that PS/WM thread counts depend on the WIZ hashing
-	 * disable bit, which we don't touch here, but it's good
-	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-	 */
-	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
-			    GEN6_WIZ_HASHING_MASK,
-			    GEN6_WIZ_HASHING_16x4);
-
-	return 0;
-}
-
-static int bdw_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen8_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
-	/* WaDisableDopClockGating:bdw
-	 *
-	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
-	 * to disable EUTC clock gating.
-	 */
-	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
-			  DOP_CLOCK_GATING_DISABLE);
-
-	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-			  GEN8_SAMPLER_POWER_BYPASS_DIS);
-
-	WA_SET_BIT_MASKED(HDC_CHICKEN0,
-			  /* WaForceContextSaveRestoreNonCoherent:bdw */
-			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
-			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
-	return 0;
-}
-
-static int chv_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen8_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WaDisableThreadStallDopClockGating:chv */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
-	/* Improve HiZ throughput on CHV. */
-	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
-	return 0;
-}
-
-static int gen9_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
-
-	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
-
-	/* WaDisableKillLogic:bxt,skl,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   ECOCHK_DIS_TLB);
-
-	if (HAS_LLC(dev_priv)) {
-		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
-		 *
-		 * Must match Display Engine. See
-		 * WaCompressedResourceDisplayNewHashMode.
-		 */
-		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
-		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
-
-		I915_WRITE(MMCD_MISC_CTRL,
-			   I915_READ(MMCD_MISC_CTRL) |
-			   MMCD_PCLA |
-			   MMCD_HOTSPOT_EN);
-	}
-
-	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
-	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-			  FLOW_CONTROL_ENABLE |
-			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
-	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
-		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
-	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
-	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
-	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-			  GEN9_ENABLE_YV12_BUGFIX |
-			  GEN9_ENABLE_GPGPU_PREEMPTION);
-
-	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
-	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
-	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
-					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
-
-	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
-	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
-			  GEN9_CCS_TLB_PREFETCH_ENABLE);
-
-	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
-	WA_SET_BIT_MASKED(HDC_CHICKEN0,
-			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
-			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
-
-	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
-	 * both tied to WaForceContextSaveRestoreNonCoherent
-	 * in some hsds for skl. We keep the tie for all gen9. The
-	 * documentation is a bit hazy and so we want to get common behaviour,
-	 * even though there is no clear evidence we would need both on kbl/bxt.
-	 * This area has been source of system hangs so we play it safe
-	 * and mimic the skl regardless of what bspec says.
-	 *
-	 * Use Force Non-Coherent whenever executing a 3D context. This
-	 * is a workaround for a possible hang in the unlikely event
-	 * a TLB invalidation occurs during a PSD flush.
-	 */
-
-	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
-	WA_SET_BIT_MASKED(HDC_CHICKEN0,
-			  HDC_FORCE_NON_COHERENT);
-
-	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-		   BDW_DISABLE_HDC_INVALIDATION);
-
-	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
-	if (IS_SKYLAKE(dev_priv) ||
-	    IS_KABYLAKE(dev_priv) ||
-	    IS_COFFEELAKE(dev_priv))
-		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-				  GEN8_SAMPLER_POWER_BYPASS_DIS);
-
-	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
-	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
-
-	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
-	if (IS_GEN9_LP(dev_priv)) {
-		u32 val = I915_READ(GEN8_L3SQCREG1);
-
-		val &= ~L3_PRIO_CREDITS_MASK;
-		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-		I915_WRITE(GEN8_L3SQCREG1, val);
-	}
-
-	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
-	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
-				    GEN8_LQSC_FLUSH_COHERENT_LINES));
-
-	/*
-	 * Supporting preemption with fine-granularity requires changes in the
-	 * batch buffer programming. Since we can't break old userspace, we
-	 * need to set our default preemption level to safe value. Userspace is
-	 * still able to use more fine-grained preemption levels, since in
-	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
-	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
-	 * not real HW workarounds, but merely a way to start using preemption
-	 * while maintaining old contract with userspace.
-	 */
-
-	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
-	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
-
-	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
-	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
-			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
-
-	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
-	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
-	if (ret)
-		return ret;
-
-	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-	if (ret)
-		return ret;
-
-	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	u8 vals[3] = { 0, 0, 0 };
-	unsigned int i;
-
-	for (i = 0; i < 3; i++) {
-		u8 ss;
-
-		/*
-		 * Only consider slices where one, and only one, subslice has 7
-		 * EUs
-		 */
-		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
-			continue;
-
-		/*
-		 * subslice_7eu[i] != 0 (because of the check above) and
-		 * ss_max == 4 (maximum number of subslices possible per slice)
-		 *
-		 * ->    0 <= ss <= 3;
-		 */
-		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
-		vals[i] = 3 - ss;
-	}
-
-	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
-		return 0;
-
-	/* Tune IZ hashing. See intel_device_info_runtime_init() */
-	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
-			    GEN9_IZ_HASHING_MASK(2) |
-			    GEN9_IZ_HASHING_MASK(1) |
-			    GEN9_IZ_HASHING_MASK(0),
-			    GEN9_IZ_HASHING(2, vals[2]) |
-			    GEN9_IZ_HASHING(1, vals[1]) |
-			    GEN9_IZ_HASHING(0, vals[0]));
-
-	return 0;
-}
-
-static int skl_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen9_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WaEnableGapsTsvCreditFix:skl */
-	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
-				   GEN9_GAPS_TSV_CREDIT_DISABLE));
-
-	/* WaDisableGafsUnitClkGating:skl */
-	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
-				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
-
-	/* WaInPlaceDecompressionHang:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
-	/* WaDisableLSQCROPERFforOCL:skl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-	if (ret)
-		return ret;
-
-	return skl_tune_iz_hashing(engine);
-}
-
-static int bxt_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen9_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WaDisableThreadStallDopClockGating:bxt */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-			  STALL_DOP_GATING_DISABLE);
-
-	/* WaDisablePooledEuLoadBalancingFix:bxt */
-	I915_WRITE(FF_SLICE_CS_CHICKEN2,
-		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
-
-	/* WaToEnableHwFixForPushConstHWBug:bxt */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	/* WaInPlaceDecompressionHang:bxt */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
-	return 0;
-}
-
-static int cnl_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   (I915_READ(GAMT_CHKN_BIT_REG) |
-			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));
-
-	/* WaForceContextSaveRestoreNonCoherent:cnl */
-	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
-			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
-
-	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
-
-	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
-		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
-
-	/* WaInPlaceDecompressionHang:cnl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
-	/* WaPushConstantDereferenceHoldDisable:cnl */
-	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
-
-	/* FtrEnableFastAnisoL1BankingFix: cnl */
-	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
-
-	/* WaDisable3DMidCmdPreemption:cnl */
-	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
-
-	/* WaDisableGPGPUMidCmdPreemption:cnl */
-	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
-			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
-
-	/* WaEnablePreemptionGranularityControlByUMD:cnl */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-	ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-	if (ret)
-		return ret;
-
-	/* WaDisableEarlyEOT:cnl */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
-	return 0;
-}
-
-static int kbl_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen9_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WaEnableGapsTsvCreditFix:kbl */
-	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
-				   GEN9_GAPS_TSV_CREDIT_DISABLE));
-
-	/* WaDisableDynamicCreditSharing:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   (I915_READ(GAMT_CHKN_BIT_REG) |
-			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));
-
-	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE);
-
-	/* WaToEnableHwFixForPushConstHWBug:kbl */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
-		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	/* WaDisableGafsUnitClkGating:kbl */
-	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
-				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
-
-	/* WaDisableSbeCacheDispatchPortSharing:kbl */
-	WA_SET_BIT_MASKED(
-		GEN7_HALF_SLICE_CHICKEN1,
-		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-	/* WaInPlaceDecompressionHang:kbl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
-	/* WaDisableLSQCROPERFforOCL:kbl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int glk_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen9_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
-	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
-	if (ret)
-		return ret;
-
-	/* WaToEnableHwFixForPushConstHWBug:glk */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	return 0;
-}
-
-static int cfl_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen9_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WaEnableGapsTsvCreditFix:cfl */
-	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
-				   GEN9_GAPS_TSV_CREDIT_DISABLE));
-
-	/* WaToEnableHwFixForPushConstHWBug:cfl */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	/* WaDisableGafsUnitClkGating:cfl */
-	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
-				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
-
-	/* WaDisableSbeCacheDispatchPortSharing:cfl */
-	WA_SET_BIT_MASKED(
-		GEN7_HALF_SLICE_CHICKEN1,
-		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-	/* WaInPlaceDecompressionHang:cfl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
-
-	return 0;
-}
-
-int init_workarounds_ring(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int err;
-
-	if (GEM_WARN_ON(engine->id != RCS))
-		return -EINVAL;
-
-	dev_priv->workarounds.count = 0;
-	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
-
-	if (IS_BROADWELL(dev_priv))
-		err = bdw_init_workarounds(engine);
-	else if (IS_CHERRYVIEW(dev_priv))
-		err = chv_init_workarounds(engine);
-	else if (IS_SKYLAKE(dev_priv))
-		err =  skl_init_workarounds(engine);
-	else if (IS_BROXTON(dev_priv))
-		err = bxt_init_workarounds(engine);
-	else if (IS_KABYLAKE(dev_priv))
-		err = kbl_init_workarounds(engine);
-	else if (IS_GEMINILAKE(dev_priv))
-		err =  glk_init_workarounds(engine);
-	else if (IS_COFFEELAKE(dev_priv))
-		err = cfl_init_workarounds(engine);
-	else if (IS_CANNONLAKE(dev_priv))
-		err = cnl_init_workarounds(engine);
-	else
-		err = 0;
-	if (err)
-		return err;
-
-	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
-			 engine->name, dev_priv->workarounds.count);
-	return 0;
-}
-
-int intel_ring_workarounds_emit(struct i915_request *rq)
-{
-	struct i915_workarounds *w = &rq->i915->workarounds;
-	u32 *cs;
-	int ret, i;
-
-	if (w->count == 0)
-		return 0;
-
-	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
-	if (ret)
-		return ret;
-
-	cs = intel_ring_begin(rq, w->count * 2 + 2);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
-	for (i = 0; i < w->count; i++) {
-		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
-		*cs++ = w->reg[i].value;
-	}
-	*cs++ = MI_NOOP;
-
-	intel_ring_advance(rq, cs);
-
-	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static bool ring_is_idle(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 665d9e82e954..03b9d5ae883a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -139,6 +139,7 @@
 #include "i915_gem_render_state.h"
 #include "intel_lrc_reg.h"
 #include "intel_mocs.h"
+#include "intel_workarounds.h"
 
 #define RING_EXECLIST_QFULL		(1 << 0x2)
 #define RING_EXECLIST1_VALID		(1 << 0x3)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04d9d9a946a7..36acc32374e4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -36,6 +36,7 @@
 #include "i915_gem_render_state.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_workarounds.h"
 
 /* Rough estimate of the typical request size, performing a flush,
  * set-context and then emitting the batch.
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 256d58487559..717041640135 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -885,9 +885,6 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 	return READ_ONCE(engine->timeline->seqno);
 }
 
-int init_workarounds_ring(struct intel_engine_cs *engine);
-int intel_ring_workarounds_emit(struct i915_request *rq);
-
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);
 
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
new file mode 100644
index 000000000000..d60a37700f84
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -0,0 +1,686 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_workarounds.h"
+
+/**
+ * DOC: Hardware workarounds
+ *
+ * This file is intended as a central place to implement most [1]_ of the
+ * required workarounds for hardware to work as originally intended. They fall
+ * in five basic categories depending on how/when they are applied:
+ *
+ * - Workarounds that touch registers that are saved/restored to/from the HW
+ *   context image. The list is emitted (via Load Register Immediate commands)
+ *   every time a new context is created.
+ * - GT workarounds. The list of these WAs is applied whenever these registers
+ *   revert to default values (on GPU reset, suspend/resume [2]_, etc..).
+ * - Display workarounds. The list is applied during display clock-gating
+ *   initialization.
+ * - Workarounds that whitelist a privileged register, so that UMDs can manage
+ *   them directly. This is just a special case of an MMIO workaround (as we
+ *   write the list of these to-be-whitelisted registers to some special HW
+ *   registers).
+ * - Workaround batchbuffers, that get executed automatically by the hardware
+ *   on every HW context restore.
+ *
+ * .. [1] Please notice that there are other WAs that, due to their nature,
+ *    cannot be applied from a central place. Those are peppered around the rest
+ *    of the code, as needed.
+ *
+ * .. [2] Technically, some registers are powercontext saved & restored, so they
+ *    survive a suspend/resume. In practice, writing them again is not too
+ *    costly and simplifies things. We can revisit this in the future.
+ *
+ * Layout
+ * ''''''
+ *
+ * Keep things in this file ordered by WA type, as per the above (context, GT,
+ * display, register whitelist, batchbuffer). Then, inside each type, keep the
+ * following order:
+ *
+ * - Infrastructure functions and macros
+ * - WAs per platform in standard gen/chrono order
+ * - Public functions to init or apply the given workaround type.
+ */
+
+static int wa_add(struct drm_i915_private *dev_priv,
+		  i915_reg_t addr,
+		  const u32 mask, const u32 val)
+{
+	const unsigned int idx = dev_priv->workarounds.count;
+
+	if (WARN_ON(idx >= I915_MAX_WA_REGS))
+		return -ENOSPC;
+
+	dev_priv->workarounds.reg[idx].addr = addr;
+	dev_priv->workarounds.reg[idx].value = val;
+	dev_priv->workarounds.reg[idx].mask = mask;
+
+	dev_priv->workarounds.count++;
+
+	return 0;
+}
+
+#define WA_REG(addr, mask, val) do { \
+		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+		if (r) \
+			return r; \
+	} while (0)
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+	WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
+
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+				 i915_reg_t reg)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct i915_workarounds *wa = &dev_priv->workarounds;
+	const unsigned int index = wa->hw_whitelist_count[engine->id];
+
+	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+		return -EINVAL;
+
+	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
+		   i915_mmio_reg_offset(reg));
+	wa->hw_whitelist_count[engine->id]++;
+
+	return 0;
+}
+
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+	/* WaDisableAsyncFlipPerfMode:bdw,chv */
+	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
+	/* WaDisablePartialInstShootdown:bdw,chv */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+	/* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+	 * invalidation occurs during a PSD flush.
+	 */
+	/* WaForceEnableNonCoherent:bdw,chv */
+	/* WaHdcDisableFetchWhenMasked:bdw,chv */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+			  HDC_FORCE_NON_COHERENT);
+
+	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+	 *  polygons in the same 8x4 pixel/sample area to be processed without
+	 *  stalling waiting for the earlier ones to write to Hierarchical Z
+	 *  buffer."
+	 *
+	 * This optimization is off by default for BDW and CHV; turn it on.
+	 */
+	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+	/* Wa4x4STCOptimizationDisable:bdw,chv */
+	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+	/*
+	 * BSpec recommends 8x4 when MSAA is used,
+	 * however in practice 16x4 seems fastest.
+	 *
+	 * Note that PS/WM thread counts depend on the WIZ hashing
+	 * disable bit, which we don't touch here, but it's good
+	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+	 */
+	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+			    GEN6_WIZ_HASHING_MASK,
+			    GEN6_WIZ_HASHING_16x4);
+
+	return 0;
+}
+
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	ret = gen8_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+	/* WaDisableDopClockGating:bdw
+	 *
+	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+	 * to disable EUTC clock gating.
+	 */
+	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+			  DOP_CLOCK_GATING_DISABLE);
+
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+			  GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  /* WaForceContextSaveRestoreNonCoherent:bdw */
+			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+
+	return 0;
+}
+
+static int chv_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	ret = gen8_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WaDisableThreadStallDopClockGating:chv */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+	/* Improve HiZ throughput on CHV. */
+	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+
+	return 0;
+}
+
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+	I915_WRITE(BDW_SCRATCH1,
+		   I915_READ(BDW_SCRATCH1) |
+		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+	/* WaDisableKillLogic:bxt,skl,kbl */
+	if (!IS_COFFEELAKE(dev_priv))
+		I915_WRITE(GAM_ECOCHK,
+			   I915_READ(GAM_ECOCHK) | ECOCHK_DIS_TLB);
+
+	if (HAS_LLC(dev_priv)) {
+		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+		 *
+		 * Must match Display Engine. See
+		 * WaCompressedResourceDisplayNewHashMode.
+		 */
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
+		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+
+		I915_WRITE(MMCD_MISC_CTRL,
+			   I915_READ(MMCD_MISC_CTRL) |
+			   MMCD_PCLA |
+			   MMCD_HOTSPOT_EN);
+	}
+
+	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
+	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  FLOW_CONTROL_ENABLE |
+			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
+	if (!IS_COFFEELAKE(dev_priv))
+		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
+	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
+	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+			  GEN9_ENABLE_YV12_BUGFIX |
+			  GEN9_ENABLE_GPGPU_PREEMPTION);
+
+	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
+	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
+	WA_SET_BIT_MASKED(CACHE_MODE_1,
+			  GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+			  GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+
+	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
+	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+			  GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+	 * both tied to WaForceContextSaveRestoreNonCoherent
+	 * in some hsds for skl. We keep the tie for all gen9. The
+	 * documentation is a bit hazy and so we want to get common behaviour,
+	 * even though there is no clear evidence we would need both on kbl/bxt.
+	 * This area has been source of system hangs so we play it safe
+	 * and mimic the skl regardless of what bspec says.
+	 *
+	 * Use Force Non-Coherent whenever executing a 3D context. This
+	 * is a workaround for a possible hang in the unlikely event
+	 * a TLB invalidation occurs during a PSD flush.
+	 */
+
+	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_FORCE_NON_COHERENT);
+
+	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
+	I915_WRITE(GAM_ECOCHK,
+		   I915_READ(GAM_ECOCHK) | BDW_DISABLE_HDC_INVALIDATION);
+
+	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
+	if (IS_SKYLAKE(dev_priv) ||
+	    IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv))
+		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+				  GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+
+	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+	if (IS_GEN9_LP(dev_priv)) {
+		u32 val = I915_READ(GEN8_L3SQCREG1);
+
+		val &= ~L3_PRIO_CREDITS_MASK;
+		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+		I915_WRITE(GEN8_L3SQCREG1, val);
+	}
+
+	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+	I915_WRITE(GEN8_L3SQCREG4,
+		   I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+	/*
+	 * Supporting preemption with fine-granularity requires changes in the
+	 * batch buffer programming. Since we can't break old userspace, we
+	 * need to set our default preemption level to safe value. Userspace is
+	 * still able to use more fine-grained preemption levels, since in
+	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
+	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
+	 * not real HW workarounds, but merely a way to start using preemption
+	 * while maintaining old contract with userspace.
+	 */
+
+	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
+	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
+	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
+	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+	if (ret)
+		return ret;
+
+	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+	if (ret)
+		return ret;
+
+	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	u8 vals[3] = { 0, 0, 0 };
+	unsigned int i;
+
+	for (i = 0; i < 3; i++) {
+		u8 ss;
+
+		/*
+		 * Only consider slices where one, and only one, subslice has 7
+		 * EUs
+		 */
+		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+			continue;
+
+		/*
+		 * subslice_7eu[i] != 0 (because of the check above) and
+		 * ss_max == 4 (maximum number of subslices possible per slice)
+		 *
+		 * ->    0 <= ss <= 3;
+		 */
+		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+		vals[i] = 3 - ss;
+	}
+
+	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+		return 0;
+
+	/* Tune IZ hashing. See intel_device_info_runtime_init() */
+	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+			    GEN9_IZ_HASHING_MASK(2) |
+			    GEN9_IZ_HASHING_MASK(1) |
+			    GEN9_IZ_HASHING_MASK(0),
+			    GEN9_IZ_HASHING(2, vals[2]) |
+			    GEN9_IZ_HASHING(1, vals[1]) |
+			    GEN9_IZ_HASHING(0, vals[0]));
+
+	return 0;
+}
+
+static int skl_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	ret = gen9_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WaEnableGapsTsvCreditFix:skl */
+	I915_WRITE(GEN8_GARBCNTL,
+		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+	/* WaDisableGafsUnitClkGating:skl */
+	I915_WRITE(GEN7_UCGCTL4,
+		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaInPlaceDecompressionHang:skl */
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+			   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+	/* WaDisableLSQCROPERFforOCL:skl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+	if (ret)
+		return ret;
+
+	return skl_tune_iz_hashing(engine);
+}
+
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	ret = gen9_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WaDisableThreadStallDopClockGating:bxt */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  STALL_DOP_GATING_DISABLE);
+
+	/* WaDisablePooledEuLoadBalancingFix:bxt */
+	I915_WRITE(FF_SLICE_CS_CHICKEN2,
+		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+
+	/* WaToEnableHwFixForPushConstHWBug:bxt */
+	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaInPlaceDecompressionHang:bxt */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+	return 0;
+}
+
+static int cnl_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
+	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+		I915_WRITE(GAMT_CHKN_BIT_REG,
+			   I915_READ(GAMT_CHKN_BIT_REG) |
+			   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+
+	/* WaForceContextSaveRestoreNonCoherent:cnl */
+	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
+			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+
+	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
+	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
+
+	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
+	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
+	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
+
+	/* WaInPlaceDecompressionHang:cnl */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+	/* WaPushConstantDereferenceHoldDisable:cnl */
+	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+
+	/* FtrEnableFastAnisoL1BankingFix: cnl */
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+
+	/* WaDisable3DMidCmdPreemption:cnl */
+	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+	/* WaDisableGPGPUMidCmdPreemption:cnl */
+	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
+	/* WaEnablePreemptionGranularityControlByUMD:cnl */
+	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+	if (ret)
+		return ret;
+
+	/* WaDisableEarlyEOT:cnl */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+
+	return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	ret = gen9_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WaEnableGapsTsvCreditFix:kbl */
+	I915_WRITE(GEN8_GARBCNTL,
+		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+	/* WaDisableDynamicCreditSharing:kbl */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+		I915_WRITE(GAMT_CHKN_BIT_REG,
+			   I915_READ(GAMT_CHKN_BIT_REG) |
+			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE);
+
+	/* WaToEnableHwFixForPushConstHWBug:kbl */
+	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaDisableGafsUnitClkGating:kbl */
+	I915_WRITE(GEN7_UCGCTL4,
+		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:kbl */
+	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
+			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+	/* WaInPlaceDecompressionHang:kbl */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+	/* WaDisableLSQCROPERFforOCL:kbl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int glk_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	ret = gen9_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+	if (ret)
+		return ret;
+
+	/* WaToEnableHwFixForPushConstHWBug:glk */
+	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	return 0;
+}
+
+static int cfl_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	ret = gen9_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WaEnableGapsTsvCreditFix:cfl */
+	I915_WRITE(GEN8_GARBCNTL,
+		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+	/* WaToEnableHwFixForPushConstHWBug:cfl */
+	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaDisableGafsUnitClkGating:cfl */
+	I915_WRITE(GEN7_UCGCTL4,
+		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:cfl */
+	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
+			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+	/* WaInPlaceDecompressionHang:cfl */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+	return 0;
+}
+
+int init_workarounds_ring(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int err;
+
+	if (GEM_WARN_ON(engine->id != RCS))
+		return -EINVAL;
+
+	dev_priv->workarounds.count = 0;
+	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+
+	if (IS_BROADWELL(dev_priv))
+		err = bdw_init_workarounds(engine);
+	else if (IS_CHERRYVIEW(dev_priv))
+		err = chv_init_workarounds(engine);
+	else if (IS_SKYLAKE(dev_priv))
+		err =  skl_init_workarounds(engine);
+	else if (IS_BROXTON(dev_priv))
+		err = bxt_init_workarounds(engine);
+	else if (IS_KABYLAKE(dev_priv))
+		err = kbl_init_workarounds(engine);
+	else if (IS_GEMINILAKE(dev_priv))
+		err =  glk_init_workarounds(engine);
+	else if (IS_COFFEELAKE(dev_priv))
+		err = cfl_init_workarounds(engine);
+	else if (IS_CANNONLAKE(dev_priv))
+		err = cnl_init_workarounds(engine);
+	else
+		err = 0;
+	if (err)
+		return err;
+
+	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
+			 engine->name, dev_priv->workarounds.count);
+	return 0;
+}
+
+int intel_ring_workarounds_emit(struct i915_request *rq)
+{
+	struct i915_workarounds *w = &rq->i915->workarounds;
+	u32 *cs;
+	int ret, i;
+
+	if (w->count == 0)
+		return 0;
+
+	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
+	if (ret)
+		return ret;
+
+	cs = intel_ring_begin(rq, w->count * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
+	for (i = 0; i < w->count; i++) {
+		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*cs++ = w->reg[i].value;
+	}
+	*cs++ = MI_NOOP;
+
+	intel_ring_advance(rq, cs);
+
+	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
+	if (ret)
+		return ret;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
new file mode 100644
index 000000000000..2afea73aeeae
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef _I915_WORKAROUNDS_H_
+#define _I915_WORKAROUNDS_H_
+
+int init_workarounds_ring(struct intel_engine_cs *engine);
+int intel_ring_workarounds_emit(struct i915_request *rq);
+
+#endif

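The WA_SET_BIT_MASKED()/WA_CLR_BIT_MASKED()/WA_SET_FIELD_MASKED() helpers in the new intel_workarounds.c rely on the masked-register convention used by these chicken registers: the upper 16 bits of the 32-bit write select which bits the write is allowed to change, and the lower 16 bits carry the new values. Below is a minimal userspace sketch of that encoding; the MASKED_* macros follow the same shape as the i915 _MASKED_* definitions, while the register contents and bit numbers are made up purely for the demo.

#include <stdint.h>
#include <stdio.h>

/*
 * Same shape as the i915 _MASKED_* helpers: mask in the high half of the
 * 32-bit write, payload in the low half.
 */
#define MASKED_FIELD(mask, value)	(((uint32_t)(mask) << 16) | (value))
#define MASKED_BIT_ENABLE(a)		MASKED_FIELD((a), (a))
#define MASKED_BIT_DISABLE(a)		MASKED_FIELD((a), 0)

/* Model of how a masked (chicken) register reacts to such a write. */
static uint16_t masked_write(uint16_t old, uint32_t wr)
{
	uint16_t mask = wr >> 16;

	return (old & ~mask) | (wr & mask);
}

int main(void)
{
	uint16_t reg = 0x0f00;	/* pretend current register contents */

	reg = masked_write(reg, MASKED_BIT_ENABLE(1 << 2));	/* like WA_SET_BIT_MASKED */
	reg = masked_write(reg, MASKED_BIT_DISABLE(1 << 8));	/* like WA_CLR_BIT_MASKED */

	printf("reg = 0x%04x\n", reg);	/* 0x0e04: bit 2 set, bit 8 cleared, rest untouched */
	return 0;
}
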
From 59b449d5c82af03acdfc3f9a343c9d085ab5568f Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 10 Apr 2018 09:12:47 -0700
Subject: [PATCH 0373/1286] drm/i915: Split out functions for different kinds
 of workarounds
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There are different kinds of workarounds (those that modify registers that
live in the context image, those that modify global registers, those that
whitelist registers, etc...) and they have different requirements in terms
of where they are applied and how. Also, by splitting them apart, it should
be easier to decide where a new workaround should go.
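
As a rough sketch of where this ends up (the function names and callers are
the ones introduced in the diff below; the stub forward declarations here are
only illustrative):

struct drm_i915_private;
struct intel_engine_cs;
struct i915_request;

/* Context workarounds: recorded once at driver init, then emitted as an
 * MI_LOAD_REGISTER_IMM list into every new context image. */
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);	/* i915_gem_contexts_init() */
int intel_ctx_workarounds_emit(struct i915_request *rq);		/* gen8_init_rcs_context() */

/* GT workarounds: plain MMIO writes, reapplied whenever the registers lose
 * their values (from i915_gem_init_hw() for now). */
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);

/* Whitelist workarounds: FORCE_TO_NONPRIV slots, programmed per engine from
 * the render ring init paths. */
int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);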

v2:
  - Add multiple MISSING_CASE
  - Rebased

v3:
  - Rename mmio_workarounds to gt_workarounds (Chris, Mika)
  - Create empty placeholders for BDW and CHV GT WAs
  - Rebased

v4: Rebased

v5:
 - Rebased
 - FORCE_TO_NONPRIV register exists since BDW, so make a path
   for it to achieve universality, even if empty (Chris)

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
[ickle: appease checkpatch]
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1523376767-18480-2-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_gem.c          |   3 +
 drivers/gpu/drm/i915/i915_gem_context.c  |   6 +
 drivers/gpu/drm/i915/intel_lrc.c         |  14 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c  |   8 +-
 drivers/gpu/drm/i915/intel_workarounds.c | 712 ++++++++++++++---------
 drivers/gpu/drm/i915/intel_workarounds.h |   8 +-
 6 files changed, 473 insertions(+), 278 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 60cf7cfc24ee..4c9d2a6f7d28 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include "intel_mocs.h"
+#include "intel_workarounds.h"
 #include "i915_gemfs.h"
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
@@ -5191,6 +5192,8 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 		}
 	}
 
+	intel_gt_workarounds_apply(dev_priv);
+
 	i915_gem_init_swizzling(dev_priv);
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5cfac0255758..9b3834a846e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,7 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "intel_workarounds.h"
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
@@ -459,11 +460,16 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
+	int ret;
 
 	/* Reassure ourselves we are only called once */
 	GEM_BUG_ON(dev_priv->kernel_context);
 	GEM_BUG_ON(dev_priv->preempt_context);
 
+	ret = intel_ctx_workarounds_init(dev_priv);
+	if (ret)
+		return ret;
+
 	INIT_LIST_HEAD(&dev_priv->contexts.list);
 	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
 	init_llist_head(&dev_priv->contexts.free_list);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 03b9d5ae883a..c7c85134a84a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1744,6 +1744,10 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
+	ret = intel_whitelist_workarounds_apply(engine);
+	if (ret)
+		return ret;
+
 	/* We need to disable the AsyncFlip performance optimisations in order
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
 	 * programmed to '1' on all products.
@@ -1754,7 +1758,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
 
 	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	return init_workarounds_ring(engine);
+	return 0;
 }
 
 static int gen9_init_render_ring(struct intel_engine_cs *engine)
@@ -1765,7 +1769,11 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	return init_workarounds_ring(engine);
+	ret = intel_whitelist_workarounds_apply(engine);
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
 static void reset_common_ring(struct intel_engine_cs *engine,
@@ -2090,7 +2098,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
 {
 	int ret;
 
-	ret = intel_ring_workarounds_emit(rq);
+	ret = intel_ctx_workarounds_emit(rq);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 36acc32374e4..757bb0990c07 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -600,7 +600,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
 {
 	int ret;
 
-	ret = intel_ring_workarounds_emit(rq);
+	ret = intel_ctx_workarounds_emit(rq);
 	if (ret != 0)
 		return ret;
 
@@ -618,6 +618,10 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
+	ret = intel_whitelist_workarounds_apply(engine);
+	if (ret)
+		return ret;
+
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
 	if (IS_GEN(dev_priv, 4, 6))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -659,7 +663,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) >= 6)
 		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 
-	return init_workarounds_ring(engine);
+	return 0;
 }
 
 static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index d60a37700f84..bbbf4ed4aa97 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -81,27 +81,8 @@ static int wa_add(struct drm_i915_private *dev_priv,
 #define WA_SET_FIELD_MASKED(addr, mask, value) \
 	WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
 
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
-				 i915_reg_t reg)
+static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct i915_workarounds *wa = &dev_priv->workarounds;
-	const unsigned int index = wa->hw_whitelist_count[engine->id];
-
-	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
-		return -EINVAL;
-
-	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
-		   i915_mmio_reg_offset(reg));
-	wa->hw_whitelist_count[engine->id]++;
-
-	return 0;
-}
-
-static int gen8_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-
 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
 	/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -149,12 +130,11 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static int bdw_init_workarounds(struct intel_engine_cs *engine)
+static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
-	ret = gen8_init_workarounds(engine);
+	ret = gen8_ctx_workarounds_init(dev_priv);
 	if (ret)
 		return ret;
 
@@ -181,12 +161,11 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static int chv_init_workarounds(struct intel_engine_cs *engine)
+static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
-	ret = gen8_init_workarounds(engine);
+	ret = gen8_ctx_workarounds_init(dev_priv);
 	if (ret)
 		return ret;
 
@@ -199,25 +178,8 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static int gen9_init_workarounds(struct intel_engine_cs *engine)
+static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
-		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
-
-	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
-	I915_WRITE(BDW_SCRATCH1,
-		   I915_READ(BDW_SCRATCH1) |
-		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
-
-	/* WaDisableKillLogic:bxt,skl,kbl */
-	if (!IS_COFFEELAKE(dev_priv))
-		I915_WRITE(GAM_ECOCHK,
-			   I915_READ(GAM_ECOCHK) | ECOCHK_DIS_TLB);
-
 	if (HAS_LLC(dev_priv)) {
 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
 		 *
@@ -228,11 +190,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
 		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
 				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
-
-		I915_WRITE(MMCD_MISC_CTRL,
-			   I915_READ(MMCD_MISC_CTRL) |
-			   MMCD_PCLA |
-			   MMCD_HOTSPOT_EN);
 	}
 
 	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
@@ -284,10 +241,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(HDC_CHICKEN0,
 			  HDC_FORCE_NON_COHERENT);
 
-	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
-	I915_WRITE(GAM_ECOCHK,
-		   I915_READ(GAM_ECOCHK) | BDW_DISABLE_HDC_INVALIDATION);
-
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
 	if (IS_SKYLAKE(dev_priv) ||
 	    IS_KABYLAKE(dev_priv) ||
@@ -298,19 +251,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
-	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
-	if (IS_GEN9_LP(dev_priv)) {
-		u32 val = I915_READ(GEN8_L3SQCREG1);
-
-		val &= ~L3_PRIO_CREDITS_MASK;
-		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-		I915_WRITE(GEN8_L3SQCREG1, val);
-	}
-
-	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
-	I915_WRITE(GEN8_L3SQCREG4,
-		   I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
 	/*
 	 * Supporting preemption with fine-granularity requires changes in the
 	 * batch buffer programming. Since we can't break old userspace, we
@@ -330,29 +270,11 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
 
-	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
-	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
-	if (ret)
-		return ret;
-
-	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-	if (ret)
-		return ret;
-
-	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
-	if (ret)
-		return ret;
-
 	return 0;
 }
 
-static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
+static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	u8 vals[3] = { 0, 0, 0 };
 	unsigned int i;
 
@@ -391,43 +313,22 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static int skl_init_workarounds(struct intel_engine_cs *engine)
+static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
-	ret = gen9_init_workarounds(engine);
+	ret = gen9_ctx_workarounds_init(dev_priv);
 	if (ret)
 		return ret;
 
-	/* WaEnableGapsTsvCreditFix:skl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
-
-	/* WaDisableGafsUnitClkGating:skl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaInPlaceDecompressionHang:skl */
-	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-			   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaDisableLSQCROPERFforOCL:skl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-	if (ret)
-		return ret;
-
-	return skl_tune_iz_hashing(engine);
+	return skl_tune_iz_hashing(dev_priv);
 }
 
-static int bxt_init_workarounds(struct intel_engine_cs *engine)
+static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
-	ret = gen9_init_workarounds(engine);
+	ret = gen9_ctx_workarounds_init(dev_priv);
 	if (ret)
 		return ret;
 
@@ -435,33 +336,74 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
 			  STALL_DOP_GATING_DISABLE);
 
-	/* WaDisablePooledEuLoadBalancingFix:bxt */
-	I915_WRITE(FF_SLICE_CS_CHICKEN2,
-		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
-
 	/* WaToEnableHwFixForPushConstHWBug:bxt */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
-	/* WaInPlaceDecompressionHang:bxt */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	return 0;
+}
+
+static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	ret = gen9_ctx_workarounds_init(dev_priv);
+	if (ret)
+		return ret;
+
+	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE);
+
+	/* WaToEnableHwFixForPushConstHWBug:kbl */
+	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaDisableSbeCacheDispatchPortSharing:kbl */
+	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
+			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
 	return 0;
 }
 
-static int cnl_init_workarounds(struct intel_engine_cs *engine)
+static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
 
-	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+	ret = gen9_ctx_workarounds_init(dev_priv);
+	if (ret)
+		return ret;
 
+	/* WaToEnableHwFixForPushConstHWBug:glk */
+	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	return 0;
+}
+
+static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	ret = gen9_ctx_workarounds_init(dev_priv);
+	if (ret)
+		return ret;
+
+	/* WaToEnableHwFixForPushConstHWBug:cfl */
+	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaDisableSbeCacheDispatchPortSharing:cfl */
+	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
+			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+	return 0;
+}
+
+static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
 	/* WaForceContextSaveRestoreNonCoherent:cnl */
 	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
@@ -479,15 +421,10 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
 
-	/* WaInPlaceDecompressionHang:cnl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
 	/* WaPushConstantDereferenceHoldDisable:cnl */
 	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
 
-	/* FtrEnableFastAnisoL1BankingFix: cnl */
+	/* FtrEnableFastAnisoL1BankingFix:cnl */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
 
 	/* WaDisable3DMidCmdPreemption:cnl */
@@ -498,161 +435,47 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
 
-	/* WaEnablePreemptionGranularityControlByUMD:cnl */
-	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-	if (ret)
-		return ret;
-
 	/* WaDisableEarlyEOT:cnl */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
 
 	return 0;
 }
 
-static int kbl_init_workarounds(struct intel_engine_cs *engine)
+int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen9_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WaEnableGapsTsvCreditFix:kbl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
-
-	/* WaDisableDynamicCreditSharing:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-		I915_WRITE(GAMT_CHKN_BIT_REG,
-			   I915_READ(GAMT_CHKN_BIT_REG) |
-			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
-
-	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE);
-
-	/* WaToEnableHwFixForPushConstHWBug:kbl */
-	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
-		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	/* WaDisableGafsUnitClkGating:kbl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaDisableSbeCacheDispatchPortSharing:kbl */
-	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
-			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-	/* WaInPlaceDecompressionHang:kbl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	/* WaDisableLSQCROPERFforOCL:kbl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int glk_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen9_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
-	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
-	if (ret)
-		return ret;
-
-	/* WaToEnableHwFixForPushConstHWBug:glk */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	return 0;
-}
-
-static int cfl_init_workarounds(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int ret;
-
-	ret = gen9_init_workarounds(engine);
-	if (ret)
-		return ret;
-
-	/* WaEnableGapsTsvCreditFix:cfl */
-	I915_WRITE(GEN8_GARBCNTL,
-		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
-
-	/* WaToEnableHwFixForPushConstHWBug:cfl */
-	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-	/* WaDisableGafsUnitClkGating:cfl */
-	I915_WRITE(GEN7_UCGCTL4,
-		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaDisableSbeCacheDispatchPortSharing:cfl */
-	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
-			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
-	/* WaInPlaceDecompressionHang:cfl */
-	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-	return 0;
-}
-
-int init_workarounds_ring(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int err;
-
-	if (GEM_WARN_ON(engine->id != RCS))
-		return -EINVAL;
+	int err = 0;
 
 	dev_priv->workarounds.count = 0;
-	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
 
-	if (IS_BROADWELL(dev_priv))
-		err = bdw_init_workarounds(engine);
-	else if (IS_CHERRYVIEW(dev_priv))
-		err = chv_init_workarounds(engine);
-	else if (IS_SKYLAKE(dev_priv))
-		err =  skl_init_workarounds(engine);
-	else if (IS_BROXTON(dev_priv))
-		err = bxt_init_workarounds(engine);
-	else if (IS_KABYLAKE(dev_priv))
-		err = kbl_init_workarounds(engine);
-	else if (IS_GEMINILAKE(dev_priv))
-		err =  glk_init_workarounds(engine);
-	else if (IS_COFFEELAKE(dev_priv))
-		err = cfl_init_workarounds(engine);
-	else if (IS_CANNONLAKE(dev_priv))
-		err = cnl_init_workarounds(engine);
-	else
+	if (INTEL_GEN(dev_priv) < 8)
 		err = 0;
+	else if (IS_BROADWELL(dev_priv))
+		err = bdw_ctx_workarounds_init(dev_priv);
+	else if (IS_CHERRYVIEW(dev_priv))
+		err = chv_ctx_workarounds_init(dev_priv);
+	else if (IS_SKYLAKE(dev_priv))
+		err = skl_ctx_workarounds_init(dev_priv);
+	else if (IS_BROXTON(dev_priv))
+		err = bxt_ctx_workarounds_init(dev_priv);
+	else if (IS_KABYLAKE(dev_priv))
+		err = kbl_ctx_workarounds_init(dev_priv);
+	else if (IS_GEMINILAKE(dev_priv))
+		err = glk_ctx_workarounds_init(dev_priv);
+	else if (IS_COFFEELAKE(dev_priv))
+		err = cfl_ctx_workarounds_init(dev_priv);
+	else if (IS_CANNONLAKE(dev_priv))
+		err = cnl_ctx_workarounds_init(dev_priv);
+	else
+		MISSING_CASE(INTEL_GEN(dev_priv));
 	if (err)
 		return err;
 
-	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
-			 engine->name, dev_priv->workarounds.count);
+	DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
+			 dev_priv->workarounds.count);
 	return 0;
 }
 
-int intel_ring_workarounds_emit(struct i915_request *rq)
+int intel_ctx_workarounds_emit(struct i915_request *rq)
 {
 	struct i915_workarounds *w = &rq->i915->workarounds;
 	u32 *cs;
@@ -665,7 +488,7 @@ int intel_ring_workarounds_emit(struct i915_request *rq)
 	if (ret)
 		return ret;
 
-	cs = intel_ring_begin(rq, w->count * 2 + 2);
+	cs = intel_ring_begin(rq, (w->count * 2 + 2));
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -684,3 +507,350 @@ int intel_ring_workarounds_emit(struct i915_request *rq)
 
 	return 0;
 }
+
+static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+}
+
+static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+}
+
+static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+	/* WaDisableKillLogic:bxt,skl,kbl */
+	if (!IS_COFFEELAKE(dev_priv))
+		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+			   ECOCHK_DIS_TLB);
+
+	if (HAS_LLC(dev_priv)) {
+		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+		 *
+		 * Must match Display Engine. See
+		 * WaCompressedResourceDisplayNewHashMode.
+		 */
+		I915_WRITE(MMCD_MISC_CTRL,
+			   I915_READ(MMCD_MISC_CTRL) |
+			   MMCD_PCLA |
+			   MMCD_HOTSPOT_EN);
+	}
+
+	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+		   BDW_DISABLE_HDC_INVALIDATION);
+
+	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+	if (IS_GEN9_LP(dev_priv)) {
+		u32 val = I915_READ(GEN8_L3SQCREG1);
+
+		val &= ~L3_PRIO_CREDITS_MASK;
+		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+		I915_WRITE(GEN8_L3SQCREG1, val);
+	}
+
+	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+	I915_WRITE(GEN8_L3SQCREG4,
+		   I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+}
+
+static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	gen9_gt_workarounds_apply(dev_priv);
+
+	/* WaEnableGapsTsvCreditFix:skl */
+	I915_WRITE(GEN8_GARBCNTL,
+		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+	/* WaDisableGafsUnitClkGating:skl */
+	I915_WRITE(GEN7_UCGCTL4,
+		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaInPlaceDecompressionHang:skl */
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+			   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	gen9_gt_workarounds_apply(dev_priv);
+
+	/* WaDisablePooledEuLoadBalancingFix:bxt */
+	I915_WRITE(FF_SLICE_CS_CHICKEN2,
+		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+
+	/* WaInPlaceDecompressionHang:bxt */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	gen9_gt_workarounds_apply(dev_priv);
+
+	/* WaEnableGapsTsvCreditFix:kbl */
+	I915_WRITE(GEN8_GARBCNTL,
+		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+	/* WaDisableDynamicCreditSharing:kbl */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+		I915_WRITE(GAMT_CHKN_BIT_REG,
+			   I915_READ(GAMT_CHKN_BIT_REG) |
+			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+	/* WaDisableGafsUnitClkGating:kbl */
+	I915_WRITE(GEN7_UCGCTL4,
+		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaInPlaceDecompressionHang:kbl */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	gen9_gt_workarounds_apply(dev_priv);
+}
+
+static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	gen9_gt_workarounds_apply(dev_priv);
+
+	/* WaEnableGapsTsvCreditFix:cfl */
+	I915_WRITE(GEN8_GARBCNTL,
+		   I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+
+	/* WaDisableGafsUnitClkGating:cfl */
+	I915_WRITE(GEN7_UCGCTL4,
+		   I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaInPlaceDecompressionHang:cfl */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+}
+
+static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
+	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+		I915_WRITE(GAMT_CHKN_BIT_REG,
+			   I915_READ(GAMT_CHKN_BIT_REG) |
+			   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+
+	/* WaInPlaceDecompressionHang:cnl */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+	/* WaEnablePreemptionGranularityControlByUMD:cnl */
+	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+}
+
+void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_GEN(dev_priv) < 8)
+		return;
+	else if (IS_BROADWELL(dev_priv))
+		bdw_gt_workarounds_apply(dev_priv);
+	else if (IS_CHERRYVIEW(dev_priv))
+		chv_gt_workarounds_apply(dev_priv);
+	else if (IS_SKYLAKE(dev_priv))
+		skl_gt_workarounds_apply(dev_priv);
+	else if (IS_BROXTON(dev_priv))
+		bxt_gt_workarounds_apply(dev_priv);
+	else if (IS_KABYLAKE(dev_priv))
+		kbl_gt_workarounds_apply(dev_priv);
+	else if (IS_GEMINILAKE(dev_priv))
+		glk_gt_workarounds_apply(dev_priv);
+	else if (IS_COFFEELAKE(dev_priv))
+		cfl_gt_workarounds_apply(dev_priv);
+	else if (IS_CANNONLAKE(dev_priv))
+		cnl_gt_workarounds_apply(dev_priv);
+	else
+		MISSING_CASE(INTEL_GEN(dev_priv));
+}
+
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+				 i915_reg_t reg)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct i915_workarounds *wa = &dev_priv->workarounds;
+	const unsigned int index = wa->hw_whitelist_count[engine->id];
+
+	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+		return -EINVAL;
+
+	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
+		   i915_mmio_reg_offset(reg));
+	wa->hw_whitelist_count[engine->id]++;
+
+	return 0;
+}
+
+static int bdw_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	return 0;
+}
+
+static int chv_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	return 0;
+}
+
+static int gen9_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	int ret;
+
+	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
+	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+	if (ret)
+		return ret;
+
+	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+	if (ret)
+		return ret;
+
+	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int skl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	int ret;
+
+	ret = gen9_whitelist_workarounds_apply(engine);
+	if (ret)
+		return ret;
+
+	/* WaDisableLSQCROPERFforOCL:skl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int bxt_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	int ret;
+
+	ret = gen9_whitelist_workarounds_apply(engine);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int kbl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	int ret;
+
+	ret = gen9_whitelist_workarounds_apply(engine);
+	if (ret)
+		return ret;
+
+	/* WaDisableLSQCROPERFforOCL:kbl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int glk_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	int ret;
+
+	ret = gen9_whitelist_workarounds_apply(engine);
+	if (ret)
+		return ret;
+
+	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int cfl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	int ret;
+
+	ret = gen9_whitelist_workarounds_apply(engine);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int cnl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	int ret;
+
+	/* WaEnablePreemptionGranularityControlByUMD:cnl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int err = 0;
+
+	WARN_ON(engine->id != RCS);
+
+	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+
+	if (INTEL_GEN(dev_priv) < 8)
+		err = 0;
+	else if (IS_BROADWELL(dev_priv))
+		err = bdw_whitelist_workarounds_apply(engine);
+	else if (IS_CHERRYVIEW(dev_priv))
+		err = chv_whitelist_workarounds_apply(engine);
+	else if (IS_SKYLAKE(dev_priv))
+		err = skl_whitelist_workarounds_apply(engine);
+	else if (IS_BROXTON(dev_priv))
+		err = bxt_whitelist_workarounds_apply(engine);
+	else if (IS_KABYLAKE(dev_priv))
+		err = kbl_whitelist_workarounds_apply(engine);
+	else if (IS_GEMINILAKE(dev_priv))
+		err = glk_whitelist_workarounds_apply(engine);
+	else if (IS_COFFEELAKE(dev_priv))
+		err = cfl_whitelist_workarounds_apply(engine);
+	else if (IS_CANNONLAKE(dev_priv))
+		err = cnl_whitelist_workarounds_apply(engine);
+	else
+		MISSING_CASE(INTEL_GEN(dev_priv));
+	if (err)
+		return err;
+
+	DRM_DEBUG_DRIVER("%s: Number of whitelist w/a: %d\n", engine->name,
+			 dev_priv->workarounds.hw_whitelist_count[engine->id]);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index 2afea73aeeae..d9b0cc5afb4a 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,7 +7,11 @@
 #ifndef _I915_WORKAROUNDS_H_
 #define _I915_WORKAROUNDS_H_
 
-int init_workarounds_ring(struct intel_engine_cs *engine);
-int intel_ring_workarounds_emit(struct i915_request *rq);
+int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
+int intel_ctx_workarounds_emit(struct i915_request *rq);
+
+void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+
+int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
 
 #endif

From f212bf9abe5de9f938fecea7df07046e74052dde Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Wed, 11 Apr 2018 16:15:18 +0300
Subject: [PATCH 0374/1286] drm/i915/bios: filter out invalid DDC pins from VBT
 child devices

The VBT contains the DDC pin to use for specific ports. Alas, sometimes
the field appears to contain bogus data, and while we check for it later
on in intel_gmbus_get_adapter() we fail to check the returned NULL on
errors. Oops results.

The simplest approach seems to be to catch and ignore the bogus DDC pins
already at the VBT parsing phase, reverting to fixed per port default
pins. This doesn't guarantee a working display, but at least it prevents
the oops. And we continue to be fuzzed by VBT.
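
For reference, the shape of the parse-time check (a simplified sketch of
the hunk further below, not additional code) is:

	ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
	if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
		/* sane pin: use it and run the usual sanitization */
		info->alternate_ddc_pin = ddc_pin;
		sanitize_ddc_pin(dev_priv, port);
	} else {
		/* bogus VBT data: keep the fixed per-port default pin */
		DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, sticking to defaults\n",
			      port_name(port), ddc_pin);
	}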

One affected machine is Dell Latitude 5590 where a BIOS upgrade added
invalid DDC pins.

Typical backtrace:

[   35.461411] WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin))
[   35.461432] WARNING: CPU: 6 PID: 411 at drivers/gpu/drm/i915/intel_i2c.c:844 intel_gmbus_get_adapter+0x32/0x37 [i915]
[   35.461437] Modules linked in: i915 ahci libahci dm_snapshot dm_bufio dm_raid raid456 async_raid6_recov async_pq raid6_pq async_xor xor async_memcpy async_tx
[   35.461445] CPU: 6 PID: 411 Comm: kworker/u16:2 Not tainted 4.16.0-rc7.x64-g1cda370ffded #1
[   35.461447] Hardware name: Dell Inc. Latitude 5590/0MM81M, BIOS 1.1.9 03/13/2018
[   35.461450] Workqueue: events_unbound async_run_entry_fn
[   35.461465] RIP: 0010:intel_gmbus_get_adapter+0x32/0x37 [i915]
[   35.461467] RSP: 0018:ffff9b4e43d47c40 EFLAGS: 00010286
[   35.461469] RAX: 0000000000000000 RBX: ffff98f90639f800 RCX: ffffffffae051960
[   35.461471] RDX: 0000000000000001 RSI: 0000000000000092 RDI: 0000000000000246
[   35.461472] RBP: ffff98f905410000 R08: 0000004d062a83f6 R09: 00000000000003bd
[   35.461474] R10: 0000000000000031 R11: ffffffffad4eda58 R12: ffff98f905410000
[   35.461475] R13: ffff98f9064c1000 R14: ffff9b4e43d47cf0 R15: ffff98f905410000
[   35.461477] FS:  0000000000000000(0000) GS:ffff98f92e580000(0000) knlGS:0000000000000000
[   35.461479] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   35.461481] CR2: 00007f5682359008 CR3: 00000001b700c005 CR4: 00000000003606e0
[   35.461483] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[   35.461484] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[   35.461486] Call Trace:
[   35.461501]  intel_hdmi_set_edid+0x37/0x27f [i915]
[   35.461515]  intel_hdmi_detect+0x7c/0x97 [i915]
[   35.461518]  drm_helper_probe_single_connector_modes+0xe1/0x6c0
[   35.461521]  drm_setup_crtcs+0x129/0xa6a
[   35.461523]  ? __switch_to_asm+0x34/0x70
[   35.461525]  ? __switch_to_asm+0x34/0x70
[   35.461527]  ? __switch_to_asm+0x40/0x70
[   35.461528]  ? __switch_to_asm+0x34/0x70
[   35.461529]  ? __switch_to_asm+0x40/0x70
[   35.461531]  ? __switch_to_asm+0x34/0x70
[   35.461532]  ? __switch_to_asm+0x40/0x70
[   35.461534]  ? __switch_to_asm+0x34/0x70
[   35.461536]  __drm_fb_helper_initial_config_and_unlock+0x34/0x46f
[   35.461538]  ? __switch_to_asm+0x40/0x70
[   35.461541]  ? _cond_resched+0x10/0x33
[   35.461557]  intel_fbdev_initial_config+0xf/0x1c [i915]
[   35.461560]  async_run_entry_fn+0x2e/0xf5
[   35.461563]  process_one_work+0x15b/0x364
[   35.461565]  worker_thread+0x2c/0x3a0
[   35.461567]  ? process_one_work+0x364/0x364
[   35.461568]  kthread+0x10c/0x122
[   35.461570]  ? _kthread_create_on_node+0x5d/0x5d
[   35.461572]  ret_from_fork+0x35/0x40
[   35.461574] Code: 74 16 89 f6 48 8d 04 b6 48 c1 e0 05 48 29 f0 48 8d 84 c7 e8 11 00 00 c3 48 c7 c6 b0 19 1e c0 48 c7 c7 64 8a 1c c0 e8 47 88 ed ec <0f> 0b 31 c0 c3 8b 87 a4 04 00 00 80 e4 fc 09 c6 89 b7 a4 04 00
[   35.461604] WARNING: CPU: 6 PID: 411 at drivers/gpu/drm/i915/intel_i2c.c:844 intel_gmbus_get_adapter+0x32/0x37 [i915]
[   35.461606] ---[ end trace 4fe1e63e2dd93373 ]---
[   35.461609] BUG: unable to handle kernel NULL pointer dereference at 0000000000000010
[   35.461613] IP: i2c_transfer+0x4/0x86
[   35.461614] PGD 0 P4D 0
[   35.461616] Oops: 0000 [#1] SMP PTI
[   35.461618] Modules linked in: i915 ahci libahci dm_snapshot dm_bufio dm_raid raid456 async_raid6_recov async_pq raid6_pq async_xor xor async_memcpy async_tx
[   35.461624] CPU: 6 PID: 411 Comm: kworker/u16:2 Tainted: G        W        4.16.0-rc7.x64-g1cda370ffded #1
[   35.461625] Hardware name: Dell Inc. Latitude 5590/0MM81M, BIOS 1.1.9 03/13/2018
[   35.461628] Workqueue: events_unbound async_run_entry_fn
[   35.461630] RIP: 0010:i2c_transfer+0x4/0x86
[   35.461631] RSP: 0018:ffff9b4e43d47b30 EFLAGS: 00010246
[   35.461633] RAX: ffff9b4e43d47b6e RBX: 0000000000000005 RCX: 0000000000000001
[   35.461635] RDX: 0000000000000002 RSI: ffff9b4e43d47b80 RDI: 0000000000000000
[   35.461636] RBP: ffff9b4e43d47bd8 R08: 0000004d062a83f6 R09: 00000000000003bd
[   35.461638] R10: 0000000000000031 R11: ffffffffad4eda58 R12: 0000000000000002
[   35.461639] R13: 0000000000000001 R14: ffff9b4e43d47b6f R15: ffff9b4e43d47c07
[   35.461641] FS:  0000000000000000(0000) GS:ffff98f92e580000(0000) knlGS:0000000000000000
[   35.461643] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   35.461645] CR2: 0000000000000010 CR3: 00000001b700c005 CR4: 00000000003606e0
[   35.461646] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[   35.461647] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[   35.461649] Call Trace:
[   35.461652]  drm_do_probe_ddc_edid+0xb3/0x128
[   35.461654]  drm_get_edid+0xe5/0x38d
[   35.461669]  intel_hdmi_set_edid+0x45/0x27f [i915]
[   35.461684]  intel_hdmi_detect+0x7c/0x97 [i915]
[   35.461687]  drm_helper_probe_single_connector_modes+0xe1/0x6c0
[   35.461689]  drm_setup_crtcs+0x129/0xa6a
[   35.461691]  ? __switch_to_asm+0x34/0x70
[   35.461693]  ? __switch_to_asm+0x34/0x70
[   35.461694]  ? __switch_to_asm+0x40/0x70
[   35.461696]  ? __switch_to_asm+0x34/0x70
[   35.461697]  ? __switch_to_asm+0x40/0x70
[   35.461698]  ? __switch_to_asm+0x34/0x70
[   35.461700]  ? __switch_to_asm+0x40/0x70
[   35.461701]  ? __switch_to_asm+0x34/0x70
[   35.461703]  __drm_fb_helper_initial_config_and_unlock+0x34/0x46f
[   35.461705]  ? __switch_to_asm+0x40/0x70
[   35.461707]  ? _cond_resched+0x10/0x33
[   35.461724]  intel_fbdev_initial_config+0xf/0x1c [i915]
[   35.461727]  async_run_entry_fn+0x2e/0xf5
[   35.461729]  process_one_work+0x15b/0x364
[   35.461731]  worker_thread+0x2c/0x3a0
[   35.461733]  ? process_one_work+0x364/0x364
[   35.461734]  kthread+0x10c/0x122
[   35.461736]  ? _kthread_create_on_node+0x5d/0x5d
[   35.461738]  ret_from_fork+0x35/0x40
[   35.461739] Code: 5c fa e1 ad 48 89 df e8 ea fb ff ff e9 2a ff ff ff 0f 1f 44 00 00 31 c0 e9 43 fd ff ff 31 c0 45 31 e4 e9 c5 fd ff ff 41 54 55 53 <48> 8b 47 10 48 83 78 10 00 74 70 41 89 d4 48 89 f5 48 89 fb 65
[   35.461756] RIP: i2c_transfer+0x4/0x86 RSP: ffff9b4e43d47b30
[   35.461757] CR2: 0000000000000010
[   35.461759] ---[ end trace 4fe1e63e2dd93374 ]---

Based on a patch by Fei Li.

v2: s/reverting/sticking/ (Chris)

Cc: stable@vger.kernel.org
Cc: Fei Li <fei.li@intel.com>
Co-developed-by: Fei Li <fei.li@intel.com>
Reported-by: Pavel Nakonechnyi <zorg1331@gmail.com>
Reported-and-tested-by: Seweryn Kokot <sewkokot@gmail.com>
Reported-and-tested-by: Laszlo Valko <valko@linux.karinthy.hu>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105549
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105961
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180411131519.9091-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_bios.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6aae88c4df52..0f25cecb942f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		return;
 
 	aux_channel = child->aux_channel;
-	ddc_pin = child->ddc_pin;
 
 	is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
 	is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1296,9 +1295,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
-
-		sanitize_ddc_pin(dev_priv, port);
+		ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
+		if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
+			info->alternate_ddc_pin = ddc_pin;
+			sanitize_ddc_pin(dev_priv, port);
+		} else {
+			DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
+				      "sticking to defaults\n",
+				      port_name(port), ddc_pin);
+		}
 	}
 
 	if (is_dp) {

From e53a1058395435b8801591361b2be18adda869ff Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Wed, 11 Apr 2018 16:15:19 +0300
Subject: [PATCH 0375/1286] drm/i915/bios: reduce the scope of some local
 variables in parse_ddi_port()

No functional changes.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180411131519.9091-2-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_bios.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0f25cecb942f..702d3fab97fc 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1215,10 +1215,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 {
 	struct child_device_config *it, *child = NULL;
 	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
-	uint8_t hdmi_level_shift;
 	int i, j;
 	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
-	uint8_t aux_channel, ddc_pin;
 	/* Each DDI port can have more than one value on the "DVO Port" field,
 	 * so look for all the possible values for each port.
 	 */
@@ -1255,8 +1253,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	if (!child)
 		return;
 
-	aux_channel = child->aux_channel;
-
 	is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
 	is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
 	is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
@@ -1295,6 +1291,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
+		u8 ddc_pin;
+
 		ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
 		if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
 			info->alternate_ddc_pin = ddc_pin;
@@ -1307,14 +1305,14 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	}
 
 	if (is_dp) {
-		info->alternate_aux_channel = aux_channel;
+		info->alternate_aux_channel = child->aux_channel;
 
 		sanitize_aux_ch(dev_priv, port);
 	}
 
 	if (bdb_version >= 158) {
 		/* The VBT HDMI level shift values match the table we have. */
-		hdmi_level_shift = child->hdmi_level_shifter_value;
+		u8 hdmi_level_shift = child->hdmi_level_shifter_value;
 		DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
 			      port_name(port),
 			      hdmi_level_shift);

From 61bf9719fa170cd73b1937770d08cb062e070958 Mon Sep 17 00:00:00 2001
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Thu, 12 Apr 2018 17:58:02 +0300
Subject: [PATCH 0376/1286] drm/i915/cnl: Use mmio access to context status
 buffer

Evidence indicates that Cannonlake HWSP is not coherent
as it should be. Revert to using mmio access for now.
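
The change below in csb_force_mmio() amounts to (a minimal sketch of the
intent, mirroring the hunk that follows):

	/* Cannonlake HWSP appears not to be coherent; force mmio CSB reads */
	if (IS_CANNONLAKE(i915))
		return true;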

Testcase: igt/gem_ctx_switch
References: https://bugs.freedesktop.org/show_bug.cgi?id=105888
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Rafael Antognolli <rafael.antognolli@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180412145802.23313-1-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 68898d58dd1e..1a8370779bbb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -475,6 +475,9 @@ static bool csb_force_mmio(struct drm_i915_private *i915)
 	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
 		return true;
 
+	if (IS_CANNONLAKE(i915))
+		return true;
+
 	return false;
 }
 

From fadec6eefe232696c5c471b40df33e6db616e854 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Fri, 13 Apr 2018 12:20:58 +0300
Subject: [PATCH 0377/1286] drm/i915: Update DRIVER_DATE to 20180413

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 649c0f2f3bae..e50d9589d6e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -84,8 +84,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20180308"
-#define DRIVER_TIMESTAMP	1520513379
+#define DRIVER_DATE		"20180413"
+#define DRIVER_TIMESTAMP	1523611258
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions

From 5bbed0b38dd02dfb58d195487ef5b23e74b8b6c1 Mon Sep 17 00:00:00 2001
From: "Piorkowski, Piotr" <piotr.piorkowski@intel.com>
Date: Fri, 13 Apr 2018 10:52:45 +0200
Subject: [PATCH 0378/1286] drm/i915/guc: Remove GUC_CTL_DEVICE_INFO parameter
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It looks like the GuC does not actively use the GUC_CTL_DEVICE_INFO
parameter, where we are passing the GT type and Core family values.
Let's stop setting up this parameter and remove the related
definitions.

v2: (this time without squashed HAX)
  - New title and description
  - Also remove the GUC_CORE_FAMILY_* definitions (Michel)
v3:
  - The removed define GUC_CTL_DEVICE_INFO has been restored (Michel)
  - Updated description (Sagar)
v4: rebase

Signed-off-by: Piotr Piórkowski <piotr.piorkowski@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: John A Spotswood <john.a.spotswood@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Acked-by: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180413085245.57206-1-piotr.piorkowski@intel.com
---
 drivers/gpu/drm/i915/intel_guc.c      | 24 ------------------------
 drivers/gpu/drm/i915/intel_guc_fwif.h |  5 -----
 2 files changed, 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index a00a59a7d9ec..116f4ccf1bbd 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -203,26 +203,6 @@ void intel_guc_fini(struct intel_guc *guc)
 	guc_shared_data_destroy(guc);
 }
 
-static u32 get_gt_type(struct drm_i915_private *dev_priv)
-{
-	/* XXX: GT type based on PCI device ID? field seems unused by fw */
-	return 0;
-}
-
-static u32 get_core_family(struct drm_i915_private *dev_priv)
-{
-	u32 gen = INTEL_GEN(dev_priv);
-
-	switch (gen) {
-	case 9:
-		return GUC_CORE_FAMILY_GEN9;
-
-	default:
-		MISSING_CASE(gen);
-		return GUC_CORE_FAMILY_UNKNOWN;
-	}
-}
-
 static u32 get_log_control_flags(void)
 {
 	u32 level = i915_modparams.guc_log_level;
@@ -255,10 +235,6 @@ void intel_guc_init_params(struct intel_guc *guc)
 
 	memset(params, 0, sizeof(params));
 
-	params[GUC_CTL_DEVICE_INFO] |=
-		(get_gt_type(dev_priv) << GUC_CTL_GT_TYPE_SHIFT) |
-		(get_core_family(dev_priv) << GUC_CTL_CORE_FAMILY_SHIFT);
-
 	/*
 	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
 	 * second. This ARAR is calculated by:
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index d73673f5d30c..0867ba76d445 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -23,9 +23,6 @@
 #ifndef _INTEL_GUC_FWIF_H
 #define _INTEL_GUC_FWIF_H
 
-#define GUC_CORE_FAMILY_GEN9		12
-#define GUC_CORE_FAMILY_UNKNOWN		0x7fffffff
-
 #define GUC_CLIENT_PRIORITY_KMD_HIGH	0
 #define GUC_CLIENT_PRIORITY_HIGH	1
 #define GUC_CLIENT_PRIORITY_KMD_NORMAL	2
@@ -82,8 +79,6 @@
 #define GUC_CTL_ARAT_LOW		2
 
 #define GUC_CTL_DEVICE_INFO		3
-#define   GUC_CTL_GT_TYPE_SHIFT		0
-#define   GUC_CTL_CORE_FAMILY_SHIFT	7
 
 #define GUC_CTL_LOG_PARAMS		4
 #define   GUC_LOG_VALID			(1 << 0)

From 8feaccf71dd61f2201493068055e0d1d699014df Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Tue, 24 Apr 2018 16:35:49 +0300
Subject: [PATCH 0379/1286] drm/amdkfd: Integer overflows in ioctl

args->n_devices is a u32 that comes from the user.  The multiplication
could overflow on 32-bit systems, possibly leading to privilege
escalation.
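
For illustration only (a conceptual sketch of why kmalloc_array() is the
right tool here, with a hypothetical helper name), the overflow is avoided
by checking the multiplication before allocating:

	/* roughly what kmalloc_array(n, size, flags) guards against */
	static inline void *alloc_array_checked(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;	/* n * size would wrap */
		return kmalloc(n * size, flags);
	}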

Fixes: 5ec7e02854b3 ("drm/amdkfd: Add ioctls for GPUVM memory management")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5694fbead9a5..ce15baf68d4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1303,8 +1303,8 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 		return -EINVAL;
 	}
 
-	devices_arr = kmalloc(args->n_devices * sizeof(*devices_arr),
-			      GFP_KERNEL);
+	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
+				    GFP_KERNEL);
 	if (!devices_arr)
 		return -ENOMEM;
 
@@ -1412,8 +1412,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 		return -EINVAL;
 	}
 
-	devices_arr = kmalloc(args->n_devices * sizeof(*devices_arr),
-			      GFP_KERNEL);
+	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
+				    GFP_KERNEL);
 	if (!devices_arr)
 		return -ENOMEM;
 

From 24f48a42038f5baaae49b181b64782ecfb703a9c Mon Sep 17 00:00:00 2001
From: Oak Zeng <Oak.Zeng@amd.com>
Date: Tue, 1 May 2018 17:56:01 -0400
Subject: [PATCH 0380/1286] drm/amdkfd: Dump HQD of HIQ

Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c    | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 9af94b1f9074..668ad07ebe1f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1713,6 +1713,18 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
 	int pipe, queue;
 	int r = 0;
 
+	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
+		KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs);
+	if (!r) {
+		seq_printf(m, "  HIQ on MEC %d Pipe %d Queue %d\n",
+				KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
+				KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
+				KFD_CIK_HIQ_QUEUE);
+		seq_reg_dump(m, dump, n_regs);
+
+		kfree(dump);
+	}
+
 	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
 		int pipe_offset = pipe * get_queues_per_pipe(dqm);
 

From 87e6d4e0777daf774ed9aa59ed25b6ebaaad7052 Mon Sep 17 00:00:00 2001
From: Jay Cornwall <Jay.Cornwall@amd.com>
Date: Tue, 1 May 2018 17:56:02 -0400
Subject: [PATCH 0381/1286] drm/amdkfd: Reduce priority of context-saving waves
 before spin-wait

Synchronization between context-saving wavefronts is achieved by
sending a SAVEWAVE message to the SPI and then spin-waiting for a
response. These spin-waiting wavefronts may inhibit the progress
of other wavefronts in the context save handler, leading to the
synchronization condition never being achieved.

Before spin-waiting, reduce the priority of each wavefront to
guarantee forward progress in the others.

Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 10 ++++++++--
 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm |  8 +++++++-
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index 997a383dcb8b..34eabcdd27a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -98,6 +98,7 @@ var SWIZZLE_EN                      =   0                   //whether we use swi
 /**************************************************************************/
 var SQ_WAVE_STATUS_INST_ATC_SHIFT  = 23
 var SQ_WAVE_STATUS_INST_ATC_MASK   = 0x00800000
+var SQ_WAVE_STATUS_SPI_PRIO_SHIFT  = 1
 var SQ_WAVE_STATUS_SPI_PRIO_MASK   = 0x00000006
 
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT    = 12
@@ -319,6 +320,10 @@ end
         s_sendmsg   sendmsg(MSG_SAVEWAVE)  //send SPI a message and wait for SPI's write to EXEC
     end
 
+    // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
+    s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
+    s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp
+
   L_SLEEP:
     s_sleep 0x2                // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
 
@@ -1132,7 +1137,7 @@ end
 #endif
 
 static const uint32_t cwsr_trap_gfx8_hex[] = {
-	0xbf820001, 0xbf820123,
+	0xbf820001, 0xbf820125,
 	0xb8f4f802, 0x89748674,
 	0xb8f5f803, 0x8675ff75,
 	0x00000400, 0xbf850011,
@@ -1158,7 +1163,8 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
 	0x867aff7a, 0x00007fff,
 	0xb97af807, 0xbef2007e,
 	0xbef3007f, 0xbefe0180,
-	0xbf900004, 0xbf8e0002,
+	0xbf900004, 0x877a8474,
+	0xb97af802, 0xbf8e0002,
 	0xbf88fffe, 0xbef8007e,
 	0x8679ff7f, 0x0000ffff,
 	0x8779ff79, 0x00040000,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 033580c997ea..cac8d4992e04 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -97,6 +97,7 @@ var ACK_SQC_STORE		    =	1		    //workaround for suspected SQC store bug causing
 /**************************************************************************/
 var SQ_WAVE_STATUS_INST_ATC_SHIFT  = 23
 var SQ_WAVE_STATUS_INST_ATC_MASK   = 0x00800000
+var SQ_WAVE_STATUS_SPI_PRIO_SHIFT  = 1
 var SQ_WAVE_STATUS_SPI_PRIO_MASK   = 0x00000006
 var SQ_WAVE_STATUS_HALT_MASK       = 0x2000
 
@@ -362,6 +363,10 @@ end
 	s_sendmsg   sendmsg(MSG_SAVEWAVE)  //send SPI a message and wait for SPI's write to EXEC
     end
 
+    // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
+    s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
+    s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp
+
   L_SLEEP:
     s_sleep 0x2		       // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0
 
@@ -1210,7 +1215,7 @@ end
 #endif
 
 static const uint32_t cwsr_trap_gfx9_hex[] = {
-	0xbf820001, 0xbf820158,
+	0xbf820001, 0xbf82015a,
 	0xb8f8f802, 0x89788678,
 	0xb8f1f803, 0x866eff71,
 	0x00000400, 0xbf850034,
@@ -1249,6 +1254,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
 	0x00007fff, 0xb970f807,
 	0xbeee007e, 0xbeef007f,
 	0xbefe0180, 0xbf900004,
+	0x87708478, 0xb970f802,
 	0xbf8e0002, 0xbf88fffe,
 	0xb8f02a05, 0x80708170,
 	0x8e708a70, 0xb8f11605,

From 2774c63ef3dbb6052dd1d224b38a9decf89be61c Mon Sep 17 00:00:00 2001
From: Jay Cornwall <Jay.Cornwall@amd.com>
Date: Tue, 1 May 2018 17:56:03 -0400
Subject: [PATCH 0382/1286] drm/amdkfd: Use volatile MTYPE in default/alternate
 apertures

MTYPE_NC_NV (0) marks scalar/vector L1 cache lines as non-volatile.
Cache lines loaded through these apertures are intended to be
invalidated before (and sometimes during) a dispatch. The non-volatile
qualifier prevents these cache lines from being distinguished from
those loaded through the private aperture.

Use MTYPE_NC (1) instead on both Gfx7 and Gfx8. This allows the
compiler to use the BUFFER_WBINVL1_VOL instruction and is a precursor
to automatic per-dispatch scalar/vector L1 volatile invalidation.
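
As a rough, hypothetical illustration (usage invented for this note, not
part of the patch), any code composing an aperture type with the macros
from cik_regs.h now picks up the volatile encoding via the renamed define:

	/* APE1_MTYPE(x) is ((x) << 7); MTYPE_CACHED now encodes MTYPE_NC (1) */
	u32 ape1_mtype_bits = APE1_MTYPE(MTYPE_CACHED);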

Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/cik_regs.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 48769d12dd7b..37ce6dd65391 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -33,7 +33,8 @@
 #define	APE1_MTYPE(x)					((x) << 7)
 
 /* valid for both DEFAULT_MTYPE and APE1_MTYPE */
-#define	MTYPE_CACHED					0
+#define	MTYPE_CACHED_NV					0
+#define	MTYPE_CACHED					1
 #define	MTYPE_NONCACHED					3
 
 #define	DEFAULT_CP_HQD_PERSISTENT_STATE			(0x33U << 8)

From fa7e65147e5dcafdf8d6c3787e5b22ec5f6bcbdc Mon Sep 17 00:00:00 2001
From: Philip Yang <Philip.Yang@amd.com>
Date: Tue, 1 May 2018 17:56:04 -0400
Subject: [PATCH 0383/1286] drm/amdkfd: use %px to print user space address
 instead of %p

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_queue.c   | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index ce15baf68d4c..beaa613c22f4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -233,7 +233,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	pr_debug("Queue Size: 0x%llX, %u\n",
 			q_properties->queue_size, args->ring_size);
 
-	pr_debug("Queue r/w Pointers: %p, %p\n",
+	pr_debug("Queue r/w Pointers: %px, %px\n",
 			q_properties->read_ptr,
 			q_properties->write_ptr);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index a5315d4f1c95..6dcd621e5b71 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -36,8 +36,8 @@ void print_queue_properties(struct queue_properties *q)
 	pr_debug("Queue Address: 0x%llX\n", q->queue_address);
 	pr_debug("Queue Id: %u\n", q->queue_id);
 	pr_debug("Queue Process Vmid: %u\n", q->vmid);
-	pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr);
-	pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr);
+	pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr);
+	pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr);
 	pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
 	pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
 }
@@ -53,8 +53,8 @@ void print_queue(struct queue *q)
 	pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
 	pr_debug("Queue Id: %u\n", q->properties.queue_id);
 	pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
-	pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr);
-	pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr);
+	pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr);
+	pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr);
 	pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
 	pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
 	pr_debug("Queue MQD Address: 0x%p\n", q->mqd);

From a2e94158b83185c9dac430cb53bff26737a786ef Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 1 May 2018 17:56:05 -0400
Subject: [PATCH 0384/1286] drm/amdkfd: Remove redundant include of amd-iommu.h

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index dd6c7535b6b4..c1d9e2772cbc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -20,9 +20,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
-#include <linux/amd-iommu.h>
-#endif
 #include <linux/bsearch.h>
 #include <linux/pci.h>
 #include <linux/slab.h>

From 0db54b24ad676c3f2d0cf5291c9d170e3e15f213 Mon Sep 17 00:00:00 2001
From: Yong Zhao <yong.zhao@amd.com>
Date: Tue, 1 May 2018 17:56:06 -0400
Subject: [PATCH 0385/1286] drm/amdkfd: Separate trap handler assembly code and
 its hex values

Since the assembly code is inside "#if 0", it is never built. Even so,
during debugging we need to change the assembly code, extract it into
a separate file, and compile the new file into hex values using sp3.
That process also requires us to remove the "#if 0" and modify lines
starting with "#" so that sp3 can successfully compile the new file.

With this change, none of the above chores are needed any more, and
cwsr_trap_handler_gfx*.asm can be fed directly to sp3 to generate the
hex values.
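
For reference, the compile command now recorded at the top of the gfx8
assembly file is:

	PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex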

Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 .../gpu/drm/amd/amdkfd/cwsr_trap_handler.h    | 560 ++++++++++++++++++
 .../drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 267 +--------
 .../drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 300 +---------
 drivers/gpu/drm/amd/amdkfd/kfd_device.c       |   3 +-
 4 files changed, 575 insertions(+), 555 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h

diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
new file mode 100644
index 000000000000..a546a219d025
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+static const uint32_t cwsr_trap_gfx8_hex[] = {
+	0xbf820001, 0xbf820125,
+	0xb8f4f802, 0x89748674,
+	0xb8f5f803, 0x8675ff75,
+	0x00000400, 0xbf850011,
+	0xc00a1e37, 0x00000000,
+	0xbf8c007f, 0x87777978,
+	0xbf840002, 0xb974f802,
+	0xbe801d78, 0xb8f5f803,
+	0x8675ff75, 0x000001ff,
+	0xbf850002, 0x80708470,
+	0x82718071, 0x8671ff71,
+	0x0000ffff, 0xb974f802,
+	0xbe801f70, 0xb8f5f803,
+	0x8675ff75, 0x00000100,
+	0xbf840006, 0xbefa0080,
+	0xb97a0203, 0x8671ff71,
+	0x0000ffff, 0x80f08870,
+	0x82f18071, 0xbefa0080,
+	0xb97a0283, 0xbef60068,
+	0xbef70069, 0xb8fa1c07,
+	0x8e7a9c7a, 0x87717a71,
+	0xb8fa03c7, 0x8e7a9b7a,
+	0x87717a71, 0xb8faf807,
+	0x867aff7a, 0x00007fff,
+	0xb97af807, 0xbef2007e,
+	0xbef3007f, 0xbefe0180,
+	0xbf900004, 0x877a8474,
+	0xb97af802, 0xbf8e0002,
+	0xbf88fffe, 0xbef8007e,
+	0x8679ff7f, 0x0000ffff,
+	0x8779ff79, 0x00040000,
+	0xbefa0080, 0xbefb00ff,
+	0x00807fac, 0x867aff7f,
+	0x08000000, 0x8f7a837a,
+	0x877b7a7b, 0x867aff7f,
+	0x70000000, 0x8f7a817a,
+	0x877b7a7b, 0xbeef007c,
+	0xbeee0080, 0xb8ee2a05,
+	0x806e816e, 0x8e6e8a6e,
+	0xb8fa1605, 0x807a817a,
+	0x8e7a867a, 0x806e7a6e,
+	0xbefa0084, 0xbefa00ff,
+	0x01000000, 0xbefe007c,
+	0xbefc006e, 0xc0611bfc,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0xbefe007c,
+	0xbefc006e, 0xc0611c3c,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0xbefe007c,
+	0xbefc006e, 0xc0611c7c,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0xbefe007c,
+	0xbefc006e, 0xc0611cbc,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0xbefe007c,
+	0xbefc006e, 0xc0611cfc,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0xbefe007c,
+	0xbefc006e, 0xc0611d3c,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0xb8f5f803,
+	0xbefe007c, 0xbefc006e,
+	0xc0611d7c, 0x0000007c,
+	0x806e846e, 0xbefc007e,
+	0xbefe007c, 0xbefc006e,
+	0xc0611dbc, 0x0000007c,
+	0x806e846e, 0xbefc007e,
+	0xbefe007c, 0xbefc006e,
+	0xc0611dfc, 0x0000007c,
+	0x806e846e, 0xbefc007e,
+	0xb8eff801, 0xbefe007c,
+	0xbefc006e, 0xc0611bfc,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0xbefe007c,
+	0xbefc006e, 0xc0611b3c,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0xbefe007c,
+	0xbefc006e, 0xc0611b7c,
+	0x0000007c, 0x806e846e,
+	0xbefc007e, 0x867aff7f,
+	0x04000000, 0xbef30080,
+	0x8773737a, 0xb8ee2a05,
+	0x806e816e, 0x8e6e8a6e,
+	0xb8f51605, 0x80758175,
+	0x8e758475, 0x8e7a8275,
+	0xbefa00ff, 0x01000000,
+	0xbef60178, 0x80786e78,
+	0x82798079, 0xbefc0080,
+	0xbe802b00, 0xbe822b02,
+	0xbe842b04, 0xbe862b06,
+	0xbe882b08, 0xbe8a2b0a,
+	0xbe8c2b0c, 0xbe8e2b0e,
+	0xc06b003c, 0x00000000,
+	0xc06b013c, 0x00000010,
+	0xc06b023c, 0x00000020,
+	0xc06b033c, 0x00000030,
+	0x8078c078, 0x82798079,
+	0x807c907c, 0xbf0a757c,
+	0xbf85ffeb, 0xbef80176,
+	0xbeee0080, 0xbefe00c1,
+	0xbeff00c1, 0xbefa00ff,
+	0x01000000, 0xe0724000,
+	0x6e1e0000, 0xe0724100,
+	0x6e1e0100, 0xe0724200,
+	0x6e1e0200, 0xe0724300,
+	0x6e1e0300, 0xbefe00c1,
+	0xbeff00c1, 0xb8f54306,
+	0x8675c175, 0xbf84002c,
+	0xbf8a0000, 0x867aff73,
+	0x04000000, 0xbf840028,
+	0x8e758675, 0x8e758275,
+	0xbefa0075, 0xb8ee2a05,
+	0x806e816e, 0x8e6e8a6e,
+	0xb8fa1605, 0x807a817a,
+	0x8e7a867a, 0x806e7a6e,
+	0x806eff6e, 0x00000080,
+	0xbefa00ff, 0x01000000,
+	0xbefc0080, 0xd28c0002,
+	0x000100c1, 0xd28d0003,
+	0x000204c1, 0xd1060002,
+	0x00011103, 0x7e0602ff,
+	0x00000200, 0xbefc00ff,
+	0x00010000, 0xbe80007b,
+	0x867bff7b, 0xff7fffff,
+	0x877bff7b, 0x00058000,
+	0xd8ec0000, 0x00000002,
+	0xbf8c007f, 0xe0765000,
+	0x6e1e0002, 0x32040702,
+	0xd0c9006a, 0x0000eb02,
+	0xbf87fff7, 0xbefb0000,
+	0xbeee00ff, 0x00000400,
+	0xbefe00c1, 0xbeff00c1,
+	0xb8f52a05, 0x80758175,
+	0x8e758275, 0x8e7a8875,
+	0xbefa00ff, 0x01000000,
+	0xbefc0084, 0xbf0a757c,
+	0xbf840015, 0xbf11017c,
+	0x8075ff75, 0x00001000,
+	0x7e000300, 0x7e020301,
+	0x7e040302, 0x7e060303,
+	0xe0724000, 0x6e1e0000,
+	0xe0724100, 0x6e1e0100,
+	0xe0724200, 0x6e1e0200,
+	0xe0724300, 0x6e1e0300,
+	0x807c847c, 0x806eff6e,
+	0x00000400, 0xbf0a757c,
+	0xbf85ffef, 0xbf9c0000,
+	0xbf8200ca, 0xbef8007e,
+	0x8679ff7f, 0x0000ffff,
+	0x8779ff79, 0x00040000,
+	0xbefa0080, 0xbefb00ff,
+	0x00807fac, 0x8676ff7f,
+	0x08000000, 0x8f768376,
+	0x877b767b, 0x8676ff7f,
+	0x70000000, 0x8f768176,
+	0x877b767b, 0x8676ff7f,
+	0x04000000, 0xbf84001e,
+	0xbefe00c1, 0xbeff00c1,
+	0xb8f34306, 0x8673c173,
+	0xbf840019, 0x8e738673,
+	0x8e738273, 0xbefa0073,
+	0xb8f22a05, 0x80728172,
+	0x8e728a72, 0xb8f61605,
+	0x80768176, 0x8e768676,
+	0x80727672, 0x8072ff72,
+	0x00000080, 0xbefa00ff,
+	0x01000000, 0xbefc0080,
+	0xe0510000, 0x721e0000,
+	0xe0510100, 0x721e0000,
+	0x807cff7c, 0x00000200,
+	0x8072ff72, 0x00000200,
+	0xbf0a737c, 0xbf85fff6,
+	0xbef20080, 0xbefe00c1,
+	0xbeff00c1, 0xb8f32a05,
+	0x80738173, 0x8e738273,
+	0x8e7a8873, 0xbefa00ff,
+	0x01000000, 0xbef60072,
+	0x8072ff72, 0x00000400,
+	0xbefc0084, 0xbf11087c,
+	0x8073ff73, 0x00008000,
+	0xe0524000, 0x721e0000,
+	0xe0524100, 0x721e0100,
+	0xe0524200, 0x721e0200,
+	0xe0524300, 0x721e0300,
+	0xbf8c0f70, 0x7e000300,
+	0x7e020301, 0x7e040302,
+	0x7e060303, 0x807c847c,
+	0x8072ff72, 0x00000400,
+	0xbf0a737c, 0xbf85ffee,
+	0xbf9c0000, 0xe0524000,
+	0x761e0000, 0xe0524100,
+	0x761e0100, 0xe0524200,
+	0x761e0200, 0xe0524300,
+	0x761e0300, 0xb8f22a05,
+	0x80728172, 0x8e728a72,
+	0xb8f61605, 0x80768176,
+	0x8e768676, 0x80727672,
+	0x80f2c072, 0xb8f31605,
+	0x80738173, 0x8e738473,
+	0x8e7a8273, 0xbefa00ff,
+	0x01000000, 0xbefc0073,
+	0xc031003c, 0x00000072,
+	0x80f2c072, 0xbf8c007f,
+	0x80fc907c, 0xbe802d00,
+	0xbe822d02, 0xbe842d04,
+	0xbe862d06, 0xbe882d08,
+	0xbe8a2d0a, 0xbe8c2d0c,
+	0xbe8e2d0e, 0xbf06807c,
+	0xbf84fff1, 0xb8f22a05,
+	0x80728172, 0x8e728a72,
+	0xb8f61605, 0x80768176,
+	0x8e768676, 0x80727672,
+	0xbefa0084, 0xbefa00ff,
+	0x01000000, 0xc0211cfc,
+	0x00000072, 0x80728472,
+	0xc0211c3c, 0x00000072,
+	0x80728472, 0xc0211c7c,
+	0x00000072, 0x80728472,
+	0xc0211bbc, 0x00000072,
+	0x80728472, 0xc0211bfc,
+	0x00000072, 0x80728472,
+	0xc0211d3c, 0x00000072,
+	0x80728472, 0xc0211d7c,
+	0x00000072, 0x80728472,
+	0xc0211a3c, 0x00000072,
+	0x80728472, 0xc0211a7c,
+	0x00000072, 0x80728472,
+	0xc0211dfc, 0x00000072,
+	0x80728472, 0xc0211b3c,
+	0x00000072, 0x80728472,
+	0xc0211b7c, 0x00000072,
+	0x80728472, 0xbf8c007f,
+	0x8671ff71, 0x0000ffff,
+	0xbefc0073, 0xbefe006e,
+	0xbeff006f, 0x867375ff,
+	0x000003ff, 0xb9734803,
+	0x867375ff, 0xfffff800,
+	0x8f738b73, 0xb973a2c3,
+	0xb977f801, 0x8673ff71,
+	0xf0000000, 0x8f739c73,
+	0x8e739073, 0xbef60080,
+	0x87767376, 0x8673ff71,
+	0x08000000, 0x8f739b73,
+	0x8e738f73, 0x87767376,
+	0x8673ff74, 0x00800000,
+	0x8f739773, 0xb976f807,
+	0x86fe7e7e, 0x86ea6a6a,
+	0xb974f802, 0xbf8a0000,
+	0x95807370, 0xbf810000,
+};
+
+
+static const uint32_t cwsr_trap_gfx9_hex[] = {
+	0xbf820001, 0xbf82015a,
+	0xb8f8f802, 0x89788678,
+	0xb8f1f803, 0x866eff71,
+	0x00000400, 0xbf850034,
+	0x866eff71, 0x00000800,
+	0xbf850003, 0x866eff71,
+	0x00000100, 0xbf840008,
+	0x866eff78, 0x00002000,
+	0xbf840001, 0xbf810000,
+	0x8778ff78, 0x00002000,
+	0x80ec886c, 0x82ed806d,
+	0xb8eef807, 0x866fff6e,
+	0x001f8000, 0x8e6f8b6f,
+	0x8977ff77, 0xfc000000,
+	0x87776f77, 0x896eff6e,
+	0x001f8000, 0xb96ef807,
+	0xb8f0f812, 0xb8f1f813,
+	0x8ef08870, 0xc0071bb8,
+	0x00000000, 0xbf8cc07f,
+	0xc0071c38, 0x00000008,
+	0xbf8cc07f, 0x86ee6e6e,
+	0xbf840001, 0xbe801d6e,
+	0xb8f1f803, 0x8671ff71,
+	0x000001ff, 0xbf850002,
+	0x806c846c, 0x826d806d,
+	0x866dff6d, 0x0000ffff,
+	0x8f6e8b77, 0x866eff6e,
+	0x001f8000, 0xb96ef807,
+	0x86fe7e7e, 0x86ea6a6a,
+	0xb978f802, 0xbe801f6c,
+	0x866dff6d, 0x0000ffff,
+	0xbef00080, 0xb9700283,
+	0xb8f02407, 0x8e709c70,
+	0x876d706d, 0xb8f003c7,
+	0x8e709b70, 0x876d706d,
+	0xb8f0f807, 0x8670ff70,
+	0x00007fff, 0xb970f807,
+	0xbeee007e, 0xbeef007f,
+	0xbefe0180, 0xbf900004,
+	0x87708478, 0xb970f802,
+	0xbf8e0002, 0xbf88fffe,
+	0xb8f02a05, 0x80708170,
+	0x8e708a70, 0xb8f11605,
+	0x80718171, 0x8e718671,
+	0x80707170, 0x80707e70,
+	0x8271807f, 0x8671ff71,
+	0x0000ffff, 0xc0471cb8,
+	0x00000040, 0xbf8cc07f,
+	0xc04b1d38, 0x00000048,
+	0xbf8cc07f, 0xc0431e78,
+	0x00000058, 0xbf8cc07f,
+	0xc0471eb8, 0x0000005c,
+	0xbf8cc07f, 0xbef4007e,
+	0x8675ff7f, 0x0000ffff,
+	0x8775ff75, 0x00040000,
+	0xbef60080, 0xbef700ff,
+	0x00807fac, 0x8670ff7f,
+	0x08000000, 0x8f708370,
+	0x87777077, 0x8670ff7f,
+	0x70000000, 0x8f708170,
+	0x87777077, 0xbefb007c,
+	0xbefa0080, 0xb8fa2a05,
+	0x807a817a, 0x8e7a8a7a,
+	0xb8f01605, 0x80708170,
+	0x8e708670, 0x807a707a,
+	0xbef60084, 0xbef600ff,
+	0x01000000, 0xbefe007c,
+	0xbefc007a, 0xc0611efa,
+	0x0000007c, 0xbf8cc07f,
+	0x807a847a, 0xbefc007e,
+	0xbefe007c, 0xbefc007a,
+	0xc0611b3a, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xbefe007c,
+	0xbefc007a, 0xc0611b7a,
+	0x0000007c, 0xbf8cc07f,
+	0x807a847a, 0xbefc007e,
+	0xbefe007c, 0xbefc007a,
+	0xc0611bba, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xbefe007c,
+	0xbefc007a, 0xc0611bfa,
+	0x0000007c, 0xbf8cc07f,
+	0x807a847a, 0xbefc007e,
+	0xbefe007c, 0xbefc007a,
+	0xc0611e3a, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xb8f1f803,
+	0xbefe007c, 0xbefc007a,
+	0xc0611c7a, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xbefe007c,
+	0xbefc007a, 0xc0611a3a,
+	0x0000007c, 0xbf8cc07f,
+	0x807a847a, 0xbefc007e,
+	0xbefe007c, 0xbefc007a,
+	0xc0611a7a, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0xb8fbf801,
+	0xbefe007c, 0xbefc007a,
+	0xc0611efa, 0x0000007c,
+	0xbf8cc07f, 0x807a847a,
+	0xbefc007e, 0x8670ff7f,
+	0x04000000, 0xbeef0080,
+	0x876f6f70, 0xb8fa2a05,
+	0x807a817a, 0x8e7a8a7a,
+	0xb8f11605, 0x80718171,
+	0x8e718471, 0x8e768271,
+	0xbef600ff, 0x01000000,
+	0xbef20174, 0x80747a74,
+	0x82758075, 0xbefc0080,
+	0xbf800000, 0xbe802b00,
+	0xbe822b02, 0xbe842b04,
+	0xbe862b06, 0xbe882b08,
+	0xbe8a2b0a, 0xbe8c2b0c,
+	0xbe8e2b0e, 0xc06b003a,
+	0x00000000, 0xbf8cc07f,
+	0xc06b013a, 0x00000010,
+	0xbf8cc07f, 0xc06b023a,
+	0x00000020, 0xbf8cc07f,
+	0xc06b033a, 0x00000030,
+	0xbf8cc07f, 0x8074c074,
+	0x82758075, 0x807c907c,
+	0xbf0a717c, 0xbf85ffe7,
+	0xbef40172, 0xbefa0080,
+	0xbefe00c1, 0xbeff00c1,
+	0xbee80080, 0xbee90080,
+	0xbef600ff, 0x01000000,
+	0xe0724000, 0x7a1d0000,
+	0xe0724100, 0x7a1d0100,
+	0xe0724200, 0x7a1d0200,
+	0xe0724300, 0x7a1d0300,
+	0xbefe00c1, 0xbeff00c1,
+	0xb8f14306, 0x8671c171,
+	0xbf84002c, 0xbf8a0000,
+	0x8670ff6f, 0x04000000,
+	0xbf840028, 0x8e718671,
+	0x8e718271, 0xbef60071,
+	0xb8fa2a05, 0x807a817a,
+	0x8e7a8a7a, 0xb8f01605,
+	0x80708170, 0x8e708670,
+	0x807a707a, 0x807aff7a,
+	0x00000080, 0xbef600ff,
+	0x01000000, 0xbefc0080,
+	0xd28c0002, 0x000100c1,
+	0xd28d0003, 0x000204c1,
+	0xd1060002, 0x00011103,
+	0x7e0602ff, 0x00000200,
+	0xbefc00ff, 0x00010000,
+	0xbe800077, 0x8677ff77,
+	0xff7fffff, 0x8777ff77,
+	0x00058000, 0xd8ec0000,
+	0x00000002, 0xbf8cc07f,
+	0xe0765000, 0x7a1d0002,
+	0x68040702, 0xd0c9006a,
+	0x0000e302, 0xbf87fff7,
+	0xbef70000, 0xbefa00ff,
+	0x00000400, 0xbefe00c1,
+	0xbeff00c1, 0xb8f12a05,
+	0x80718171, 0x8e718271,
+	0x8e768871, 0xbef600ff,
+	0x01000000, 0xbefc0084,
+	0xbf0a717c, 0xbf840015,
+	0xbf11017c, 0x8071ff71,
+	0x00001000, 0x7e000300,
+	0x7e020301, 0x7e040302,
+	0x7e060303, 0xe0724000,
+	0x7a1d0000, 0xe0724100,
+	0x7a1d0100, 0xe0724200,
+	0x7a1d0200, 0xe0724300,
+	0x7a1d0300, 0x807c847c,
+	0x807aff7a, 0x00000400,
+	0xbf0a717c, 0xbf85ffef,
+	0xbf9c0000, 0xbf8200d9,
+	0xbef4007e, 0x8675ff7f,
+	0x0000ffff, 0x8775ff75,
+	0x00040000, 0xbef60080,
+	0xbef700ff, 0x00807fac,
+	0x866eff7f, 0x08000000,
+	0x8f6e836e, 0x87776e77,
+	0x866eff7f, 0x70000000,
+	0x8f6e816e, 0x87776e77,
+	0x866eff7f, 0x04000000,
+	0xbf84001e, 0xbefe00c1,
+	0xbeff00c1, 0xb8ef4306,
+	0x866fc16f, 0xbf840019,
+	0x8e6f866f, 0x8e6f826f,
+	0xbef6006f, 0xb8f82a05,
+	0x80788178, 0x8e788a78,
+	0xb8ee1605, 0x806e816e,
+	0x8e6e866e, 0x80786e78,
+	0x8078ff78, 0x00000080,
+	0xbef600ff, 0x01000000,
+	0xbefc0080, 0xe0510000,
+	0x781d0000, 0xe0510100,
+	0x781d0000, 0x807cff7c,
+	0x00000200, 0x8078ff78,
+	0x00000200, 0xbf0a6f7c,
+	0xbf85fff6, 0xbef80080,
+	0xbefe00c1, 0xbeff00c1,
+	0xb8ef2a05, 0x806f816f,
+	0x8e6f826f, 0x8e76886f,
+	0xbef600ff, 0x01000000,
+	0xbeee0078, 0x8078ff78,
+	0x00000400, 0xbefc0084,
+	0xbf11087c, 0x806fff6f,
+	0x00008000, 0xe0524000,
+	0x781d0000, 0xe0524100,
+	0x781d0100, 0xe0524200,
+	0x781d0200, 0xe0524300,
+	0x781d0300, 0xbf8c0f70,
+	0x7e000300, 0x7e020301,
+	0x7e040302, 0x7e060303,
+	0x807c847c, 0x8078ff78,
+	0x00000400, 0xbf0a6f7c,
+	0xbf85ffee, 0xbf9c0000,
+	0xe0524000, 0x6e1d0000,
+	0xe0524100, 0x6e1d0100,
+	0xe0524200, 0x6e1d0200,
+	0xe0524300, 0x6e1d0300,
+	0xb8f82a05, 0x80788178,
+	0x8e788a78, 0xb8ee1605,
+	0x806e816e, 0x8e6e866e,
+	0x80786e78, 0x80f8c078,
+	0xb8ef1605, 0x806f816f,
+	0x8e6f846f, 0x8e76826f,
+	0xbef600ff, 0x01000000,
+	0xbefc006f, 0xc031003a,
+	0x00000078, 0x80f8c078,
+	0xbf8cc07f, 0x80fc907c,
+	0xbf800000, 0xbe802d00,
+	0xbe822d02, 0xbe842d04,
+	0xbe862d06, 0xbe882d08,
+	0xbe8a2d0a, 0xbe8c2d0c,
+	0xbe8e2d0e, 0xbf06807c,
+	0xbf84fff0, 0xb8f82a05,
+	0x80788178, 0x8e788a78,
+	0xb8ee1605, 0x806e816e,
+	0x8e6e866e, 0x80786e78,
+	0xbef60084, 0xbef600ff,
+	0x01000000, 0xc0211bfa,
+	0x00000078, 0x80788478,
+	0xc0211b3a, 0x00000078,
+	0x80788478, 0xc0211b7a,
+	0x00000078, 0x80788478,
+	0xc0211eba, 0x00000078,
+	0x80788478, 0xc0211efa,
+	0x00000078, 0x80788478,
+	0xc0211c3a, 0x00000078,
+	0x80788478, 0xc0211c7a,
+	0x00000078, 0x80788478,
+	0xc0211a3a, 0x00000078,
+	0x80788478, 0xc0211a7a,
+	0x00000078, 0x80788478,
+	0xc0211cfa, 0x00000078,
+	0x80788478, 0xbf8cc07f,
+	0x866dff6d, 0x0000ffff,
+	0xbefc006f, 0xbefe007a,
+	0xbeff007b, 0x866f71ff,
+	0x000003ff, 0xb96f4803,
+	0x866f71ff, 0xfffff800,
+	0x8f6f8b6f, 0xb96fa2c3,
+	0xb973f801, 0xb8ee2a05,
+	0x806e816e, 0x8e6e8a6e,
+	0xb8ef1605, 0x806f816f,
+	0x8e6f866f, 0x806e6f6e,
+	0x806e746e, 0x826f8075,
+	0x866fff6f, 0x0000ffff,
+	0xc0071cb7, 0x00000040,
+	0xc00b1d37, 0x00000048,
+	0xc0031e77, 0x00000058,
+	0xc0071eb7, 0x0000005c,
+	0xbf8cc07f, 0x866fff6d,
+	0xf0000000, 0x8f6f9c6f,
+	0x8e6f906f, 0xbeee0080,
+	0x876e6f6e, 0x866fff6d,
+	0x08000000, 0x8f6f9b6f,
+	0x8e6f8f6f, 0x876e6f6e,
+	0x866fff70, 0x00800000,
+	0x8f6f976f, 0xb96ef807,
+	0x86fe7e7e, 0x86ea6a6a,
+	0xb970f802, 0xbf8a0000,
+	0x95806f6c, 0xbf810000,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index 34eabcdd27a0..658a4c6be8e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -20,9 +20,12 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#if 0
-HW (VI) source code for CWSR trap handler
-#Version 18 + multiple trap handler
+/* To compile this assembly code:
+ * PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex
+ */
+
+/* HW (VI) source code for CWSR trap handler */
+/* Version 18 + multiple trap handler */
 
 // this performance-optimal version was originally from Seven Xu at SRDC
 
@@ -150,7 +153,7 @@ var s_save_spi_init_lo              =   exec_lo
 var s_save_spi_init_hi              =   exec_hi
 
                                                 //tba_lo and tba_hi need to be saved/restored
-var s_save_pc_lo            =   ttmp0           //{TTMP1, TTMP0} = {3??h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+var s_save_pc_lo            =   ttmp0           //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
 var s_save_pc_hi            =   ttmp1
 var s_save_exec_lo          =   ttmp2
 var s_save_exec_hi          =   ttmp3
@@ -1132,259 +1135,3 @@ end
 function get_hwreg_size_bytes
     return 128 //HWREG size 128 bytes
 end
-
-
-#endif
-
-static const uint32_t cwsr_trap_gfx8_hex[] = {
-	0xbf820001, 0xbf820125,
-	0xb8f4f802, 0x89748674,
-	0xb8f5f803, 0x8675ff75,
-	0x00000400, 0xbf850011,
-	0xc00a1e37, 0x00000000,
-	0xbf8c007f, 0x87777978,
-	0xbf840002, 0xb974f802,
-	0xbe801d78, 0xb8f5f803,
-	0x8675ff75, 0x000001ff,
-	0xbf850002, 0x80708470,
-	0x82718071, 0x8671ff71,
-	0x0000ffff, 0xb974f802,
-	0xbe801f70, 0xb8f5f803,
-	0x8675ff75, 0x00000100,
-	0xbf840006, 0xbefa0080,
-	0xb97a0203, 0x8671ff71,
-	0x0000ffff, 0x80f08870,
-	0x82f18071, 0xbefa0080,
-	0xb97a0283, 0xbef60068,
-	0xbef70069, 0xb8fa1c07,
-	0x8e7a9c7a, 0x87717a71,
-	0xb8fa03c7, 0x8e7a9b7a,
-	0x87717a71, 0xb8faf807,
-	0x867aff7a, 0x00007fff,
-	0xb97af807, 0xbef2007e,
-	0xbef3007f, 0xbefe0180,
-	0xbf900004, 0x877a8474,
-	0xb97af802, 0xbf8e0002,
-	0xbf88fffe, 0xbef8007e,
-	0x8679ff7f, 0x0000ffff,
-	0x8779ff79, 0x00040000,
-	0xbefa0080, 0xbefb00ff,
-	0x00807fac, 0x867aff7f,
-	0x08000000, 0x8f7a837a,
-	0x877b7a7b, 0x867aff7f,
-	0x70000000, 0x8f7a817a,
-	0x877b7a7b, 0xbeef007c,
-	0xbeee0080, 0xb8ee2a05,
-	0x806e816e, 0x8e6e8a6e,
-	0xb8fa1605, 0x807a817a,
-	0x8e7a867a, 0x806e7a6e,
-	0xbefa0084, 0xbefa00ff,
-	0x01000000, 0xbefe007c,
-	0xbefc006e, 0xc0611bfc,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0xbefe007c,
-	0xbefc006e, 0xc0611c3c,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0xbefe007c,
-	0xbefc006e, 0xc0611c7c,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0xbefe007c,
-	0xbefc006e, 0xc0611cbc,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0xbefe007c,
-	0xbefc006e, 0xc0611cfc,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0xbefe007c,
-	0xbefc006e, 0xc0611d3c,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0xb8f5f803,
-	0xbefe007c, 0xbefc006e,
-	0xc0611d7c, 0x0000007c,
-	0x806e846e, 0xbefc007e,
-	0xbefe007c, 0xbefc006e,
-	0xc0611dbc, 0x0000007c,
-	0x806e846e, 0xbefc007e,
-	0xbefe007c, 0xbefc006e,
-	0xc0611dfc, 0x0000007c,
-	0x806e846e, 0xbefc007e,
-	0xb8eff801, 0xbefe007c,
-	0xbefc006e, 0xc0611bfc,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0xbefe007c,
-	0xbefc006e, 0xc0611b3c,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0xbefe007c,
-	0xbefc006e, 0xc0611b7c,
-	0x0000007c, 0x806e846e,
-	0xbefc007e, 0x867aff7f,
-	0x04000000, 0xbef30080,
-	0x8773737a, 0xb8ee2a05,
-	0x806e816e, 0x8e6e8a6e,
-	0xb8f51605, 0x80758175,
-	0x8e758475, 0x8e7a8275,
-	0xbefa00ff, 0x01000000,
-	0xbef60178, 0x80786e78,
-	0x82798079, 0xbefc0080,
-	0xbe802b00, 0xbe822b02,
-	0xbe842b04, 0xbe862b06,
-	0xbe882b08, 0xbe8a2b0a,
-	0xbe8c2b0c, 0xbe8e2b0e,
-	0xc06b003c, 0x00000000,
-	0xc06b013c, 0x00000010,
-	0xc06b023c, 0x00000020,
-	0xc06b033c, 0x00000030,
-	0x8078c078, 0x82798079,
-	0x807c907c, 0xbf0a757c,
-	0xbf85ffeb, 0xbef80176,
-	0xbeee0080, 0xbefe00c1,
-	0xbeff00c1, 0xbefa00ff,
-	0x01000000, 0xe0724000,
-	0x6e1e0000, 0xe0724100,
-	0x6e1e0100, 0xe0724200,
-	0x6e1e0200, 0xe0724300,
-	0x6e1e0300, 0xbefe00c1,
-	0xbeff00c1, 0xb8f54306,
-	0x8675c175, 0xbf84002c,
-	0xbf8a0000, 0x867aff73,
-	0x04000000, 0xbf840028,
-	0x8e758675, 0x8e758275,
-	0xbefa0075, 0xb8ee2a05,
-	0x806e816e, 0x8e6e8a6e,
-	0xb8fa1605, 0x807a817a,
-	0x8e7a867a, 0x806e7a6e,
-	0x806eff6e, 0x00000080,
-	0xbefa00ff, 0x01000000,
-	0xbefc0080, 0xd28c0002,
-	0x000100c1, 0xd28d0003,
-	0x000204c1, 0xd1060002,
-	0x00011103, 0x7e0602ff,
-	0x00000200, 0xbefc00ff,
-	0x00010000, 0xbe80007b,
-	0x867bff7b, 0xff7fffff,
-	0x877bff7b, 0x00058000,
-	0xd8ec0000, 0x00000002,
-	0xbf8c007f, 0xe0765000,
-	0x6e1e0002, 0x32040702,
-	0xd0c9006a, 0x0000eb02,
-	0xbf87fff7, 0xbefb0000,
-	0xbeee00ff, 0x00000400,
-	0xbefe00c1, 0xbeff00c1,
-	0xb8f52a05, 0x80758175,
-	0x8e758275, 0x8e7a8875,
-	0xbefa00ff, 0x01000000,
-	0xbefc0084, 0xbf0a757c,
-	0xbf840015, 0xbf11017c,
-	0x8075ff75, 0x00001000,
-	0x7e000300, 0x7e020301,
-	0x7e040302, 0x7e060303,
-	0xe0724000, 0x6e1e0000,
-	0xe0724100, 0x6e1e0100,
-	0xe0724200, 0x6e1e0200,
-	0xe0724300, 0x6e1e0300,
-	0x807c847c, 0x806eff6e,
-	0x00000400, 0xbf0a757c,
-	0xbf85ffef, 0xbf9c0000,
-	0xbf8200ca, 0xbef8007e,
-	0x8679ff7f, 0x0000ffff,
-	0x8779ff79, 0x00040000,
-	0xbefa0080, 0xbefb00ff,
-	0x00807fac, 0x8676ff7f,
-	0x08000000, 0x8f768376,
-	0x877b767b, 0x8676ff7f,
-	0x70000000, 0x8f768176,
-	0x877b767b, 0x8676ff7f,
-	0x04000000, 0xbf84001e,
-	0xbefe00c1, 0xbeff00c1,
-	0xb8f34306, 0x8673c173,
-	0xbf840019, 0x8e738673,
-	0x8e738273, 0xbefa0073,
-	0xb8f22a05, 0x80728172,
-	0x8e728a72, 0xb8f61605,
-	0x80768176, 0x8e768676,
-	0x80727672, 0x8072ff72,
-	0x00000080, 0xbefa00ff,
-	0x01000000, 0xbefc0080,
-	0xe0510000, 0x721e0000,
-	0xe0510100, 0x721e0000,
-	0x807cff7c, 0x00000200,
-	0x8072ff72, 0x00000200,
-	0xbf0a737c, 0xbf85fff6,
-	0xbef20080, 0xbefe00c1,
-	0xbeff00c1, 0xb8f32a05,
-	0x80738173, 0x8e738273,
-	0x8e7a8873, 0xbefa00ff,
-	0x01000000, 0xbef60072,
-	0x8072ff72, 0x00000400,
-	0xbefc0084, 0xbf11087c,
-	0x8073ff73, 0x00008000,
-	0xe0524000, 0x721e0000,
-	0xe0524100, 0x721e0100,
-	0xe0524200, 0x721e0200,
-	0xe0524300, 0x721e0300,
-	0xbf8c0f70, 0x7e000300,
-	0x7e020301, 0x7e040302,
-	0x7e060303, 0x807c847c,
-	0x8072ff72, 0x00000400,
-	0xbf0a737c, 0xbf85ffee,
-	0xbf9c0000, 0xe0524000,
-	0x761e0000, 0xe0524100,
-	0x761e0100, 0xe0524200,
-	0x761e0200, 0xe0524300,
-	0x761e0300, 0xb8f22a05,
-	0x80728172, 0x8e728a72,
-	0xb8f61605, 0x80768176,
-	0x8e768676, 0x80727672,
-	0x80f2c072, 0xb8f31605,
-	0x80738173, 0x8e738473,
-	0x8e7a8273, 0xbefa00ff,
-	0x01000000, 0xbefc0073,
-	0xc031003c, 0x00000072,
-	0x80f2c072, 0xbf8c007f,
-	0x80fc907c, 0xbe802d00,
-	0xbe822d02, 0xbe842d04,
-	0xbe862d06, 0xbe882d08,
-	0xbe8a2d0a, 0xbe8c2d0c,
-	0xbe8e2d0e, 0xbf06807c,
-	0xbf84fff1, 0xb8f22a05,
-	0x80728172, 0x8e728a72,
-	0xb8f61605, 0x80768176,
-	0x8e768676, 0x80727672,
-	0xbefa0084, 0xbefa00ff,
-	0x01000000, 0xc0211cfc,
-	0x00000072, 0x80728472,
-	0xc0211c3c, 0x00000072,
-	0x80728472, 0xc0211c7c,
-	0x00000072, 0x80728472,
-	0xc0211bbc, 0x00000072,
-	0x80728472, 0xc0211bfc,
-	0x00000072, 0x80728472,
-	0xc0211d3c, 0x00000072,
-	0x80728472, 0xc0211d7c,
-	0x00000072, 0x80728472,
-	0xc0211a3c, 0x00000072,
-	0x80728472, 0xc0211a7c,
-	0x00000072, 0x80728472,
-	0xc0211dfc, 0x00000072,
-	0x80728472, 0xc0211b3c,
-	0x00000072, 0x80728472,
-	0xc0211b7c, 0x00000072,
-	0x80728472, 0xbf8c007f,
-	0x8671ff71, 0x0000ffff,
-	0xbefc0073, 0xbefe006e,
-	0xbeff006f, 0x867375ff,
-	0x000003ff, 0xb9734803,
-	0x867375ff, 0xfffff800,
-	0x8f738b73, 0xb973a2c3,
-	0xb977f801, 0x8673ff71,
-	0xf0000000, 0x8f739c73,
-	0x8e739073, 0xbef60080,
-	0x87767376, 0x8673ff71,
-	0x08000000, 0x8f739b73,
-	0x8e738f73, 0x87767376,
-	0x8673ff74, 0x00800000,
-	0x8f739773, 0xb976f807,
-	0x86fe7e7e, 0x86ea6a6a,
-	0xb974f802, 0xbf8a0000,
-	0x95807370, 0xbf810000,
-};
-
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index cac8d4992e04..065f55ae9e41 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -20,9 +20,12 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#if 0
-HW (GFX9) source code for CWSR trap handler
-#Version 18 + multiple trap handler
+/* To compile this assembly code:
+ * PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex
+ */
+
+/* HW (GFX9) source code for CWSR trap handler */
+/* Version 18 + multiple trap handler */
 
 // this performance-optimal version was originally from Seven Xu at SRDC
 
@@ -151,7 +154,7 @@ var S_SAVE_PC_HI_FIRST_REPLAY_MASK	=   0x08000000		//FIXME
 var s_save_spi_init_lo		    =	exec_lo
 var s_save_spi_init_hi		    =	exec_hi
 
-var s_save_pc_lo	    =	ttmp0		//{TTMP1, TTMP0} = {3??h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+var s_save_pc_lo	    =	ttmp0		//{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
 var s_save_pc_hi	    =	ttmp1
 var s_save_exec_lo	    =	ttmp2
 var s_save_exec_hi	    =	ttmp3
@@ -1210,292 +1213,3 @@ function ack_sqc_store_workaround
         s_waitcnt lgkmcnt(0)
     end
 end
-
-
-#endif
-
-static const uint32_t cwsr_trap_gfx9_hex[] = {
-	0xbf820001, 0xbf82015a,
-	0xb8f8f802, 0x89788678,
-	0xb8f1f803, 0x866eff71,
-	0x00000400, 0xbf850034,
-	0x866eff71, 0x00000800,
-	0xbf850003, 0x866eff71,
-	0x00000100, 0xbf840008,
-	0x866eff78, 0x00002000,
-	0xbf840001, 0xbf810000,
-	0x8778ff78, 0x00002000,
-	0x80ec886c, 0x82ed806d,
-	0xb8eef807, 0x866fff6e,
-	0x001f8000, 0x8e6f8b6f,
-	0x8977ff77, 0xfc000000,
-	0x87776f77, 0x896eff6e,
-	0x001f8000, 0xb96ef807,
-	0xb8f0f812, 0xb8f1f813,
-	0x8ef08870, 0xc0071bb8,
-	0x00000000, 0xbf8cc07f,
-	0xc0071c38, 0x00000008,
-	0xbf8cc07f, 0x86ee6e6e,
-	0xbf840001, 0xbe801d6e,
-	0xb8f1f803, 0x8671ff71,
-	0x000001ff, 0xbf850002,
-	0x806c846c, 0x826d806d,
-	0x866dff6d, 0x0000ffff,
-	0x8f6e8b77, 0x866eff6e,
-	0x001f8000, 0xb96ef807,
-	0x86fe7e7e, 0x86ea6a6a,
-	0xb978f802, 0xbe801f6c,
-	0x866dff6d, 0x0000ffff,
-	0xbef00080, 0xb9700283,
-	0xb8f02407, 0x8e709c70,
-	0x876d706d, 0xb8f003c7,
-	0x8e709b70, 0x876d706d,
-	0xb8f0f807, 0x8670ff70,
-	0x00007fff, 0xb970f807,
-	0xbeee007e, 0xbeef007f,
-	0xbefe0180, 0xbf900004,
-	0x87708478, 0xb970f802,
-	0xbf8e0002, 0xbf88fffe,
-	0xb8f02a05, 0x80708170,
-	0x8e708a70, 0xb8f11605,
-	0x80718171, 0x8e718671,
-	0x80707170, 0x80707e70,
-	0x8271807f, 0x8671ff71,
-	0x0000ffff, 0xc0471cb8,
-	0x00000040, 0xbf8cc07f,
-	0xc04b1d38, 0x00000048,
-	0xbf8cc07f, 0xc0431e78,
-	0x00000058, 0xbf8cc07f,
-	0xc0471eb8, 0x0000005c,
-	0xbf8cc07f, 0xbef4007e,
-	0x8675ff7f, 0x0000ffff,
-	0x8775ff75, 0x00040000,
-	0xbef60080, 0xbef700ff,
-	0x00807fac, 0x8670ff7f,
-	0x08000000, 0x8f708370,
-	0x87777077, 0x8670ff7f,
-	0x70000000, 0x8f708170,
-	0x87777077, 0xbefb007c,
-	0xbefa0080, 0xb8fa2a05,
-	0x807a817a, 0x8e7a8a7a,
-	0xb8f01605, 0x80708170,
-	0x8e708670, 0x807a707a,
-	0xbef60084, 0xbef600ff,
-	0x01000000, 0xbefe007c,
-	0xbefc007a, 0xc0611efa,
-	0x0000007c, 0xbf8cc07f,
-	0x807a847a, 0xbefc007e,
-	0xbefe007c, 0xbefc007a,
-	0xc0611b3a, 0x0000007c,
-	0xbf8cc07f, 0x807a847a,
-	0xbefc007e, 0xbefe007c,
-	0xbefc007a, 0xc0611b7a,
-	0x0000007c, 0xbf8cc07f,
-	0x807a847a, 0xbefc007e,
-	0xbefe007c, 0xbefc007a,
-	0xc0611bba, 0x0000007c,
-	0xbf8cc07f, 0x807a847a,
-	0xbefc007e, 0xbefe007c,
-	0xbefc007a, 0xc0611bfa,
-	0x0000007c, 0xbf8cc07f,
-	0x807a847a, 0xbefc007e,
-	0xbefe007c, 0xbefc007a,
-	0xc0611e3a, 0x0000007c,
-	0xbf8cc07f, 0x807a847a,
-	0xbefc007e, 0xb8f1f803,
-	0xbefe007c, 0xbefc007a,
-	0xc0611c7a, 0x0000007c,
-	0xbf8cc07f, 0x807a847a,
-	0xbefc007e, 0xbefe007c,
-	0xbefc007a, 0xc0611a3a,
-	0x0000007c, 0xbf8cc07f,
-	0x807a847a, 0xbefc007e,
-	0xbefe007c, 0xbefc007a,
-	0xc0611a7a, 0x0000007c,
-	0xbf8cc07f, 0x807a847a,
-	0xbefc007e, 0xb8fbf801,
-	0xbefe007c, 0xbefc007a,
-	0xc0611efa, 0x0000007c,
-	0xbf8cc07f, 0x807a847a,
-	0xbefc007e, 0x8670ff7f,
-	0x04000000, 0xbeef0080,
-	0x876f6f70, 0xb8fa2a05,
-	0x807a817a, 0x8e7a8a7a,
-	0xb8f11605, 0x80718171,
-	0x8e718471, 0x8e768271,
-	0xbef600ff, 0x01000000,
-	0xbef20174, 0x80747a74,
-	0x82758075, 0xbefc0080,
-	0xbf800000, 0xbe802b00,
-	0xbe822b02, 0xbe842b04,
-	0xbe862b06, 0xbe882b08,
-	0xbe8a2b0a, 0xbe8c2b0c,
-	0xbe8e2b0e, 0xc06b003a,
-	0x00000000, 0xbf8cc07f,
-	0xc06b013a, 0x00000010,
-	0xbf8cc07f, 0xc06b023a,
-	0x00000020, 0xbf8cc07f,
-	0xc06b033a, 0x00000030,
-	0xbf8cc07f, 0x8074c074,
-	0x82758075, 0x807c907c,
-	0xbf0a717c, 0xbf85ffe7,
-	0xbef40172, 0xbefa0080,
-	0xbefe00c1, 0xbeff00c1,
-	0xbee80080, 0xbee90080,
-	0xbef600ff, 0x01000000,
-	0xe0724000, 0x7a1d0000,
-	0xe0724100, 0x7a1d0100,
-	0xe0724200, 0x7a1d0200,
-	0xe0724300, 0x7a1d0300,
-	0xbefe00c1, 0xbeff00c1,
-	0xb8f14306, 0x8671c171,
-	0xbf84002c, 0xbf8a0000,
-	0x8670ff6f, 0x04000000,
-	0xbf840028, 0x8e718671,
-	0x8e718271, 0xbef60071,
-	0xb8fa2a05, 0x807a817a,
-	0x8e7a8a7a, 0xb8f01605,
-	0x80708170, 0x8e708670,
-	0x807a707a, 0x807aff7a,
-	0x00000080, 0xbef600ff,
-	0x01000000, 0xbefc0080,
-	0xd28c0002, 0x000100c1,
-	0xd28d0003, 0x000204c1,
-	0xd1060002, 0x00011103,
-	0x7e0602ff, 0x00000200,
-	0xbefc00ff, 0x00010000,
-	0xbe800077, 0x8677ff77,
-	0xff7fffff, 0x8777ff77,
-	0x00058000, 0xd8ec0000,
-	0x00000002, 0xbf8cc07f,
-	0xe0765000, 0x7a1d0002,
-	0x68040702, 0xd0c9006a,
-	0x0000e302, 0xbf87fff7,
-	0xbef70000, 0xbefa00ff,
-	0x00000400, 0xbefe00c1,
-	0xbeff00c1, 0xb8f12a05,
-	0x80718171, 0x8e718271,
-	0x8e768871, 0xbef600ff,
-	0x01000000, 0xbefc0084,
-	0xbf0a717c, 0xbf840015,
-	0xbf11017c, 0x8071ff71,
-	0x00001000, 0x7e000300,
-	0x7e020301, 0x7e040302,
-	0x7e060303, 0xe0724000,
-	0x7a1d0000, 0xe0724100,
-	0x7a1d0100, 0xe0724200,
-	0x7a1d0200, 0xe0724300,
-	0x7a1d0300, 0x807c847c,
-	0x807aff7a, 0x00000400,
-	0xbf0a717c, 0xbf85ffef,
-	0xbf9c0000, 0xbf8200d9,
-	0xbef4007e, 0x8675ff7f,
-	0x0000ffff, 0x8775ff75,
-	0x00040000, 0xbef60080,
-	0xbef700ff, 0x00807fac,
-	0x866eff7f, 0x08000000,
-	0x8f6e836e, 0x87776e77,
-	0x866eff7f, 0x70000000,
-	0x8f6e816e, 0x87776e77,
-	0x866eff7f, 0x04000000,
-	0xbf84001e, 0xbefe00c1,
-	0xbeff00c1, 0xb8ef4306,
-	0x866fc16f, 0xbf840019,
-	0x8e6f866f, 0x8e6f826f,
-	0xbef6006f, 0xb8f82a05,
-	0x80788178, 0x8e788a78,
-	0xb8ee1605, 0x806e816e,
-	0x8e6e866e, 0x80786e78,
-	0x8078ff78, 0x00000080,
-	0xbef600ff, 0x01000000,
-	0xbefc0080, 0xe0510000,
-	0x781d0000, 0xe0510100,
-	0x781d0000, 0x807cff7c,
-	0x00000200, 0x8078ff78,
-	0x00000200, 0xbf0a6f7c,
-	0xbf85fff6, 0xbef80080,
-	0xbefe00c1, 0xbeff00c1,
-	0xb8ef2a05, 0x806f816f,
-	0x8e6f826f, 0x8e76886f,
-	0xbef600ff, 0x01000000,
-	0xbeee0078, 0x8078ff78,
-	0x00000400, 0xbefc0084,
-	0xbf11087c, 0x806fff6f,
-	0x00008000, 0xe0524000,
-	0x781d0000, 0xe0524100,
-	0x781d0100, 0xe0524200,
-	0x781d0200, 0xe0524300,
-	0x781d0300, 0xbf8c0f70,
-	0x7e000300, 0x7e020301,
-	0x7e040302, 0x7e060303,
-	0x807c847c, 0x8078ff78,
-	0x00000400, 0xbf0a6f7c,
-	0xbf85ffee, 0xbf9c0000,
-	0xe0524000, 0x6e1d0000,
-	0xe0524100, 0x6e1d0100,
-	0xe0524200, 0x6e1d0200,
-	0xe0524300, 0x6e1d0300,
-	0xb8f82a05, 0x80788178,
-	0x8e788a78, 0xb8ee1605,
-	0x806e816e, 0x8e6e866e,
-	0x80786e78, 0x80f8c078,
-	0xb8ef1605, 0x806f816f,
-	0x8e6f846f, 0x8e76826f,
-	0xbef600ff, 0x01000000,
-	0xbefc006f, 0xc031003a,
-	0x00000078, 0x80f8c078,
-	0xbf8cc07f, 0x80fc907c,
-	0xbf800000, 0xbe802d00,
-	0xbe822d02, 0xbe842d04,
-	0xbe862d06, 0xbe882d08,
-	0xbe8a2d0a, 0xbe8c2d0c,
-	0xbe8e2d0e, 0xbf06807c,
-	0xbf84fff0, 0xb8f82a05,
-	0x80788178, 0x8e788a78,
-	0xb8ee1605, 0x806e816e,
-	0x8e6e866e, 0x80786e78,
-	0xbef60084, 0xbef600ff,
-	0x01000000, 0xc0211bfa,
-	0x00000078, 0x80788478,
-	0xc0211b3a, 0x00000078,
-	0x80788478, 0xc0211b7a,
-	0x00000078, 0x80788478,
-	0xc0211eba, 0x00000078,
-	0x80788478, 0xc0211efa,
-	0x00000078, 0x80788478,
-	0xc0211c3a, 0x00000078,
-	0x80788478, 0xc0211c7a,
-	0x00000078, 0x80788478,
-	0xc0211a3a, 0x00000078,
-	0x80788478, 0xc0211a7a,
-	0x00000078, 0x80788478,
-	0xc0211cfa, 0x00000078,
-	0x80788478, 0xbf8cc07f,
-	0x866dff6d, 0x0000ffff,
-	0xbefc006f, 0xbefe007a,
-	0xbeff007b, 0x866f71ff,
-	0x000003ff, 0xb96f4803,
-	0x866f71ff, 0xfffff800,
-	0x8f6f8b6f, 0xb96fa2c3,
-	0xb973f801, 0xb8ee2a05,
-	0x806e816e, 0x8e6e8a6e,
-	0xb8ef1605, 0x806f816f,
-	0x8e6f866f, 0x806e6f6e,
-	0x806e746e, 0x826f8075,
-	0x866fff6f, 0x0000ffff,
-	0xc0071cb7, 0x00000040,
-	0xc00b1d37, 0x00000048,
-	0xc0031e77, 0x00000058,
-	0xc0071eb7, 0x0000005c,
-	0xbf8cc07f, 0x866fff6d,
-	0xf0000000, 0x8f6f9c6f,
-	0x8e6f906f, 0xbeee0080,
-	0x876e6f6e, 0x866fff6d,
-	0x08000000, 0x8f6f9b6f,
-	0x8e6f8f6f, 0x876e6f6e,
-	0x866fff70, 0x00800000,
-	0x8f6f976f, 0xb96ef807,
-	0x86fe7e7e, 0x86ea6a6a,
-	0xb970f802, 0xbf8a0000,
-	0x95806f6c, 0xbf810000,
-};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index c1d9e2772cbc..7ee6cec2c060 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,8 +26,7 @@
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
 #include "kfd_pm4_headers_vi.h"
-#include "cwsr_trap_handler_gfx8.asm"
-#include "cwsr_trap_handler_gfx9.asm"
+#include "cwsr_trap_handler.h"
 #include "kfd_iommu.h"
 
 #define MQD_SIZE_ALIGNED 768

From f8ea72d097965617bba0d6773fd29d44070c5e1a Mon Sep 17 00:00:00 2001
From: Yong Zhao <yong.zhao@amd.com>
Date: Tue, 1 May 2018 17:56:07 -0400
Subject: [PATCH 0386/1286] drm/amdkfd: Fix CP soft hang on APUs

The problem happens on Raven and Carrizo. The context save handler
should not clear the high bits of PC_HI before extracting the bits
of IB_STS.

The bug is not relevant to VEGA10 until we enable demand paging.
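
As a hedged illustration only (plain C; the saved-field mask name is assumed,
not taken from the handler), the ordering constraint is "extract before you
mask":

    uint32_t ib_sts_bits;

    /* read out the IB_STS bits stashed in the upper half first ... */
    ib_sts_bits = saved_pc_hi & IB_STS_SAVED_MASK;  /* IB_STS_SAVED_MASK: assumed name */
    /* ... and only then trim PC_HI down to pc[47:32] */
    saved_pc_hi &= 0x0000ffff;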

Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h        | 4 ++--
 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 3 +--
 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 3 +--
 3 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index a546a219d025..f68aef02fc1f 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -253,7 +253,6 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
 	0x00000072, 0x80728472,
 	0xc0211b7c, 0x00000072,
 	0x80728472, 0xbf8c007f,
-	0x8671ff71, 0x0000ffff,
 	0xbefc0073, 0xbefe006e,
 	0xbeff006f, 0x867375ff,
 	0x000003ff, 0xb9734803,
@@ -267,6 +266,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
 	0x8e738f73, 0x87767376,
 	0x8673ff74, 0x00800000,
 	0x8f739773, 0xb976f807,
+	0x8671ff71, 0x0000ffff,
 	0x86fe7e7e, 0x86ea6a6a,
 	0xb974f802, 0xbf8a0000,
 	0x95807370, 0xbf810000,
@@ -530,7 +530,6 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
 	0x00000078, 0x80788478,
 	0xc0211cfa, 0x00000078,
 	0x80788478, 0xbf8cc07f,
-	0x866dff6d, 0x0000ffff,
 	0xbefc006f, 0xbefe007a,
 	0xbeff007b, 0x866f71ff,
 	0x000003ff, 0xb96f4803,
@@ -554,6 +553,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
 	0x8e6f8f6f, 0x876e6f6e,
 	0x866fff70, 0x00800000,
 	0x8f6f976f, 0xb96ef807,
+	0x866dff6d, 0x0000ffff,
 	0x86fe7e7e, 0x86ea6a6a,
 	0xb970f802, 0xbf8a0000,
 	0x95806f6c, 0xbf810000,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index 658a4c6be8e4..a2a04bb64096 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -1015,8 +1015,6 @@ end
 
     s_waitcnt       lgkmcnt(0)                                                                                      //from now on, it is safe to restore STATUS and IB_STS
 
-    s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff      //pc[47:32]        //Do it here in order not to affect STATUS
-
     //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
     if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
         s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8            //pc[31:0]+8     //two back-to-back s_trap are used (first for save and second for restore)
@@ -1052,6 +1050,7 @@ end
     s_lshr_b32      s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
     s_setreg_b32    hwreg(HW_REG_IB_STS),   s_restore_tmp
 
+    s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff      //pc[47:32]        //Do it here in order not to affect STATUS
     s_and_b64    exec, exec, exec  // Restore STATUS.EXECZ, not writable by s_setreg_b32
     s_and_b64    vcc, vcc, vcc  // Restore STATUS.VCCZ, not writable by s_setreg_b32
     s_setreg_b32    hwreg(HW_REG_STATUS),   s_restore_status     // SCC is included, which is changed by previous salu
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 065f55ae9e41..998be96be736 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -1067,8 +1067,6 @@ end
 
     s_waitcnt	    lgkmcnt(0)											    //from now on, it is safe to restore STATUS and IB_STS
 
-    s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff	//pc[47:32]	   //Do it here in order not to affect STATUS
-
     //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
     if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
 	s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8		 //pc[31:0]+8	  //two back-to-back s_trap are used (first for save and second for restore)
@@ -1119,6 +1117,7 @@ end
     s_lshr_b32	    s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
     s_setreg_b32    hwreg(HW_REG_IB_STS),   s_restore_tmp
 
+    s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff	//pc[47:32]	   //Do it here in order not to affect STATUS
     s_and_b64	 exec, exec, exec  // Restore STATUS.EXECZ, not writable by s_setreg_b32
     s_and_b64	 vcc, vcc, vcc	// Restore STATUS.VCCZ, not writable by s_setreg_b32
     s_setreg_b32    hwreg(HW_REG_STATUS),   s_restore_status	 // SCC is included, which is changed by previous salu

From eeb27b7eb3826c23cc5688c47845e7309f20fc32 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 1 May 2018 17:56:08 -0400
Subject: [PATCH 0387/1286] drm/amdkfd: Fix signal handling performance again

It turns out that idr_for_each_entry is really slow compared to just
iterating over the slots. Based on measurements, the difference is
estimated to be about a factor of 64. That means using idr_for_each_entry
is only worth it with very few allocated events.
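
A hedged sketch of the resulting threshold check (the two lookup helpers are
placeholder names, not the actual kfd_events.c functions):

    /* The IDR walk is roughly 64x slower per entry than a raw slot scan,
     * so it only wins when very few events are allocated.
     */
    if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64)
            lookup_events_via_idr(p, partial_id, valid_id_bits); /* few events */
    else
            scan_all_signal_slots(p);                            /* many events */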

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_events.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index bccf2f761177..5562e94e786a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -496,7 +496,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
 			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
 					     partial_id, valid_id_bits);
 
-		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
+		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
 			/* With relatively few events, it's faster to
 			 * iterate over the event IDR
 			 */

From ccb76b149e1c849c0aee6b5043aed74d41064ad6 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 1 May 2018 17:56:09 -0400
Subject: [PATCH 0388/1286] drm/amdkfd: Remove initialization of
 cp_hqd_ib_control on CIK

The initialization is not necessary. amd-kfd-staging and ROCm
releases have worked without it for two years.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 2bc49c62cc8c..06eaa218eba6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -79,10 +79,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
 	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
 
-	m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
-	/* Although WinKFD writes this, I suspect it should not be necessary */
-	m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
-
 	m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
 				QUANTUM_DURATION(10);
 

From bfdcbfd25516eba6cd7b9862779a325ec26006ad Mon Sep 17 00:00:00 2001
From: Ben Goz <ben.goz@amd.com>
Date: Tue, 1 May 2018 17:56:10 -0400
Subject: [PATCH 0389/1286] drm/amdkfd: Locking PM mutex while allocating IB
 buffer

Signed-off-by: Ben Goz <ben.goz@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 91f0350b6180..c317feb43f69 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -94,12 +94,14 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 
 	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
 
+	mutex_lock(&pm->lock);
+
 	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
 					&pm->ib_buffer_obj);
 
 	if (retval) {
 		pr_err("Failed to allocate runlist IB\n");
-		return retval;
+		goto out;
 	}
 
 	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
@@ -107,6 +109,9 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 
 	memset(*rl_buffer, 0, *rl_buffer_size);
 	pm->allocated = true;
+
+out:
+	mutex_unlock(&pm->lock);
 	return retval;
 }
 

From 2533f0741e5f7259393d7edecb4bca3106c583c2 Mon Sep 17 00:00:00 2001
From: Shaoyun Liu <Shaoyun.Liu@amd.com>
Date: Tue, 1 May 2018 17:56:11 -0400
Subject: [PATCH 0390/1286] drm/amdkfd: Remove queue node when destroy queue
 failed

HWS may hang in the middle of a destroy-queue operation. Remove the queue
from the process queue list so it won't be freed again in the future.

Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 3045aebdc3f7..d65ce0436b31 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,7 +241,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	}
 
 	if (retval != 0) {
-		pr_err("DQM create queue failed\n");
+		pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
+			pqm->process->pasid, type, retval);
 		goto err_create_queue;
 	}
 
@@ -319,8 +320,11 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 		dqm = pqn->q->device->dqm;
 		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
 		if (retval) {
-			pr_debug("Destroy queue failed, returned %d\n", retval);
-			goto err_destroy_queue;
+			pr_err("Pasid %d destroy queue %d failed, ret %d\n",
+				pqm->process->pasid,
+				pqn->q->properties.queue_id, retval);
+			if (retval != -ETIME)
+				goto err_destroy_queue;
 		}
 		uninit_queue(pqn->q);
 	}

From c129db1206bd11ab0531a4d91a455a0809acae0e Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Tue, 1 May 2018 17:56:12 -0400
Subject: [PATCH 0391/1286] drm/amdkfd: Add sanity checks in IRQ handlers

Only accept interrupts from KFD VMIDs. Just checking for a PASID may
not be enough because amdgpu started using PASIDs to map VM faults
to processes.

Warn if an IRQ doesn't have a valid PASID (indicating a firmware bug).
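
A hedged sketch of the filter both ISRs now apply up front (the bitfield
extraction mirrors the CIK variant in the diff below; the v9 path uses the
SOC15_*_FROM_IH_ENTRY macros instead):

    unsigned int vmid, pasid;

    /* Only handle interrupts from KFD VMIDs */
    vmid = (ihre->ring_id & 0x0000ff00) >> 8;
    if (vmid < dev->vm_info.first_vmid_kfd ||
        vmid > dev->vm_info.last_vmid_kfd)
            return 0;

    /* A zero PASID indicates a firmware bug: warn once and drop the IRQ */
    pasid = (ihre->ring_id & 0xffff0000) >> 16;
    if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
            return 0;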

Suggested-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Suggested-by: Oak Zeng <Oak.Zeng@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 .../gpu/drm/amd/amdkfd/cik_event_interrupt.c  | 20 +++++++---
 .../gpu/drm/amd/amdkfd/kfd_int_process_v9.c   | 40 +++++++++++--------
 2 files changed, 39 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 3d5ccb3755d4..49df6c791cfc 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -27,18 +27,28 @@
 static bool cik_event_interrupt_isr(struct kfd_dev *dev,
 					const uint32_t *ih_ring_entry)
 {
-	unsigned int pasid;
 	const struct cik_ih_ring_entry *ihre =
 			(const struct cik_ih_ring_entry *)ih_ring_entry;
+	unsigned int vmid, pasid;
 
+	/* Only handle interrupts from KFD VMIDs */
+	vmid  = (ihre->ring_id & 0x0000ff00) >> 8;
+	if (vmid < dev->vm_info.first_vmid_kfd ||
+	    vmid > dev->vm_info.last_vmid_kfd)
+		return 0;
+
+	/* If there is no valid PASID, it's likely a firmware bug */
 	pasid = (ihre->ring_id & 0xffff0000) >> 16;
+	if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
+		return 0;
 
-	/* Do not process in ISR, just request it to be forwarded to WQ. */
-	return (pasid != 0) &&
-		(ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+	/* Interrupt types we care about: various signals and faults.
+	 * They will be forwarded to a work queue (see below).
+	 */
+	return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
 		ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
 		ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
-		ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE);
+		ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE;
 }
 
 static void cik_event_interrupt_wq(struct kfd_dev *dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 39d41155581f..37029baa3346 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -29,27 +29,35 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
 					const uint32_t *ih_ring_entry)
 {
 	uint16_t source_id, client_id, pasid, vmid;
+	const uint32_t *data = ih_ring_entry;
+
+	/* Only handle interrupts from KFD VMIDs */
+	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+	if (vmid < dev->vm_info.first_vmid_kfd ||
+	    vmid > dev->vm_info.last_vmid_kfd)
+		return 0;
+
+	/* If there is no valid PASID, it's likely a firmware bug */
+	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+	if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
+		return 0;
 
 	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
 	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
-	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
-	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
 
-	if (pasid) {
-		const uint32_t *data = ih_ring_entry;
+	pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
+		 client_id, source_id, pasid);
+	pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+		 data[0], data[1], data[2], data[3],
+		 data[4], data[5], data[6], data[7]);
 
-		pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
-			 client_id, source_id, pasid);
-		pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
-			 data[0], data[1], data[2], data[3],
-			 data[4], data[5], data[6], data[7]);
-	}
-
-	return (pasid != 0) &&
-		(source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
-		 source_id == SOC15_INTSRC_SDMA_TRAP ||
-		 source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
-		 source_id == SOC15_INTSRC_CP_BAD_OPCODE);
+	/* Interrupt types we care about: various signals and faults.
+	 * They will be forwarded to a work queue (see below).
+	 */
+	return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+		source_id == SOC15_INTSRC_SDMA_TRAP ||
+		source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+		source_id == SOC15_INTSRC_CP_BAD_OPCODE;
 }
 
 static void event_interrupt_wq_v9(struct kfd_dev *dev,

From af47b390273f1068bdb1d01263a81948c4e2f97a Mon Sep 17 00:00:00 2001
From: Laura Abbott <labbott@redhat.com>
Date: Fri, 13 Apr 2018 14:24:12 -0700
Subject: [PATCH 0392/1286] drm/amdkfd: Remove vla
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There's an ongoing effort to remove VLAs[1] from the kernel to eventually
turn on -Wvla. Switch to a constant value that covers all hardware.

[1] https://lkml.org/lkml/2018/3/7/621
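
A hedged sketch of the general replacement pattern, using the constant and
fields introduced in the diff below:

    uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE]; /* worst case across all hardware */

    /* with the VLA gone, guard against a device reporting a larger entry */
    if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) {
            dev_err_once(kfd_chardev(), "Ring entry too small\n");
            return;
    }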

Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 8 +++++---
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h      | 2 ++
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 035c351f47c5..db6d9336b80d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -139,10 +139,12 @@ static void interrupt_wq(struct work_struct *work)
 {
 	struct kfd_dev *dev = container_of(work, struct kfd_dev,
 						interrupt_work);
+	uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
 
-	uint32_t ih_ring_entry[DIV_ROUND_UP(
-				dev->device_info->ih_ring_entry_size,
-				sizeof(uint32_t))];
+	if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) {
+		dev_err_once(kfd_chardev(), "Ring entry too small\n");
+		return;
+	}
 
 	while (dequeue_ih_ring_entry(dev, ih_ring_entry))
 		dev->device_info->event_interrupt_class->interrupt_wq(dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 10d5b5445195..5e3990bb4c4b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -39,6 +39,8 @@
 
 #include "amd_shared.h"
 
+#define KFD_MAX_RING_ENTRY_SIZE	8
+
 #define KFD_SYSFS_FILE_MODE 0444
 
 /* GPU ID hash width in bits */

From f4ecfbfc32ed0cb502374164638d14c4fb03e916 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 14 Apr 2018 13:27:54 +0100
Subject: [PATCH 0393/1286] drm/i915: Check whitelist registers across resets

Add a selftest to ensure that we restore the whitelisted registers after
rewriting the registers every time they might be scrubbed, e.g. on module
load, reset and resume. For the other volatile workaround registers, we
export their presence via debugfs and check them in igt/gem_workarounds.
However, we don't export the whitelist, and rather than do so, let's test
it directly in the kernel.

The test we use is to read the registers back from the CS (this helps us
be sure that the registers will be valid for MI_LRI etc). In order to
generate the expected list, we split intel_whitelist_workarounds_emit
into two phases, the first to build the list and the second to apply it.
Inside the test, we only build the list and then check that list against
the hw.

v2: Filter out pre-gen8 as they do not have RING_NONPRIV.
v3: Drop unused engine parameter; there are no plans to use it now or in the future.
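
A hedged sketch of the resulting two-phase flow (both helpers come from the
diff below; the selftest reuses only the build phase):

    struct whitelist w;

    /* phase 1: compute the expected RING_NONPRIV contents for this engine,
     * phase 2: program the registers, padding unused slots with RING_NOPID
     */
    whitelist_apply(engine, whitelist_build(engine, &w));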

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180414122754.569-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c           |  14 +-
 drivers/gpu/drm/i915/i915_drv.h               |   1 -
 drivers/gpu/drm/i915/intel_lrc.c              |   8 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c       |   4 +-
 drivers/gpu/drm/i915/intel_workarounds.c      | 208 ++++++-------
 drivers/gpu/drm/i915/intel_workarounds.h      |   2 +-
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 .../drm/i915/selftests/intel_workarounds.c    | 284 ++++++++++++++++++
 8 files changed, 381 insertions(+), 141 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_workarounds.c

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2e6652a9bb9e..e0274f41bc76 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3304,24 +3304,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
-	int i;
-	int ret;
-	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
-	enum intel_engine_id id;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
+	int i;
 
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-	for_each_engine(engine, dev_priv, id)
-		seq_printf(m, "HW whitelist count for %s: %d\n",
-			   engine->name, workarounds->hw_whitelist_count[id]);
 	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
@@ -3337,7 +3326,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	}
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e50d9589d6e3..8e8667d9b084 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1297,7 +1297,6 @@ struct i915_wa_reg {
 struct i915_workarounds {
 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
 	u32 count;
-	u32 hw_whitelist_count[I915_NUM_ENGINES];
 };
 
 struct i915_virtual_gpu {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c7c85134a84a..4f728587a756 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1744,9 +1744,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	ret = intel_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	intel_whitelist_workarounds_apply(engine);
 
 	/* We need to disable the AsyncFlip performance optimisations in order
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -1769,9 +1767,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	ret = intel_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	intel_whitelist_workarounds_apply(engine);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 757bb0990c07..c68ac605b8a9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -618,9 +618,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	ret = intel_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	intel_whitelist_workarounds_apply(engine);
 
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
 	if (IS_GEN(dev_priv, 4, 6))
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index bbbf4ed4aa97..ec9d340fcb00 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -687,170 +687,144 @@ void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 		MISSING_CASE(INTEL_GEN(dev_priv));
 }
 
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
-				 i915_reg_t reg)
+struct whitelist {
+	i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
+	unsigned int count;
+	u32 nopid;
+};
+
+static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct i915_workarounds *wa = &dev_priv->workarounds;
-	const unsigned int index = wa->hw_whitelist_count[engine->id];
+	if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+		return;
 
-	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
-		return -EINVAL;
-
-	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
-		   i915_mmio_reg_offset(reg));
-	wa->hw_whitelist_count[engine->id]++;
-
-	return 0;
+	w->reg[w->count++] = reg;
 }
 
-static int bdw_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void bdw_whitelist_build(struct whitelist *w)
 {
-	return 0;
 }
 
-static int chv_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void chv_whitelist_build(struct whitelist *w)
 {
-	return 0;
 }
 
-static int gen9_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void gen9_whitelist_build(struct whitelist *w)
 {
-	int ret;
-
 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
-	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
-	if (ret)
-		return ret;
+	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
 
 	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-	if (ret)
-		return ret;
+	whitelist_reg(w, GEN8_CS_CHICKEN1);
 
 	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(w, GEN8_HDC_CHICKEN1);
 }
 
-static int skl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void skl_whitelist_build(struct whitelist *w)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	gen9_whitelist_build(w);
 
 	/* WaDisableLSQCROPERFforOCL:skl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(w, GEN8_L3SQCREG4);
 }
 
-static int bxt_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void bxt_whitelist_build(struct whitelist *w)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
-
-	return 0;
+	gen9_whitelist_build(w);
 }
 
-static int kbl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void kbl_whitelist_build(struct whitelist *w)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	gen9_whitelist_build(w);
 
 	/* WaDisableLSQCROPERFforOCL:kbl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(w, GEN8_L3SQCREG4);
 }
 
-static int glk_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void glk_whitelist_build(struct whitelist *w)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	gen9_whitelist_build(w);
 
 	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
-	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
 }
 
-static int cfl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void cfl_whitelist_build(struct whitelist *w)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
-
-	return 0;
+	gen9_whitelist_build(w);
 }
 
-static int cnl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void cnl_whitelist_build(struct whitelist *w)
 {
-	int ret;
-
 	/* WaEnablePreemptionGranularityControlByUMD:cnl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(w, GEN8_CS_CHICKEN1);
 }
 
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
+					 struct whitelist *w)
+{
+	struct drm_i915_private *i915 = engine->i915;
+
+	GEM_BUG_ON(engine->id != RCS);
+
+	w->count = 0;
+	w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+
+	if (INTEL_GEN(i915) < 8)
+		return NULL;
+	else if (IS_BROADWELL(i915))
+		bdw_whitelist_build(w);
+	else if (IS_CHERRYVIEW(i915))
+		chv_whitelist_build(w);
+	else if (IS_SKYLAKE(i915))
+		skl_whitelist_build(w);
+	else if (IS_BROXTON(i915))
+		bxt_whitelist_build(w);
+	else if (IS_KABYLAKE(i915))
+		kbl_whitelist_build(w);
+	else if (IS_GEMINILAKE(i915))
+		glk_whitelist_build(w);
+	else if (IS_COFFEELAKE(i915))
+		cfl_whitelist_build(w);
+	else if (IS_CANNONLAKE(i915))
+		cnl_whitelist_build(w);
+	else
+		MISSING_CASE(INTEL_GEN(i915));
+
+	return w;
+}
+
+static void whitelist_apply(struct intel_engine_cs *engine,
+			    const struct whitelist *w)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	int err = 0;
+	const u32 base = engine->mmio_base;
+	unsigned int i;
 
-	WARN_ON(engine->id != RCS);
+	if (!w)
+		return;
 
-	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
 
-	if (INTEL_GEN(dev_priv) < 8)
-		err = 0;
-	else if (IS_BROADWELL(dev_priv))
-		err = bdw_whitelist_workarounds_apply(engine);
-	else if (IS_CHERRYVIEW(dev_priv))
-		err = chv_whitelist_workarounds_apply(engine);
-	else if (IS_SKYLAKE(dev_priv))
-		err = skl_whitelist_workarounds_apply(engine);
-	else if (IS_BROXTON(dev_priv))
-		err = bxt_whitelist_workarounds_apply(engine);
-	else if (IS_KABYLAKE(dev_priv))
-		err = kbl_whitelist_workarounds_apply(engine);
-	else if (IS_GEMINILAKE(dev_priv))
-		err = glk_whitelist_workarounds_apply(engine);
-	else if (IS_COFFEELAKE(dev_priv))
-		err = cfl_whitelist_workarounds_apply(engine);
-	else if (IS_CANNONLAKE(dev_priv))
-		err = cnl_whitelist_workarounds_apply(engine);
-	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
-	if (err)
-		return err;
+	for (i = 0; i < w->count; i++)
+		I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
+			      i915_mmio_reg_offset(w->reg[i]));
 
-	DRM_DEBUG_DRIVER("%s: Number of whitelist w/a: %d\n", engine->name,
-			 dev_priv->workarounds.hw_whitelist_count[engine->id]);
-	return 0;
+	/* And clear the rest just in case of garbage */
+	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
+		I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+
+	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
 }
+
+void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+{
+	struct whitelist w;
+
+	whitelist_apply(engine, whitelist_build(engine, &w));
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_workarounds.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index d9b0cc5afb4a..b11d0623e626 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -12,6 +12,6 @@ int intel_ctx_workarounds_emit(struct i915_request *rq);
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
 
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
 
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 8bf6aa573226..a00e2bd08bce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,6 +11,7 @@
  */
 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
 selftest(uncore, intel_uncore_live_selftests)
+selftest(workarounds, intel_workarounds_live_selftests)
 selftest(requests, i915_request_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
new file mode 100644
index 000000000000..fe7deca33d77
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -0,0 +1,284 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_context.h"
+
+static struct drm_i915_gem_object *
+read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_object *result;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	const u32 base = engine->mmio_base;
+	u32 srm, *cs;
+	int err;
+	int i;
+
+	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+	if (IS_ERR(result))
+		return result;
+
+	i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+
+	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_obj;
+	}
+	memset(cs, 0xc5, PAGE_SIZE);
+	i915_gem_object_unpin_map(result);
+
+	vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+	if (err)
+		goto err_obj;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_pin;
+	}
+
+	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+	if (INTEL_GEN(ctx->i915) >= 8)
+		srm++;
+
+	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
+	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+		*cs++ = srm;
+		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
+		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+		*cs++ = 0;
+	}
+	intel_ring_advance(rq, cs);
+
+	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	reservation_object_lock(vma->resv, NULL);
+	reservation_object_add_excl_fence(vma->resv, &rq->fence);
+	reservation_object_unlock(vma->resv);
+
+	i915_gem_object_get(result);
+	i915_gem_object_set_active_reference(result);
+
+	__i915_request_add(rq, true);
+	i915_vma_unpin(vma);
+
+	return result;
+
+err_pin:
+	i915_vma_unpin(vma);
+err_obj:
+	i915_gem_object_put(result);
+	return ERR_PTR(err);
+}
+
+static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+{
+	return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+}
+
+static void print_results(const struct whitelist *w, const u32 *results)
+{
+	unsigned int i;
+
+	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+		u32 expected = get_whitelist_reg(w, i);
+		u32 actual = results[i];
+
+		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
+			i, expected, actual);
+	}
+}
+
+static int check_whitelist(const struct whitelist *w,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_object *results;
+	u32 *vaddr;
+	int err;
+	int i;
+
+	results = read_nonprivs(ctx, engine);
+	if (IS_ERR(results))
+		return PTR_ERR(results);
+
+	err = i915_gem_object_set_to_cpu_domain(results, false);
+	if (err)
+		goto out_put;
+
+	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto out_put;
+	}
+
+	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+		u32 expected = get_whitelist_reg(w, i);
+		u32 actual = vaddr[i];
+
+		if (expected != actual) {
+			print_results(w, vaddr);
+			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
+			       i, expected, actual);
+
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	i915_gem_object_unpin_map(results);
+out_put:
+	i915_gem_object_put(results);
+	return err;
+}
+
+static int do_device_reset(struct intel_engine_cs *engine)
+{
+	i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+	return 0;
+}
+
+static int do_engine_reset(struct intel_engine_cs *engine)
+{
+	return i915_reset_engine(engine, NULL);
+}
+
+static int switch_to_scratch_context(struct intel_engine_cs *engine)
+{
+	struct i915_gem_context *ctx;
+	struct i915_request *rq;
+
+	ctx = kernel_context(engine->i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	rq = i915_request_alloc(engine, ctx);
+	kernel_context_close(ctx);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_add(rq);
+
+	return 0;
+}
+
+static int check_whitelist_across_reset(struct intel_engine_cs *engine,
+					int (*reset)(struct intel_engine_cs *),
+					const struct whitelist *w,
+					const char *name)
+{
+	struct i915_gem_context *ctx;
+	int err;
+
+	ctx = kernel_context(engine->i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	err = check_whitelist(w, ctx, engine);
+	if (err) {
+		pr_err("Invalid whitelist *before* %s reset!\n", name);
+		goto out;
+	}
+
+	err = switch_to_scratch_context(engine);
+	if (err)
+		goto out;
+
+	err = reset(engine);
+	if (err) {
+		pr_err("%s reset failed\n", name);
+		goto out;
+	}
+
+	err = check_whitelist(w, ctx, engine);
+	if (err) {
+		pr_err("Whitelist not preserved in context across %s reset!\n",
+		       name);
+		goto out;
+	}
+
+	kernel_context_close(ctx);
+
+	ctx = kernel_context(engine->i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	err = check_whitelist(w, ctx, engine);
+	if (err) {
+		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
+		       name);
+		goto out;
+	}
+
+out:
+	kernel_context_close(ctx);
+	return err;
+}
+
+static int live_reset_whitelist(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine = i915->engine[RCS];
+	struct i915_gpu_error *error = &i915->gpu_error;
+	struct whitelist w;
+	int err;
+
+	/* If we reset the gpu, we should not lose the RING_NONPRIV */
+
+	if (!engine)
+		return 0;
+
+	if (!whitelist_build(engine, &w))
+		return 0;
+
+	pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
+
+	set_bit(I915_RESET_BACKOFF, &error->flags);
+	set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+
+	if (intel_has_reset_engine(i915)) {
+		err = check_whitelist_across_reset(engine,
+						   do_engine_reset, &w,
+						   "engine");
+		if (err)
+			goto out;
+	}
+
+	if (intel_has_gpu_reset(i915)) {
+		err = check_whitelist_across_reset(engine,
+						   do_device_reset, &w,
+						   "device");
+		if (err)
+			goto out;
+	}
+
+out:
+	clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+	clear_bit(I915_RESET_BACKOFF, &error->flags);
+	return err;
+}
+
+int intel_workarounds_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_reset_whitelist),
+	};
+	int err;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	err = i915_subtests(tests, i915);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return err;
+}

From 9f172f6fbd243759c808d97bd83c95e49325b2c9 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 14 Apr 2018 10:12:33 +0100
Subject: [PATCH 0394/1286] drm/i915: Call i915_perf_fini() on init_hw error
 unwind

We have to clean up after i915_perf_init(), even on the error path, as it
passes a pointer into the module to the sysfs core. If we fail to
unregister the sysctl table, we leave a dangling pointer which may then
explode at any time later.
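
A hedged outline of the unwind ordering after this change (labels as in the
diff below; intermediate init steps elided):

    ret = i915_ggtt_probe_hw(dev_priv);
    if (ret)
            goto err_perf;  /* perf was initialised earlier, so undo it too */

    /* ... further init steps, each jumping to err_ggtt on failure ... */

    err_ggtt:
            i915_ggtt_cleanup_hw(dev_priv);
    err_perf:
            i915_perf_fini(dev_priv);
            return ret;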

Fixes: 9f9b2792b6d3 ("drm/i915/perf: reuse timestamp frequency from device info")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180414091233.32224-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f770be18b2d7..840020681985 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1101,30 +1101,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	ret = i915_ggtt_probe_hw(dev_priv);
 	if (ret)
-		return ret;
+		goto err_perf;
 
-	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
-	 * otherwise the vga fbdev driver falls over. */
+	/*
+	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
+	 * otherwise the vga fbdev driver falls over.
+	 */
 	ret = i915_kick_out_firmware_fb(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	ret = i915_kick_out_vgacon(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to remove conflicting VGA console\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	ret = i915_ggtt_init_hw(dev_priv);
 	if (ret)
-		return ret;
+		goto err_ggtt;
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to enable GGTT\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	pci_set_master(pdev);
@@ -1135,7 +1137,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
 
-			goto out_ggtt;
+			goto err_ggtt;
 		}
 	}
 
@@ -1153,7 +1155,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
 
-			goto out_ggtt;
+			goto err_ggtt;
 		}
 	}
 
@@ -1186,13 +1188,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	ret = intel_gvt_init(dev_priv);
 	if (ret)
-		goto out_ggtt;
+		goto err_ggtt;
 
 	return 0;
 
-out_ggtt:
+err_ggtt:
 	i915_ggtt_cleanup_hw(dev_priv);
-
+err_perf:
+	i915_perf_fini(dev_priv);
 	return ret;
 }
 

From ae0e28265e216dad11d4cbde42fc15e92919af78 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 11 Apr 2018 09:39:25 +0200
Subject: [PATCH 0395/1286] drm/blend: Add a generic alpha property

Some drivers duplicate the logic to create a property to store a per-plane
alpha.

This is especially useful if we ever want to support extra protocols for
Wayland like:
https://lists.freedesktop.org/archives/wayland-devel/2017-August/034741.html

Let's create a helper in order to move that logic to the core.
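
As a rough sketch of the intended use (the 8-bit register write is a
hypothetical driver helper; the property range is 0..DRM_BLEND_ALPHA_OPAQUE):

	/* at plane init time */
	ret = drm_plane_create_alpha_property(plane);
	if (ret)
		return ret;

	/* in the plane's atomic update path */
	u16 alpha = plane->state->alpha;	/* 0 = transparent, 0xffff = opaque */
	my_hw_write_global_alpha(plane, alpha >> 8);	/* hypothetical 8-bit helper */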

Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Boris Brezillon <boris.brezillon@bootlin.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/6e1ce0db78fcfc407e94913c64819e65109d034d.1523432341.git-series.maxime.ripard@bootlin.com
---
 drivers/gpu/drm/drm_atomic.c        |  4 +++
 drivers/gpu/drm/drm_atomic_helper.c |  4 +++
 drivers/gpu/drm/drm_blend.c         | 39 +++++++++++++++++++++++++++++
 include/drm/drm_blend.h             |  3 +++
 include/drm/drm_plane.h             |  6 +++++
 5 files changed, 56 insertions(+)

diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7d25c42f22db..3d9ae057a6cd 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -783,6 +783,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
 		state->src_w = val;
 	} else if (property == config->prop_src_h) {
 		state->src_h = val;
+	} else if (property == plane->alpha_property) {
+		state->alpha = val;
 	} else if (property == plane->rotation_property) {
 		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
 			return -EINVAL;
@@ -848,6 +850,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
 		*val = state->src_w;
 	} else if (property == config->prop_src_h) {
 		*val = state->src_h;
+	} else if (property == plane->alpha_property) {
+		*val = state->alpha;
 	} else if (property == plane->rotation_property) {
 		*val = state->rotation;
 	} else if (property == plane->zpos_property) {
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ee03c1ed2521..0587a0a2f3aa 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3500,6 +3500,10 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
 	if (plane->state) {
 		plane->state->plane = plane;
 		plane->state->rotation = DRM_MODE_ROTATE_0;
+
+		/* Reset the alpha value to fully opaque if it matters */
+		if (plane->alpha_property)
+			plane->state->alpha = plane->alpha_property->values[1];
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 5a81e1b4c076..a16a74d7e15e 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -88,6 +88,13 @@
  * On top of this basic transformation additional properties can be exposed by
  * the driver:
  *
+ * alpha:
+ * 	Alpha is setup with drm_plane_create_alpha_property(). It controls the
+ * 	plane-wide opacity, from transparent (0) to opaque (0xffff). It can be
+ * 	combined with pixel alpha.
+ *	The pixel values in the framebuffers are expected to not be
+ *	pre-multiplied by the global alpha associated to the plane.
+ *
  * rotation:
  *	Rotation is set up with drm_plane_create_rotation_property(). It adds a
  *	rotation and reflection step between the source and destination rectangles.
@@ -105,6 +112,38 @@
  * exposed and assumed to be black).
  */
 
+/**
+ * drm_plane_create_alpha_property - create a new alpha property
+ * @plane: drm plane
+ *
+ * This function creates a generic, mutable, alpha property and enables support
+ * for it in the DRM core. It is attached to @plane.
+ *
+ * The alpha property will be allowed to be within the bounds of 0
+ * (transparent) to 0xffff (opaque).
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int drm_plane_create_alpha_property(struct drm_plane *plane)
+{
+	struct drm_property *prop;
+
+	prop = drm_property_create_range(plane->dev, 0, "alpha",
+					 0, DRM_BLEND_ALPHA_OPAQUE);
+	if (!prop)
+		return -ENOMEM;
+
+	drm_object_attach_property(&plane->base, prop, DRM_BLEND_ALPHA_OPAQUE);
+	plane->alpha_property = prop;
+
+	if (plane->state)
+		plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_alpha_property);
+
 /**
  * drm_plane_create_rotation_property - create a new rotation property
  * @plane: drm plane
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 17606026590b..330c561c4c11 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -36,6 +36,9 @@ static inline bool drm_rotation_90_or_270(unsigned int rotation)
 	return rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270);
 }
 
+#define DRM_BLEND_ALPHA_OPAQUE		0xffff
+
+int drm_plane_create_alpha_property(struct drm_plane *plane);
 int drm_plane_create_rotation_property(struct drm_plane *plane,
 				       unsigned int rotation,
 				       unsigned int supported_rotations);
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index d6da26d66a4b..9563bd25f19b 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -43,6 +43,7 @@ struct drm_modeset_acquire_ctx;
  *	plane (in 16.16)
  * @src_w: width of visible portion of plane (in 16.16)
  * @src_h: height of visible portion of plane (in 16.16)
+ * @alpha: opacity of the plane
  * @rotation: rotation of the plane
  * @zpos: priority of the given plane on crtc (optional)
  *	Note that multiple active planes on the same crtc can have an identical
@@ -106,6 +107,9 @@ struct drm_plane_state {
 	uint32_t src_x, src_y;
 	uint32_t src_h, src_w;
 
+	/* Plane opacity */
+	u16 alpha;
+
 	/* Plane rotation */
 	unsigned int rotation;
 
@@ -496,6 +500,7 @@ enum drm_plane_type {
  * @funcs: helper functions
  * @properties: property tracking for this plane
  * @type: type of plane (overlay, primary, cursor)
+ * @alpha_property: alpha property for this plane
  * @zpos_property: zpos property for this plane
  * @rotation_property: rotation property for this plane
  * @helper_private: mid-layer private data
@@ -571,6 +576,7 @@ struct drm_plane {
 	 */
 	struct drm_plane_state *state;
 
+	struct drm_property *alpha_property;
 	struct drm_property *zpos_property;
 	struct drm_property *rotation_property;
 

From 7f73c10b256bbaf0843adf509dc4744fc846bcd5 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 11 Apr 2018 09:39:26 +0200
Subject: [PATCH 0396/1286] drm/atmel-hclcdc: Convert to the new generic alpha
 property

Now that we have support for per-plane alpha in the core, let's use it.

Acked-by: Boris Brezillon <boris.brezillon@bootlin.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/e5e97e2aae129600233e0983b748e4ba51ced239.1523432341.git-series.maxime.ripard@bootlin.com
---
 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h  | 13 ---
 .../gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c   | 89 +++----------------
 2 files changed, 14 insertions(+), 88 deletions(-)

diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index ab32d5b268d2..60c937f42114 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -299,7 +299,6 @@ struct atmel_hlcdc_layer {
 struct atmel_hlcdc_plane {
 	struct drm_plane base;
 	struct atmel_hlcdc_layer layer;
-	struct atmel_hlcdc_plane_properties *properties;
 };
 
 static inline struct atmel_hlcdc_plane *
@@ -345,18 +344,6 @@ struct atmel_hlcdc_dc_desc {
 	int nlayers;
 };
 
-/**
- * Atmel HLCDC Plane properties.
- *
- * This structure stores plane property definitions.
- *
- * @alpha: alpha blending (or transparency) property
- * @rotation: rotation property
- */
-struct atmel_hlcdc_plane_properties {
-	struct drm_property *alpha;
-};
-
 /**
  * Atmel HLCDC Display Controller.
  *
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index e18800ed7cd1..73c875db45f4 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -31,7 +31,6 @@
  * @src_y: y buffer position
  * @src_w: buffer width
  * @src_h: buffer height
- * @alpha: alpha blending of the plane
  * @disc_x: x discard position
  * @disc_y: y discard position
  * @disc_w: discard width
@@ -54,8 +53,6 @@ struct atmel_hlcdc_plane_state {
 	uint32_t src_w;
 	uint32_t src_h;
 
-	u8 alpha;
-
 	int disc_x;
 	int disc_y;
 	int disc_w;
@@ -385,7 +382,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
 			cfg |= ATMEL_HLCDC_LAYER_LAEN;
 		else
 			cfg |= ATMEL_HLCDC_LAYER_GAEN |
-			       ATMEL_HLCDC_LAYER_GA(state->alpha);
+			       ATMEL_HLCDC_LAYER_GA(state->base.alpha >> 8);
 	}
 
 	if (state->disc_h && state->disc_w)
@@ -553,7 +550,7 @@ atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
 
 		if (!ovl_s->fb ||
 		    ovl_s->fb->format->has_alpha ||
-		    ovl_state->alpha != 255)
+		    ovl_s->alpha != DRM_BLEND_ALPHA_OPAQUE)
 			continue;
 
 		/* TODO: implement a smarter hidden area detection */
@@ -829,51 +826,18 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
 	drm_plane_cleanup(p);
 }
 
-static int atmel_hlcdc_plane_atomic_set_property(struct drm_plane *p,
-						 struct drm_plane_state *s,
-						 struct drm_property *property,
-						 uint64_t val)
-{
-	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-	struct atmel_hlcdc_plane_properties *props = plane->properties;
-	struct atmel_hlcdc_plane_state *state =
-			drm_plane_state_to_atmel_hlcdc_plane_state(s);
-
-	if (property == props->alpha)
-		state->alpha = val;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-
-static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
-					const struct drm_plane_state *s,
-					struct drm_property *property,
-					uint64_t *val)
-{
-	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-	struct atmel_hlcdc_plane_properties *props = plane->properties;
-	const struct atmel_hlcdc_plane_state *state =
-		container_of(s, const struct atmel_hlcdc_plane_state, base);
-
-	if (property == props->alpha)
-		*val = state->alpha;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-
-static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
-				struct atmel_hlcdc_plane_properties *props)
+static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
 {
 	const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
 
 	if (desc->type == ATMEL_HLCDC_OVERLAY_LAYER ||
-	    desc->type == ATMEL_HLCDC_CURSOR_LAYER)
-		drm_object_attach_property(&plane->base.base,
-					   props->alpha, 255);
+	    desc->type == ATMEL_HLCDC_CURSOR_LAYER) {
+		int ret;
+
+		ret = drm_plane_create_alpha_property(&plane->base);
+		if (ret)
+			return ret;
+	}
 
 	if (desc->layout.xstride && desc->layout.pstride) {
 		int ret;
@@ -988,8 +952,8 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
 			return;
 		}
 
-		state->alpha = 255;
 		p->state = &state->base;
+		p->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
 		p->state->plane = p;
 	}
 }
@@ -1042,13 +1006,10 @@ static const struct drm_plane_funcs layer_plane_funcs = {
 	.reset = atmel_hlcdc_plane_reset,
 	.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
 	.atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
-	.atomic_set_property = atmel_hlcdc_plane_atomic_set_property,
-	.atomic_get_property = atmel_hlcdc_plane_atomic_get_property,
 };
 
 static int atmel_hlcdc_plane_create(struct drm_device *dev,
-				    const struct atmel_hlcdc_layer_desc *desc,
-				    struct atmel_hlcdc_plane_properties *props)
+				    const struct atmel_hlcdc_layer_desc *desc)
 {
 	struct atmel_hlcdc_dc *dc = dev->dev_private;
 	struct atmel_hlcdc_plane *plane;
@@ -1060,7 +1021,6 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
 		return -ENOMEM;
 
 	atmel_hlcdc_layer_init(&plane->layer, desc, dc->hlcdc->regmap);
-	plane->properties = props;
 
 	if (desc->type == ATMEL_HLCDC_BASE_LAYER)
 		type = DRM_PLANE_TYPE_PRIMARY;
@@ -1081,7 +1041,7 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
 			     &atmel_hlcdc_layer_plane_helper_funcs);
 
 	/* Set default property values*/
-	ret = atmel_hlcdc_plane_init_properties(plane, props);
+	ret = atmel_hlcdc_plane_init_properties(plane);
 	if (ret)
 		return ret;
 
@@ -1090,34 +1050,13 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
 	return 0;
 }
 
-static struct atmel_hlcdc_plane_properties *
-atmel_hlcdc_plane_create_properties(struct drm_device *dev)
-{
-	struct atmel_hlcdc_plane_properties *props;
-
-	props = devm_kzalloc(dev->dev, sizeof(*props), GFP_KERNEL);
-	if (!props)
-		return ERR_PTR(-ENOMEM);
-
-	props->alpha = drm_property_create_range(dev, 0, "alpha", 0, 255);
-	if (!props->alpha)
-		return ERR_PTR(-ENOMEM);
-
-	return props;
-}
-
 int atmel_hlcdc_create_planes(struct drm_device *dev)
 {
 	struct atmel_hlcdc_dc *dc = dev->dev_private;
-	struct atmel_hlcdc_plane_properties *props;
 	const struct atmel_hlcdc_layer_desc *descs = dc->desc->layers;
 	int nlayers = dc->desc->nlayers;
 	int i, ret;
 
-	props = atmel_hlcdc_plane_create_properties(dev);
-	if (IS_ERR(props))
-		return PTR_ERR(props);
-
 	dc->dscrpool = dmam_pool_create("atmel-hlcdc-dscr", dev->dev,
 				sizeof(struct atmel_hlcdc_dma_channel_dscr),
 				sizeof(u64), 0);
@@ -1130,7 +1069,7 @@ int atmel_hlcdc_create_planes(struct drm_device *dev)
 		    descs[i].type != ATMEL_HLCDC_CURSOR_LAYER)
 			continue;
 
-		ret = atmel_hlcdc_plane_create(dev, &descs[i], props);
+		ret = atmel_hlcdc_plane_create(dev, &descs[i]);
 		if (ret)
 			return ret;
 	}

From 301a9b8d545690f7bd91e1794e1498aa62902d13 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 11 Apr 2018 09:39:27 +0200
Subject: [PATCH 0397/1286] drm/rcar-du: Convert to the new generic alpha
 property

Now that we have support for per-plane alpha in the core, let's use it.

Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/a343697b87109cd8d9675ea8bce2e561051a696f.1523432341.git-series.maxime.ripard@bootlin.com
---
 drivers/gpu/drm/rcar-du/rcar_du_drv.h   |  1 -
 drivers/gpu/drm/rcar-du/rcar_du_kms.c   |  5 ---
 drivers/gpu/drm/rcar-du/rcar_du_plane.c | 15 +++------
 drivers/gpu/drm/rcar-du/rcar_du_plane.h |  2 --
 drivers/gpu/drm/rcar-du/rcar_du_vsp.c   | 42 +++----------------------
 drivers/gpu/drm/rcar-du/rcar_du_vsp.h   |  3 --
 6 files changed, 9 insertions(+), 59 deletions(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 5c7ec15818c7..131d8e88b06c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -87,7 +87,6 @@ struct rcar_du_device {
 	struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
 
 	struct {
-		struct drm_property *alpha;
 		struct drm_property *colorkey;
 	} props;
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index ab59d2061e06..f4ac0f884f00 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -407,11 +407,6 @@ static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
 
 static int rcar_du_properties_init(struct rcar_du_device *rcdu)
 {
-	rcdu->props.alpha =
-		drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
-	if (rcdu->props.alpha == NULL)
-		return -ENOMEM;
-
 	/*
 	 * The color key is expressed as an RGB888 triplet stored in a 32-bit
 	 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 68556bd9dad2..c20f7ed48c8d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -423,7 +423,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
 		rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
 	else
 		rcar_du_plane_write(rgrp, index, PnALPHAR,
-				    PnALPHAR_ABIT_X | state->alpha);
+				    PnALPHAR_ABIT_X | state->state.alpha >> 8);
 
 	pnmr = PnMR_BM_MD | state->format->pnmr;
 
@@ -692,11 +692,11 @@ static void rcar_du_plane_reset(struct drm_plane *plane)
 
 	state->hwindex = -1;
 	state->source = RCAR_DU_PLANE_MEMORY;
-	state->alpha = 255;
 	state->colorkey = RCAR_DU_COLORKEY_NONE;
 	state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
 
 	plane->state = &state->state;
+	plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
 	plane->state->plane = plane;
 }
 
@@ -708,9 +708,7 @@ static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
 	struct rcar_du_plane_state *rstate = to_rcar_plane_state(state);
 	struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev;
 
-	if (property == rcdu->props.alpha)
-		rstate->alpha = val;
-	else if (property == rcdu->props.colorkey)
+	if (property == rcdu->props.colorkey)
 		rstate->colorkey = val;
 	else
 		return -EINVAL;
@@ -726,9 +724,7 @@ static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
 		container_of(state, const struct rcar_du_plane_state, state);
 	struct rcar_du_device *rcdu = to_rcar_plane(plane)->group->dev;
 
-	if (property == rcdu->props.alpha)
-		*val = rstate->alpha;
-	else if (property == rcdu->props.colorkey)
+	if (property == rcdu->props.colorkey)
 		*val = rstate->colorkey;
 	else
 		return -EINVAL;
@@ -796,11 +792,10 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
 		if (type == DRM_PLANE_TYPE_PRIMARY)
 			continue;
 
-		drm_object_attach_property(&plane->plane.base,
-					   rcdu->props.alpha, 255);
 		drm_object_attach_property(&plane->plane.base,
 					   rcdu->props.colorkey,
 					   RCAR_DU_COLORKEY_NONE);
+		drm_plane_create_alpha_property(&plane->plane);
 		drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
 	}
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 890321b4665d..5c19c69e4691 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -50,7 +50,6 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
  * @state: base DRM plane state
  * @format: information about the pixel format used by the plane
  * @hwindex: 0-based hardware plane index, -1 means unused
- * @alpha: value of the plane alpha property
  * @colorkey: value of the plane colorkey property
  */
 struct rcar_du_plane_state {
@@ -60,7 +59,6 @@ struct rcar_du_plane_state {
 	int hwindex;
 	enum rcar_du_plane_source source;
 
-	unsigned int alpha;
 	unsigned int colorkey;
 };
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 2c260c33840b..b3bec0125696 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -54,6 +54,7 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
 	};
 	struct rcar_du_plane_state state = {
 		.state = {
+			.alpha = DRM_BLEND_ALPHA_OPAQUE,
 			.crtc = &crtc->crtc,
 			.dst.x1 = 0,
 			.dst.y1 = 0,
@@ -67,7 +68,6 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
 		},
 		.format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
 		.source = RCAR_DU_PLANE_VSPD1,
-		.alpha = 255,
 		.colorkey = 0,
 	};
 
@@ -173,7 +173,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
 	struct vsp1_du_atomic_config cfg = {
 		.pixelformat = 0,
 		.pitch = fb->pitches[0],
-		.alpha = state->alpha,
+		.alpha = state->state.alpha >> 8,
 		.zpos = state->state.zpos,
 	};
 	unsigned int i;
@@ -335,44 +335,13 @@ static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
 	if (state == NULL)
 		return;
 
-	state->alpha = 255;
+	state->state.alpha = DRM_BLEND_ALPHA_OPAQUE;
 	state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
 
 	plane->state = &state->state;
 	plane->state->plane = plane;
 }
 
-static int rcar_du_vsp_plane_atomic_set_property(struct drm_plane *plane,
-	struct drm_plane_state *state, struct drm_property *property,
-	uint64_t val)
-{
-	struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
-	struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
-
-	if (property == rcdu->props.alpha)
-		rstate->alpha = val;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-
-static int rcar_du_vsp_plane_atomic_get_property(struct drm_plane *plane,
-	const struct drm_plane_state *state, struct drm_property *property,
-	uint64_t *val)
-{
-	const struct rcar_du_vsp_plane_state *rstate =
-		container_of(state, const struct rcar_du_vsp_plane_state, state);
-	struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
-
-	if (property == rcdu->props.alpha)
-		*val = rstate->alpha;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-
 static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
@@ -380,8 +349,6 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
 	.destroy = drm_plane_cleanup,
 	.atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state,
 	.atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
-	.atomic_set_property = rcar_du_vsp_plane_atomic_set_property,
-	.atomic_get_property = rcar_du_vsp_plane_atomic_get_property,
 };
 
 int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
@@ -438,8 +405,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
 		if (type == DRM_PLANE_TYPE_PRIMARY)
 			continue;
 
-		drm_object_attach_property(&plane->plane.base,
-					   rcdu->props.alpha, 255);
+		drm_plane_create_alpha_property(&plane->plane);
 		drm_plane_create_zpos_property(&plane->plane, 1, 1,
 					       vsp->num_planes - 1);
 	}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index 4c5d7bbce6aa..8a8a25c8c8e8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -44,15 +44,12 @@ static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
  * @state: base DRM plane state
  * @format: information about the pixel format used by the plane
  * @sg_tables: scatter-gather tables for the frame buffer memory
- * @alpha: value of the plane alpha property
  */
 struct rcar_du_vsp_plane_state {
 	struct drm_plane_state state;
 
 	const struct rcar_du_format_info *format;
 	struct sg_table sg_tables[3];
-
-	unsigned int alpha;
 };
 
 static inline struct rcar_du_vsp_plane_state *

From d99008aab9f57d5b036cf675f22b9d3939e7e3fe Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 11 Apr 2018 09:39:28 +0200
Subject: [PATCH 0398/1286] drm/sun4i: Add support for plane alpha

Our backend supports a per-plane alpha property. Support it through our new
helper.
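
As a sketch, the backend programs the 8-bit global alpha field and only
enables it when the plane is not fully opaque (mirroring the hunk below):

	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;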

Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Chen-Yu Tsai <wens@csie.org>
Reviewed-by: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/13e89f0d2f2b55752a22eb8c4f37f325246a3a9c.1523432341.git-series.maxime.ripard@bootlin.com
---
 drivers/gpu/drm/sun4i/sun4i_backend.c | 16 +++++++++++++---
 drivers/gpu/drm/sun4i/sun4i_backend.h |  3 +++
 drivers/gpu/drm/sun4i/sun4i_layer.c   |  2 ++
 3 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 9bad54f3de38..de0a76dfa1a2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -295,6 +295,15 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
 	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
 			 interlaced ? "on" : "off");
 
+	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
+	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
+		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
+	regmap_update_bits(backend->engine.regs,
+			   SUN4I_BACKEND_ATTCTL_REG0(layer),
+			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
+			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
+			   val);
+
 	if (sun4i_backend_format_is_yuv(fb->format->format))
 		return sun4i_backend_update_yuv_format(backend, layer, plane);
 
@@ -490,7 +499,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
 		DRM_DEBUG_DRIVER("Plane FB format is %s\n",
 				 drm_get_format_name(fb->format->format,
 						     &format_name));
-		if (fb->format->has_alpha)
+		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
 			num_alpha_planes++;
 
 		if (sun4i_backend_format_is_yuv(fb->format->format)) {
@@ -548,7 +557,8 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
 	}
 
 	/* We can't have an alpha plane at the lowest position */
-	if (plane_states[0]->fb->format->has_alpha)
+	if (plane_states[0]->fb->format->has_alpha ||
+	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
 		return -EINVAL;
 
 	for (i = 1; i < num_planes; i++) {
@@ -560,7 +570,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
 		 * The only alpha position is the lowest plane of the
 		 * second pipe.
 		 */
-		if (fb->format->has_alpha)
+		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
 			current_pipe++;
 
 		s_state->pipe = current_pipe;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 316f2179e9e1..4caee0392fa4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -68,12 +68,15 @@
 #define SUN4I_BACKEND_CKMIN_REG			0x884
 #define SUN4I_BACKEND_CKCFG_REG			0x888
 #define SUN4I_BACKEND_ATTCTL_REG0(l)		(0x890 + (0x4 * (l)))
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK	GENMASK(31, 24)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(x)		((x) << 24)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK	BIT(15)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(x)		((x) << 15)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK	GENMASK(11, 10)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(x)			((x) << 10)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN		BIT(2)
 #define SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN		BIT(1)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN	BIT(0)
 
 #define SUN4I_BACKEND_ATTCTL_REG1(l)		(0x8a0 + (0x4 * (l)))
 #define SUN4I_BACKEND_ATTCTL_REG1_LAY_HSCAFCT		GENMASK(15, 14)
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 2949a3c912c1..750ad24de1d7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -37,6 +37,7 @@ static void sun4i_backend_layer_reset(struct drm_plane *plane)
 	if (state) {
 		plane->state = &state->state;
 		plane->state->plane = plane;
+		plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
 		plane->state->zpos = layer->id;
 	}
 }
@@ -167,6 +168,7 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
 			     &sun4i_backend_layer_helper_funcs);
 	layer->backend = backend;
 
+	drm_plane_create_alpha_property(&layer->plane);
 	drm_plane_create_zpos_property(&layer->plane, 0, 0,
 				       SUN4I_BACKEND_NUM_LAYERS - 1);
 

From d50479ad314dc6eca438b95d38a44dd5f72dd7f7 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime.ripard@bootlin.com>
Date: Wed, 11 Apr 2018 09:39:29 +0200
Subject: [PATCH 0399/1286] drm/docs: Remove the rcar alpha from the csv file

Now that the rcar-du DRM driver has been switched to the generic alpha
property, remove the former property's documentation from the deprecated
CSV file.

Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/04be5e2256aa8d33d9521a68a10f0b73a24f8040.1523432341.git-series.maxime.ripard@bootlin.com
---
 Documentation/gpu/kms-properties.csv | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
index 6b28b014cb7d..07ed22ea3bd6 100644
--- a/Documentation/gpu/kms-properties.csv
+++ b/Documentation/gpu/kms-properties.csv
@@ -98,5 +98,4 @@ radeon,DVI-I,“coherent”,RANGE,"Min=0, Max=1",Connector,TBD
 ,,"""underscan vborder""",RANGE,"Min=0, Max=128",Connector,TBD
 ,Audio,“audio”,ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
 ,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
-rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
 ,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD

From 2d078c2d04a535c2f342beb84c59cdade5cfe3d3 Mon Sep 17 00:00:00 2001
From: John Keeping <john@metanate.com>
Date: Wed, 28 Mar 2018 17:03:51 +0100
Subject: [PATCH 0400/1286] drm/rockchip: fix VOP vblank race

We have seen a case of a bad reference count for vblanks with the
Rockchip VOP:

	------------[ cut here ]------------
	WARNING: CPU: 1 PID: 383 at drivers/gpu/drm/drm_irq.c:1198 drm_vblank_put+0x40/0xcc
	Modules linked in: brcmfmac brcmutil
	CPU: 1 PID: 383 Comm: kworker/u8:2 Not tainted 4.9.75-rt60 #1
	Hardware name: Rockchip (Device Tree)
	Workqueue: events_unbound flip_worker
	Backtrace:
	[<c010b7b0>] (dump_backtrace) from [<c010ba4c>] (show_stack+0x18/0x1c)
	 r7:c0b1b13c r6:600b0013 r5:00000000 r4:c0b1b13c
	[<c010ba34>] (show_stack) from [<c032d248>] (dump_stack+0x78/0x94)
	[<c032d1d0>] (dump_stack) from [<c011e6e8>] (__warn+0xe4/0x104)
	 r7:00000009 r6:c03cf26c r5:00000000 r4:00000000
	[<c011e604>] (__warn) from [<c011e7c0>] (warn_slowpath_null+0x28/0x30)
	 r9:eeb443a0 r8:eeb443c8 r7:ee8a5ec0 r6:ee8a5ec0 r5:edb47f00 r4:ee096200
	[<c011e798>] (warn_slowpath_null) from [<c03cf26c>] (drm_vblank_put+0x40/0xcc)
	[<c03cf22c>] (drm_vblank_put) from [<c03cf310>] (drm_crtc_vblank_put+0x18/0x1c)
	 r5:edb47f00 r4:ee3c8a80
	[<c03cf2f8>] (drm_crtc_vblank_put) from [<c03ef9b4>] (vop_fb_unref_worker+0x18/0x24)
	[<c03ef99c>] (vop_fb_unref_worker) from [<c03df194>] (flip_worker+0x98/0xb4)
	 r5:edb47f00 r4:eeb443a8
	[<c03df0fc>] (flip_worker) from [<c0134808>] (process_one_work+0x1a8/0x2fc)
	 r9:00000000 r8:ee807d00 r7:00000000 r6:ee809c00 r5:eeb443a8 r4:edfe5f80
	[<c0134660>] (process_one_work) from [<c01358ec>] (worker_thread+0x2ac/0x458)
	 r10:00000088 r9:edfe5f98 r8:ee809c2c r7:c0b04100 r6:ee809c00 r5:ee809c00
	 r4:edfe5f80
	[<c0135640>] (worker_thread) from [<c013a0bc>] (kthread+0xfc/0x10c)
	 r10:00000000 r9:00000000 r8:c0135640 r7:edfe5f80 r6:00000000 r5:edf0e240
	 r4:ee8a4000 r3:ed194e00
	[<c0139fc0>] (kthread) from [<c0107cb8>] (ret_from_fork+0x14/0x3c)
	 r8:00000000 r7:00000000 r6:00000000 r5:c0139fc0 r4:edf0e240
	---[ end trace 0000000000000002 ]---

It seems that this is caused by unfortunate timing between
vop_crtc_atomic_flush() and vop_handle_vblank() given the following
ordering:

	atomic_flush		handle_vblank
	------------		-------------

	drm_flip_work_queue
	set_bit
	     			if (test_and_clear_bit(...))
	     				drm_flip_work_commit
	drm_vblank_get

This results in vop_fb_unref_worker (called as flip work) decrementing
the vblank refcount before it has been incremented.
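
The fix is simply to take the vblank reference before the flip work can be
queued or committed, as a sketch of the reordering below:

	WARN_ON(drm_crtc_vblank_get(crtc) != 0);	/* reference exists first */
	drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
	set_bit(VOP_PENDING_FB_UNREF, &vop->pending);	/* only now may handle_vblank commit */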

Signed-off-by: John Keeping <john@metanate.com>
Reviewed-by: Sandy huang <hjc@rock-chips.com>
Signed-off-by: Sandy Huang <hjc@rock-chips.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328160351.23763-1-john@metanate.com
---
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 53d4afe15278..510cdf076bb1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1017,9 +1017,9 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
 			continue;
 
 		drm_framebuffer_get(old_plane_state->fb);
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
 		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 	}
 }
 

From 94f8dfc6cdfc3c48c3aea59ce528fa93cb54a69f Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Mon, 16 Apr 2018 14:57:01 -0700
Subject: [PATCH 0401/1286] drm/i915/selftests: Handle a potential failure of
 intel_ring_begin

Silence smatch over:

drivers/gpu/drm/i915/selftests/intel_workarounds.c:58 read_nonprivs() error: 'cs' dereferencing possible ERR_PTR()

by handling a potential (but unlikely) failure of intel_ring_begin.
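
The handling is the usual ERR_PTR pattern (a sketch of the hunk below; the
unwind label adds the request so it is not leaked):

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;	/* i915_request_add(rq) before bailing out */
	}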

Fixes: f4ecfbfc32ed ("drm/i915: Check whitelist registers across resets")
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1523915821-30624-1-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/selftests/intel_workarounds.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index fe7deca33d77..5455b2626627 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -54,6 +54,11 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 		srm++;
 
 	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_req;
+	}
+
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
 		*cs++ = srm;
 		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
@@ -75,6 +80,8 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 
 	return result;
 
+err_req:
+	i915_request_add(rq);
 err_pin:
 	i915_vma_unpin(vma);
 err_obj:

From aa808440426f6d163a4f51076132628fee6e1e7d Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Wed, 11 Apr 2018 22:49:12 +0200
Subject: [PATCH 0402/1286] drm/vc4: Add some missing HVS register definitions.

At least the RGBA expand field is one we should have been setting,
because we aren't expanding correctly for 565 -> 8888.  The other
registers may be of interest for various projects that have been
discussed.

Signed-off-by: Eric Anholt <eric@anholt.net>
Acked-by: Stefan Schake <stschake@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523479755-20812-2-git-send-email-stschake@gmail.com
---
 drivers/gpu/drm/vc4/vc4_regs.h | 96 ++++++++++++++++++++++++++++++++++
 1 file changed, 96 insertions(+)

diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index a141496104a6..4af3e29d076a 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -330,6 +330,21 @@
 #define SCALER_DISPCTRL0                        0x00000040
 # define SCALER_DISPCTRLX_ENABLE		BIT(31)
 # define SCALER_DISPCTRLX_RESET			BIT(30)
+/* Generates a single frame when VSTART is seen and stops at the last
+ * pixel read from the FIFO.
+ */
+# define SCALER_DISPCTRLX_ONESHOT		BIT(29)
+/* Processes a single context in the dlist and then task switch,
+ * instead of an entire line.
+ */
+# define SCALER_DISPCTRLX_ONECTX		BIT(28)
+/* Set to have DISPSLAVE return 2 16bpp pixels and no status data. */
+# define SCALER_DISPCTRLX_FIFO32		BIT(27)
+/* Turns on output to the DISPSLAVE register instead of the normal
+ * FIFO.
+ */
+# define SCALER_DISPCTRLX_FIFOREG		BIT(26)
+
 # define SCALER_DISPCTRLX_WIDTH_MASK		VC4_MASK(23, 12)
 # define SCALER_DISPCTRLX_WIDTH_SHIFT		12
 # define SCALER_DISPCTRLX_HEIGHT_MASK		VC4_MASK(11, 0)
@@ -402,6 +417,68 @@
  */
 # define SCALER_GAMADDR_SRAMENB			BIT(30)
 
+#define SCALER_OLEDOFFS                         0x00000080
+/* Clamps R to [16,235] and G/B to [16,240]. */
+# define SCALER_OLEDOFFS_YUVCLAMP               BIT(31)
+
+/* Chooses which display FIFO the matrix applies to. */
+# define SCALER_OLEDOFFS_DISPFIFO_MASK          VC4_MASK(25, 24)
+# define SCALER_OLEDOFFS_DISPFIFO_SHIFT         24
+# define SCALER_OLEDOFFS_DISPFIFO_DISABLED      0
+# define SCALER_OLEDOFFS_DISPFIFO_0             1
+# define SCALER_OLEDOFFS_DISPFIFO_1             2
+# define SCALER_OLEDOFFS_DISPFIFO_2             3
+
+/* Offsets are 8-bit 2s-complement. */
+# define SCALER_OLEDOFFS_RED_MASK               VC4_MASK(23, 16)
+# define SCALER_OLEDOFFS_RED_SHIFT              16
+# define SCALER_OLEDOFFS_GREEN_MASK             VC4_MASK(15, 8)
+# define SCALER_OLEDOFFS_GREEN_SHIFT            8
+# define SCALER_OLEDOFFS_BLUE_MASK              VC4_MASK(7, 0)
+# define SCALER_OLEDOFFS_BLUE_SHIFT             0
+
+/* The coefficients are S0.9 fractions. */
+#define SCALER_OLEDCOEF0                        0x00000084
+# define SCALER_OLEDCOEF0_B_TO_R_MASK           VC4_MASK(29, 20)
+# define SCALER_OLEDCOEF0_B_TO_R_SHIFT          20
+# define SCALER_OLEDCOEF0_B_TO_G_MASK           VC4_MASK(19, 10)
+# define SCALER_OLEDCOEF0_B_TO_G_SHIFT          10
+# define SCALER_OLEDCOEF0_B_TO_B_MASK           VC4_MASK(9, 0)
+# define SCALER_OLEDCOEF0_B_TO_B_SHIFT          0
+
+#define SCALER_OLEDCOEF1                        0x00000088
+# define SCALER_OLEDCOEF1_G_TO_R_MASK           VC4_MASK(29, 20)
+# define SCALER_OLEDCOEF1_G_TO_R_SHIFT          20
+# define SCALER_OLEDCOEF1_G_TO_G_MASK           VC4_MASK(19, 10)
+# define SCALER_OLEDCOEF1_G_TO_G_SHIFT          10
+# define SCALER_OLEDCOEF1_G_TO_B_MASK           VC4_MASK(9, 0)
+# define SCALER_OLEDCOEF1_G_TO_B_SHIFT          0
+
+#define SCALER_OLEDCOEF2                        0x0000008c
+# define SCALER_OLEDCOEF2_R_TO_R_MASK           VC4_MASK(29, 20)
+# define SCALER_OLEDCOEF2_R_TO_R_SHIFT          20
+# define SCALER_OLEDCOEF2_R_TO_G_MASK           VC4_MASK(19, 10)
+# define SCALER_OLEDCOEF2_R_TO_G_SHIFT          10
+# define SCALER_OLEDCOEF2_R_TO_B_MASK           VC4_MASK(9, 0)
+# define SCALER_OLEDCOEF2_R_TO_B_SHIFT          0
+
+/* Slave addresses for DMAing from HVS composition output to other
+ * devices.  The top bits are valid only in !FIFO32 mode.
+ */
+#define SCALER_DISPSLAVE0                       0x000000c0
+#define SCALER_DISPSLAVE1                       0x000000c9
+#define SCALER_DISPSLAVE2                       0x000000d0
+# define SCALER_DISPSLAVE_ISSUE_VSTART          BIT(31)
+# define SCALER_DISPSLAVE_ISSUE_HSTART          BIT(30)
+/* Set when the current line has been read and an HSTART is required. */
+# define SCALER_DISPSLAVE_EOL                   BIT(26)
+/* Set when the display FIFO is empty. */
+# define SCALER_DISPSLAVE_EMPTY                 BIT(25)
+/* Set when there is RGB data ready to read. */
+# define SCALER_DISPSLAVE_VALID                 BIT(24)
+# define SCALER_DISPSLAVE_RGB_MASK              VC4_MASK(23, 0)
+# define SCALER_DISPSLAVE_RGB_SHIFT             0
+
 #define SCALER_GAMDATA                          0x000000e0
 #define SCALER_DLIST_START                      0x00002000
 #define SCALER_DLIST_SIZE                       0x00004000
@@ -767,6 +844,10 @@ enum hvs_pixel_format {
 	HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE = 9,
 	HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE = 10,
 	HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE = 11,
+	HVS_PIXEL_FORMAT_H264 = 12,
+	HVS_PIXEL_FORMAT_PALETTE = 13,
+	HVS_PIXEL_FORMAT_YUV444_RGB = 14,
+	HVS_PIXEL_FORMAT_AYUV444_RGB = 15,
 };
 
 /* Note: the LSB is the rightmost character shown.  Only valid for
@@ -800,12 +881,27 @@ enum hvs_pixel_format {
 #define SCALER_CTL0_TILING_128B			2
 #define SCALER_CTL0_TILING_256B_OR_T		3
 
+#define SCALER_CTL0_ALPHA_MASK                  BIT(19)
 #define SCALER_CTL0_HFLIP                       BIT(16)
 #define SCALER_CTL0_VFLIP                       BIT(15)
 
+#define SCALER_CTL0_KEY_MODE_MASK		VC4_MASK(18, 17)
+#define SCALER_CTL0_KEY_MODE_SHIFT		17
+#define SCALER_CTL0_KEY_DISABLED		0
+#define SCALER_CTL0_KEY_LUMA_OR_COMMON_RGB	1
+#define SCALER_CTL0_KEY_MATCH			2 /* turn transparent */
+#define SCALER_CTL0_KEY_REPLACE			3 /* replace with value from key mask word 2 */
+
 #define SCALER_CTL0_ORDER_MASK			VC4_MASK(14, 13)
 #define SCALER_CTL0_ORDER_SHIFT			13
 
+#define SCALER_CTL0_RGBA_EXPAND_MASK		VC4_MASK(12, 11)
+#define SCALER_CTL0_RGBA_EXPAND_SHIFT		11
+#define SCALER_CTL0_RGBA_EXPAND_ZERO		0
+#define SCALER_CTL0_RGBA_EXPAND_LSB		1
+#define SCALER_CTL0_RGBA_EXPAND_MSB		2
+#define SCALER_CTL0_RGBA_EXPAND_ROUND		3
+
 #define SCALER_CTL0_SCL1_MASK			VC4_MASK(10, 8)
 #define SCALER_CTL0_SCL1_SHIFT			8
 

From 640e0c79de8b0a68b5a23c9cd46da3518cbff7d3 Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Wed, 11 Apr 2018 22:49:13 +0200
Subject: [PATCH 0403/1286] drm/vc4: Expose gamma as atomic property

We are an atomic driver, so the gamma LUT should also be exposed as a
CRTC property through DRM atomic color management. This also takes
care of the legacy path for us.
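
In sketch form, the pieces involved are (simplified from the patch below,
not new API):

	/* advertise the 256-entry LUT as an atomic CRTC property */
	drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
	drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);

	/* route the legacy gamma ioctl through the atomic helper */
	.gamma_set = drm_atomic_helper_legacy_gamma_set,

	/* on color_mgmt_changed, load crtc->state->gamma_lut into the HVS */
	vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);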

Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/1523479755-20812-3-git-send-email-stschake@gmail.com
---
 drivers/gpu/drm/vc4/vc4_crtc.c | 37 ++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index bf4667481935..285f88dd9a82 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -298,23 +298,21 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
 		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
 }
 
-static int
-vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-		   uint32_t size,
-		   struct drm_modeset_acquire_ctx *ctx)
+static void
+vc4_crtc_update_gamma_lut(struct drm_crtc *crtc)
 {
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+	u32 length = drm_color_lut_size(crtc->state->gamma_lut);
 	u32 i;
 
-	for (i = 0; i < size; i++) {
-		vc4_crtc->lut_r[i] = r[i] >> 8;
-		vc4_crtc->lut_g[i] = g[i] >> 8;
-		vc4_crtc->lut_b[i] = b[i] >> 8;
+	for (i = 0; i < length; i++) {
+		vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);
+		vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8);
+		vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8);
 	}
 
 	vc4_crtc_lut_load(crtc);
-
-	return 0;
 }
 
 static u32 vc4_get_fifo_full_level(u32 format)
@@ -699,6 +697,22 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 	if (crtc->state->active && old_state->active)
 		vc4_crtc_update_dlist(crtc);
 
+	if (crtc->state->color_mgmt_changed) {
+		u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel));
+
+		if (crtc->state->gamma_lut) {
+			vc4_crtc_update_gamma_lut(crtc);
+			dispbkgndx |= SCALER_DISPBKGND_GAMMA;
+		} else {
+			/* Unsetting DISPBKGND_GAMMA skips the gamma lut step
+			 * in hardware, which is the same as a linear lut that
+			 * DRM expects us to use in absence of a user lut.
+			 */
+			dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
+		}
+		HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), dispbkgndx);
+	}
+
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
 		vc4_hvs_dump_state(dev);
@@ -909,7 +923,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.reset = vc4_crtc_reset,
 	.atomic_duplicate_state = vc4_crtc_duplicate_state,
 	.atomic_destroy_state = vc4_crtc_destroy_state,
-	.gamma_set = vc4_crtc_gamma_set,
+	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.enable_vblank = vc4_enable_vblank,
 	.disable_vblank = vc4_disable_vblank,
 };
@@ -1035,6 +1049,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
 	primary_plane->crtc = crtc;
 	vc4_crtc->channel = vc4_crtc->data->hvs_channel;
 	drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
+	drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
 
 	/* Set up some arbitrary number of planes.  We're not limited
 	 * by a set number of physical registers, just the space in

From 792718070621f6137a833fcc4a9fde77f9b6a4c5 Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Wed, 11 Apr 2018 22:49:14 +0200
Subject: [PATCH 0404/1286] drm/vc4: Move CRTC state to header

We need to access the channel for configuring our CTM hardware.

Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/1523479755-20812-4-git-send-email-stschake@gmail.com
---
 drivers/gpu/drm/vc4/vc4_crtc.c | 33 ---------------------------------
 drivers/gpu/drm/vc4/vc4_drv.h  | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 285f88dd9a82..08fe8dd7d8df 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -42,51 +42,18 @@
 #include "vc4_drv.h"
 #include "vc4_regs.h"
 
-struct vc4_crtc {
-	struct drm_crtc base;
-	const struct vc4_crtc_data *data;
-	void __iomem *regs;
-
-	/* Timestamp at start of vblank irq - unaffected by lock delays. */
-	ktime_t t_vblank;
-
-	/* Which HVS channel we're using for our CRTC. */
-	int channel;
-
-	u8 lut_r[256];
-	u8 lut_g[256];
-	u8 lut_b[256];
-	/* Size in pixels of the COB memory allocated to this CRTC. */
-	u32 cob_size;
-
-	struct drm_pending_vblank_event *event;
-};
-
 struct vc4_crtc_state {
 	struct drm_crtc_state base;
 	/* Dlist area for this CRTC configuration. */
 	struct drm_mm_node mm;
 };
 
-static inline struct vc4_crtc *
-to_vc4_crtc(struct drm_crtc *crtc)
-{
-	return (struct vc4_crtc *)crtc;
-}
-
 static inline struct vc4_crtc_state *
 to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
 {
 	return (struct vc4_crtc_state *)crtc_state;
 }
 
-struct vc4_crtc_data {
-	/* Which channel of the HVS this pixelvalve sources from. */
-	int hvs_channel;
-
-	enum vc4_encoder_type encoder_types[4];
-};
-
 #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
 #define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
 
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 1b4cd1fabf56..4288615b66a2 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -392,6 +392,39 @@ to_vc4_encoder(struct drm_encoder *encoder)
 	return container_of(encoder, struct vc4_encoder, base);
 }
 
+struct vc4_crtc_data {
+	/* Which channel of the HVS this pixelvalve sources from. */
+	int hvs_channel;
+
+	enum vc4_encoder_type encoder_types[4];
+};
+
+struct vc4_crtc {
+	struct drm_crtc base;
+	const struct vc4_crtc_data *data;
+	void __iomem *regs;
+
+	/* Timestamp at start of vblank irq - unaffected by lock delays. */
+	ktime_t t_vblank;
+
+	/* Which HVS channel we're using for our CRTC. */
+	int channel;
+
+	u8 lut_r[256];
+	u8 lut_g[256];
+	u8 lut_b[256];
+	/* Size in pixels of the COB memory allocated to this CRTC. */
+	u32 cob_size;
+
+	struct drm_pending_vblank_event *event;
+};
+
+static inline struct vc4_crtc *
+to_vc4_crtc(struct drm_crtc *crtc)
+{
+	return (struct vc4_crtc *)crtc;
+}
+
 #define V3D_READ(offset) readl(vc4->v3d->regs + offset)
 #define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
 #define HVS_READ(offset) readl(vc4->hvs->regs + offset)

From 539c320bfa9778ef4a96ee4c846d230f80ca6f50 Mon Sep 17 00:00:00 2001
From: Gustavo Padovan <gustavo.padovan@collabora.com>
Date: Fri, 30 Mar 2018 10:54:45 +0200
Subject: [PATCH 0405/1286] drm/vc4: update cursors asynchronously through
 atomic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add support for async updates of cursors by using the new atomic
interface for that. In essence, this commit does what vc4_update_plane()
did, but through the atomic path.
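
In sketch form, the plane gains async hooks and the commit path
short-circuits when only an async-capable update is requested (simplified
from the patch below):

	static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
		/* ... existing hooks ... */
		.atomic_async_check = vc4_plane_atomic_async_check,
		.atomic_async_update = vc4_plane_atomic_async_update,
	};

	/* in vc4_atomic_commit() */
	if (state->async_update) {
		drm_atomic_helper_async_commit(dev, state);
		return 0;
	}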

v7: Place the drm_atomic_set_fb_for_plane() call after the new
    FB has been applied to the HW to avoid possible use-after-free
    issues
v6: add missing drm_atomic_set_fb_for_plane() in
    vc4_plane_atomic_async_update() (Boris Brezillon)
v5: add missing call to vc4_plane_atomic_check() (Eric Anholt)
v4: add drm_atomic_helper_async() commit (Eric Anholt)
v3: move size checks back to drivers (Ville Syrjälä)
v2: move fb setting to core and use new state (Eric Anholt)

Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180330085445.31726-1-boris.brezillon@bootlin.com
---
 drivers/gpu/drm/vc4/vc4_kms.c   |  20 +++++
 drivers/gpu/drm/vc4/vc4_plane.c | 131 +++++++++++++-------------------
 2 files changed, 74 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index ba60153dddb5..e791e498a3dd 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -90,6 +90,26 @@ static int vc4_atomic_commit(struct drm_device *dev,
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int ret;
 
+	if (state->async_update) {
+		ret = down_interruptible(&vc4->async_modeset);
+		if (ret)
+			return ret;
+
+		ret = drm_atomic_helper_prepare_planes(dev, state);
+		if (ret) {
+			up(&vc4->async_modeset);
+			return ret;
+		}
+
+		drm_atomic_helper_async_commit(dev, state);
+
+		drm_atomic_helper_cleanup_planes(dev, state);
+
+		up(&vc4->async_modeset);
+
+		return 0;
+	}
+
 	ret = drm_atomic_helper_setup_commit(state, nonblock);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index ce39390be389..c3a37a99e601 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -741,6 +741,57 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
 	vc4_state->dlist[vc4_state->ptr0_offset] = addr;
 }
 
+static void vc4_plane_atomic_async_update(struct drm_plane *plane,
+					  struct drm_plane_state *state)
+{
+	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+
+	if (plane->state->fb != state->fb) {
+		vc4_plane_async_set_fb(plane, state->fb);
+		drm_atomic_set_fb_for_plane(plane->state, state->fb);
+	}
+
+	/* Set the cursor's position on the screen.  This is the
+	 * expected change from the drm_mode_cursor_universal()
+	 * helper.
+	 */
+	plane->state->crtc_x = state->crtc_x;
+	plane->state->crtc_y = state->crtc_y;
+
+	/* Allow changing the start position within the cursor BO, if
+	 * that matters.
+	 */
+	plane->state->src_x = state->src_x;
+	plane->state->src_y = state->src_y;
+
+	/* Update the display list based on the new crtc_x/y. */
+	vc4_plane_atomic_check(plane, plane->state);
+
+	/* Note that we can't just call vc4_plane_write_dlist()
+	 * because that would smash the context data that the HVS is
+	 * currently using.
+	 */
+	writel(vc4_state->dlist[vc4_state->pos0_offset],
+	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
+	writel(vc4_state->dlist[vc4_state->pos2_offset],
+	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
+	writel(vc4_state->dlist[vc4_state->ptr0_offset],
+	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+}
+
+static int vc4_plane_atomic_async_check(struct drm_plane *plane,
+					struct drm_plane_state *state)
+{
+	/* No configuring new scaling in the fast path. */
+	if (plane->state->crtc_w != state->crtc_w ||
+	    plane->state->crtc_h != state->crtc_h ||
+	    plane->state->src_w != state->src_w ||
+	    plane->state->src_h != state->src_h)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int vc4_prepare_fb(struct drm_plane *plane,
 			  struct drm_plane_state *state)
 {
@@ -780,6 +831,8 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
 	.atomic_update = vc4_plane_atomic_update,
 	.prepare_fb = vc4_prepare_fb,
 	.cleanup_fb = vc4_cleanup_fb,
+	.atomic_async_check = vc4_plane_atomic_async_check,
+	.atomic_async_update = vc4_plane_atomic_async_update,
 };
 
 static void vc4_plane_destroy(struct drm_plane *plane)
@@ -788,82 +841,6 @@ static void vc4_plane_destroy(struct drm_plane *plane)
 	drm_plane_cleanup(plane);
 }
 
-/* Implements immediate (non-vblank-synced) updates of the cursor
- * position, or falls back to the atomic helper otherwise.
- */
-static int
-vc4_update_plane(struct drm_plane *plane,
-		 struct drm_crtc *crtc,
-		 struct drm_framebuffer *fb,
-		 int crtc_x, int crtc_y,
-		 unsigned int crtc_w, unsigned int crtc_h,
-		 uint32_t src_x, uint32_t src_y,
-		 uint32_t src_w, uint32_t src_h,
-		 struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_plane_state *plane_state;
-	struct vc4_plane_state *vc4_state;
-
-	if (plane != crtc->cursor)
-		goto out;
-
-	plane_state = plane->state;
-	vc4_state = to_vc4_plane_state(plane_state);
-
-	if (!plane_state)
-		goto out;
-
-	/* No configuring new scaling in the fast path. */
-	if (crtc_w != plane_state->crtc_w ||
-	    crtc_h != plane_state->crtc_h ||
-	    src_w != plane_state->src_w ||
-	    src_h != plane_state->src_h) {
-		goto out;
-	}
-
-	if (fb != plane_state->fb) {
-		drm_atomic_set_fb_for_plane(plane->state, fb);
-		vc4_plane_async_set_fb(plane, fb);
-	}
-
-	/* Set the cursor's position on the screen.  This is the
-	 * expected change from the drm_mode_cursor_universal()
-	 * helper.
-	 */
-	plane_state->crtc_x = crtc_x;
-	plane_state->crtc_y = crtc_y;
-
-	/* Allow changing the start position within the cursor BO, if
-	 * that matters.
-	 */
-	plane_state->src_x = src_x;
-	plane_state->src_y = src_y;
-
-	/* Update the display list based on the new crtc_x/y. */
-	vc4_plane_atomic_check(plane, plane_state);
-
-	/* Note that we can't just call vc4_plane_write_dlist()
-	 * because that would smash the context data that the HVS is
-	 * currently using.
-	 */
-	writel(vc4_state->dlist[vc4_state->pos0_offset],
-	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
-	writel(vc4_state->dlist[vc4_state->pos2_offset],
-	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
-	writel(vc4_state->dlist[vc4_state->ptr0_offset],
-	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
-
-	return 0;
-
-out:
-	return drm_atomic_helper_update_plane(plane, crtc, fb,
-					      crtc_x, crtc_y,
-					      crtc_w, crtc_h,
-					      src_x, src_y,
-					      src_w, src_h,
-					      ctx);
-}
-
 static bool vc4_format_mod_supported(struct drm_plane *plane,
 				     uint32_t format,
 				     uint64_t modifier)
@@ -891,7 +868,7 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
 }
 
 static const struct drm_plane_funcs vc4_plane_funcs = {
-	.update_plane = vc4_update_plane,
+	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = vc4_plane_destroy,
 	.set_property = NULL,

From 02edfd9c1f63cfd9535009419177148758b20df4 Mon Sep 17 00:00:00 2001
From: Boris Brezillon <boris.brezillon@bootlin.com>
Date: Fri, 30 Mar 2018 16:55:18 +0200
Subject: [PATCH 0406/1286] drm/atomic: Add sanity checks to
 drm_atomic_helper_async_commit()

->atomic_async_update() requires that drivers update the plane->state
object before returning. Make sure at least common properties have been
updated.
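
In other words, a driver's ->atomic_async_update() is expected to fold the
new state into plane->state in place before returning, roughly like this
(hypothetical driver sketch, mirroring what vc4 does):

	static void foo_plane_atomic_async_update(struct drm_plane *plane,
						  struct drm_plane_state *new_state)
	{
		plane->state->crtc_x = new_state->crtc_x;
		plane->state->crtc_y = new_state->crtc_y;
		plane->state->src_x = new_state->src_x;
		plane->state->src_y = new_state->src_y;
		drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
	}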

Cc: Gustavo Padovan <gustavo@padovan.org>
Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
Acked-by: Eric Anholt <eric@anholt.net>
Acked-by: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180330145518.29770-1-boris.brezillon@bootlin.com
---
 drivers/gpu/drm/drm_atomic_helper.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 0587a0a2f3aa..9cb2209f6fc8 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1572,6 +1572,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
 	for_each_new_plane_in_state(state, plane, plane_state, i) {
 		funcs = plane->helper_private;
 		funcs->atomic_async_update(plane, plane_state);
+
+		/*
+		 * ->atomic_async_update() is supposed to update the
+		 * plane->state in-place, make sure at least common
+		 * properties have been properly updated.
+		 */
+		WARN_ON_ONCE(plane->state->fb != plane_state->fb);
+		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
+		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
+		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
+		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_async_commit);

From 4394e96423444c1b37c8bb1ed0b1de8391dedafa Mon Sep 17 00:00:00 2001
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Date: Tue, 17 Apr 2018 10:40:12 +0300
Subject: [PATCH 0407/1286] drm/xen-front: Remove CMA support

It turns out this was only needed to paper over a bug in the CMA
helpers, which was addressed in

commit 998fb1a0f478b83492220ff79583bf9ad538bdd8
Author: Liviu Dudau <Liviu.Dudau@arm.com>
Date:   Fri Nov 10 13:33:10 2017 +0000

    drm: gem_cma_helper.c: Allow importing of contiguous scatterlists with nents > 1

Without this the following pipeline didn't work:

domU:
1. xen-front allocates a non-contig buffer
2. creates grants out of it

dom0:
3. converts the grants into a dma-buf. Since they're non-contig, the
scatter-list is huge.
4. imports it into rcar-du, which requires dma-contig memory for
scanout.

-> On this particular platform there is an IOMMU, so in theory this
should work. In practice it failed because of the huge number of sg
entries, even though the IOMMU driver mapped them all into a dma-contig
range.

With a guest-contig buffer allocated in step 1, this problem doesn't
exist. But there's technically no reason to require guest-contig
memory for xen buffer sharing using grants.

Given all that, the xen-front CMA support is not needed and should be
removed.

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180417074012.21311-1-andr2000@gmail.com
---
 Documentation/gpu/xen-front.rst             | 12 ----
 drivers/gpu/drm/xen/Kconfig                 | 13 ----
 drivers/gpu/drm/xen/Makefile                |  9 +--
 drivers/gpu/drm/xen/xen_drm_front.c         | 62 +++-------------
 drivers/gpu/drm/xen/xen_drm_front.h         | 42 ++---------
 drivers/gpu/drm/xen/xen_drm_front_gem.c     | 12 +---
 drivers/gpu/drm/xen/xen_drm_front_gem.h     |  3 -
 drivers/gpu/drm/xen/xen_drm_front_gem_cma.c | 79 ---------------------
 drivers/gpu/drm/xen/xen_drm_front_shbuf.c   | 22 ------
 drivers/gpu/drm/xen/xen_drm_front_shbuf.h   |  8 ---
 10 files changed, 21 insertions(+), 241 deletions(-)
 delete mode 100644 drivers/gpu/drm/xen/xen_drm_front_gem_cma.c

diff --git a/Documentation/gpu/xen-front.rst b/Documentation/gpu/xen-front.rst
index 009d942386c5..d988da7d1983 100644
--- a/Documentation/gpu/xen-front.rst
+++ b/Documentation/gpu/xen-front.rst
@@ -18,18 +18,6 @@ Buffers allocated by the frontend driver
 .. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
    :doc: Buffers allocated by the frontend driver
 
-With GEM CMA helpers
-~~~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
-   :doc: With GEM CMA helpers
-
-Without GEM CMA helpers
-~~~~~~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/xen/xen_drm_front.h
-   :doc: Without GEM CMA helpers
-
 Buffers allocated by the backend
 --------------------------------
 
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
index 4f4abc91f3b6..4cca160782ab 100644
--- a/drivers/gpu/drm/xen/Kconfig
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -15,16 +15,3 @@ config DRM_XEN_FRONTEND
 	help
 	  Choose this option if you want to enable a para-virtualized
 	  frontend DRM/KMS driver for Xen guest OSes.
-
-config DRM_XEN_FRONTEND_CMA
-	bool "Use DRM CMA to allocate dumb buffers"
-	depends on DRM_XEN_FRONTEND
-	select DRM_KMS_CMA_HELPER
-	select DRM_GEM_CMA_HELPER
-	help
-	  Use DRM CMA helpers to allocate display buffers.
-	  This is useful for the use-cases when guest driver needs to
-	  share or export buffers to other drivers which only expect
-	  contiguous buffers.
-	  Note: in this mode driver cannot use buffers allocated
-	  by the backend.
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
index 352730dc6c13..712afff5ffc3 100644
--- a/drivers/gpu/drm/xen/Makefile
+++ b/drivers/gpu/drm/xen/Makefile
@@ -5,12 +5,7 @@ drm_xen_front-objs := xen_drm_front.o \
 		      xen_drm_front_conn.o \
 		      xen_drm_front_evtchnl.o \
 		      xen_drm_front_shbuf.o \
-		      xen_drm_front_cfg.o
-
-ifeq ($(CONFIG_DRM_XEN_FRONTEND_CMA),y)
-	drm_xen_front-objs += xen_drm_front_gem_cma.o
-else
-	drm_xen_front-objs += xen_drm_front_gem.o
-endif
+		      xen_drm_front_cfg.o \
+		      xen_drm_front_gem.o
 
 obj-$(CONFIG_DRM_XEN_FRONTEND) += drm_xen_front.o
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 4a08b77f1c9e..1b0ea9ac330e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -12,7 +12,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
 
 #include <linux/of_device.h>
 
@@ -167,10 +166,9 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
 	return ret;
 }
 
-static int be_dbuf_create_int(struct xen_drm_front_info *front_info,
+int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
 			      u64 dbuf_cookie, u32 width, u32 height,
-			      u32 bpp, u64 size, struct page **pages,
-			      struct sg_table *sgt)
+			      u32 bpp, u64 size, struct page **pages)
 {
 	struct xen_drm_front_evtchnl *evtchnl;
 	struct xen_drm_front_shbuf *shbuf;
@@ -187,7 +185,6 @@ static int be_dbuf_create_int(struct xen_drm_front_info *front_info,
 	buf_cfg.xb_dev = front_info->xb_dev;
 	buf_cfg.pages = pages;
 	buf_cfg.size = size;
-	buf_cfg.sgt = sgt;
 	buf_cfg.be_alloc = front_info->cfg.be_alloc;
 
 	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
@@ -237,22 +234,6 @@ fail:
 	return ret;
 }
 
-int xen_drm_front_dbuf_create_from_sgt(struct xen_drm_front_info *front_info,
-				       u64 dbuf_cookie, u32 width, u32 height,
-				       u32 bpp, u64 size, struct sg_table *sgt)
-{
-	return be_dbuf_create_int(front_info, dbuf_cookie, width, height,
-				  bpp, size, NULL, sgt);
-}
-
-int xen_drm_front_dbuf_create_from_pages(struct xen_drm_front_info *front_info,
-					 u64 dbuf_cookie, u32 width, u32 height,
-					 u32 bpp, u64 size, struct page **pages)
-{
-	return be_dbuf_create_int(front_info, dbuf_cookie, width, height,
-				  bpp, size, pages, NULL);
-}
-
 static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
 				      u64 dbuf_cookie)
 {
@@ -434,24 +415,11 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
 		goto fail;
 	}
 
-	/*
-	 * In case of CONFIG_DRM_XEN_FRONTEND_CMA gem_obj is constructed
-	 * via DRM CMA helpers and doesn't have ->pages allocated
-	 * (xendrm_gem_get_pages will return NULL), but instead can provide
-	 * sg table
-	 */
-	if (xen_drm_front_gem_get_pages(obj))
-		ret = xen_drm_front_dbuf_create_from_pages(drm_info->front_info,
-				xen_drm_front_dbuf_to_cookie(obj),
-				args->width, args->height, args->bpp,
-				args->size,
-				xen_drm_front_gem_get_pages(obj));
-	else
-		ret = xen_drm_front_dbuf_create_from_sgt(drm_info->front_info,
-				xen_drm_front_dbuf_to_cookie(obj),
-				args->width, args->height, args->bpp,
-				args->size,
-				xen_drm_front_gem_get_sg_table(obj));
+	ret = xen_drm_front_dbuf_create(drm_info->front_info,
+					xen_drm_front_dbuf_to_cookie(obj),
+					args->width, args->height, args->bpp,
+					args->size,
+					xen_drm_front_gem_get_pages(obj));
 	if (ret)
 		goto fail_backend;
 
@@ -523,11 +491,7 @@ static const struct file_operations xen_drm_dev_fops = {
 	.poll           = drm_poll,
 	.read           = drm_read,
 	.llseek         = no_llseek,
-#ifdef CONFIG_DRM_XEN_FRONTEND_CMA
-	.mmap           = drm_gem_cma_mmap,
-#else
 	.mmap           = xen_drm_front_gem_mmap,
-#endif
 };
 
 static const struct vm_operations_struct xen_drm_drv_vm_ops = {
@@ -547,6 +511,9 @@ static struct drm_driver xen_drm_driver = {
 	.gem_prime_export          = drm_gem_prime_export,
 	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
 	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
+	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
+	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
+	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
 	.dumb_create               = xen_drm_drv_dumb_create,
 	.fops                      = &xen_drm_dev_fops,
 	.name                      = "xendrm-du",
@@ -555,15 +522,6 @@ static struct drm_driver xen_drm_driver = {
 	.major                     = 1,
 	.minor                     = 0,
 
-#ifdef CONFIG_DRM_XEN_FRONTEND_CMA
-	.gem_prime_vmap            = drm_gem_cma_prime_vmap,
-	.gem_prime_vunmap          = drm_gem_cma_prime_vunmap,
-	.gem_prime_mmap            = drm_gem_cma_prime_mmap,
-#else
-	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
-	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
-	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
-#endif
 };
 
 static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index 16554b2463d8..2c2479b571ae 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -23,40 +23,14 @@
  *
  * Depending on the requirements for the para-virtualized environment, namely
  * requirements dictated by the accompanying DRM/(v)GPU drivers running in both
- * host and guest environments, number of operating modes of para-virtualized
- * display driver are supported:
- *
- * - display buffers can be allocated by either frontend driver or backend
- * - display buffers can be allocated to be contiguous in memory or not
- *
- * Note! Frontend driver itself has no dependency on contiguous memory for
- * its operation.
+ * host and guest environments, display buffers can be allocated by either
+ * frontend driver or backend.
  */
 
 /**
  * DOC: Buffers allocated by the frontend driver
  *
- * The below modes of operation are configured at compile-time via
- * frontend driver's kernel configuration:
- */
-
-/**
- * DOC: With GEM CMA helpers
- *
- * This use-case is useful when used with accompanying DRM/vGPU driver in
- * guest domain which was designed to only work with contiguous buffers,
- * e.g. DRM driver based on GEM CMA helpers: such drivers can only import
- * contiguous PRIME buffers, thus requiring frontend driver to provide
- * such. In order to implement this mode of operation para-virtualized
- * frontend driver can be configured to use GEM CMA helpers.
- */
-
-/**
- * DOC: Without GEM CMA helpers
- *
- * If accompanying drivers can cope with non-contiguous memory then, to
- * lower pressure on CMA subsystem of the kernel, driver can allocate
- * buffers from system memory.
+ * In this mode of operation driver allocates buffers from system memory.
  *
  * Note! If used with accompanying DRM/(v)GPU drivers this mode of operation
  * may require IOMMU support on the platform, so accompanying DRM/vGPU
@@ -164,13 +138,9 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
 			   u32 x, u32 y, u32 width, u32 height,
 			   u32 bpp, u64 fb_cookie);
 
-int xen_drm_front_dbuf_create_from_sgt(struct xen_drm_front_info *front_info,
-				       u64 dbuf_cookie, u32 width, u32 height,
-				       u32 bpp, u64 size, struct sg_table *sgt);
-
-int xen_drm_front_dbuf_create_from_pages(struct xen_drm_front_info *front_info,
-					 u64 dbuf_cookie, u32 width, u32 height,
-					 u32 bpp, u64 size, struct page **pages);
+int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
+			      u64 dbuf_cookie, u32 width, u32 height,
+			      u32 bpp, u64 size, struct page **pages);
 
 int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
 			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 3b04a2269d7a..c85bfe7571cb 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -210,15 +210,9 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/*
-	 * N.B. Although we have an API to create display buffer from sgt
-	 * we use pages API, because we still need those for GEM handling,
-	 * e.g. for mapping etc.
-	 */
-	ret = xen_drm_front_dbuf_create_from_pages(drm_info->front_info,
-						   xen_drm_front_dbuf_to_cookie(&xen_obj->base),
-						   0, 0, 0, size,
-						   xen_obj->pages);
+	ret = xen_drm_front_dbuf_create(drm_info->front_info,
+					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
+					0, 0, 0, size, xen_obj->pages);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
index 55e531f5a763..d5ab734fdafe 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -27,8 +27,6 @@ struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);
 
 void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);
 
-#ifndef CONFIG_DRM_XEN_FRONTEND_CMA
-
 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj);
@@ -38,6 +36,5 @@ void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
 
 int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
 				 struct vm_area_struct *vma);
-#endif
 
 #endif /* __XEN_DRM_FRONT_GEM_H */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem_cma.c b/drivers/gpu/drm/xen/xen_drm_front_gem_cma.c
deleted file mode 100644
index ba30a4bc2a39..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_gem_cma.c
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-
-/*
- *  Xen para-virtual DRM device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-
-#include "xen_drm_front.h"
-#include "xen_drm_front_gem.h"
-
-struct drm_gem_object *
-xen_drm_front_gem_import_sg_table(struct drm_device *dev,
-				  struct dma_buf_attachment *attach,
-				  struct sg_table *sgt)
-{
-	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
-	struct drm_gem_object *gem_obj;
-	struct drm_gem_cma_object *cma_obj;
-	int ret;
-
-	gem_obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
-	if (IS_ERR_OR_NULL(gem_obj))
-		return gem_obj;
-
-	cma_obj = to_drm_gem_cma_obj(gem_obj);
-
-	ret = xen_drm_front_dbuf_create_from_sgt(drm_info->front_info,
-						 xen_drm_front_dbuf_to_cookie(gem_obj),
-						 0, 0, 0, gem_obj->size,
-						 drm_gem_cma_prime_get_sg_table(gem_obj));
-	if (ret < 0)
-		return ERR_PTR(ret);
-
-	DRM_DEBUG("Imported CMA buffer of size %zu\n", gem_obj->size);
-
-	return gem_obj;
-}
-
-struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
-{
-	return drm_gem_cma_prime_get_sg_table(gem_obj);
-}
-
-struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
-						size_t size)
-{
-	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
-	struct drm_gem_cma_object *cma_obj;
-
-	if (drm_info->front_info->cfg.be_alloc) {
-		/* This use-case is not yet supported and probably won't be */
-		DRM_ERROR("Backend allocated buffers and CMA helpers are not supported at the same time\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	cma_obj = drm_gem_cma_create(dev, size);
-	if (IS_ERR_OR_NULL(cma_obj))
-		return ERR_CAST(cma_obj);
-
-	return &cma_obj->base;
-}
-
-void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
-{
-	drm_gem_cma_free_object(gem_obj);
-}
-
-struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
-{
-	return NULL;
-}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
index 19914dde4b3d..d5705251a0d6 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
@@ -89,10 +89,6 @@ void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
 	}
 	kfree(buf->grefs);
 	kfree(buf->directory);
-	if (buf->sgt) {
-		sg_free_table(buf->sgt);
-		kvfree(buf->pages);
-	}
 	kfree(buf);
 }
 
@@ -350,17 +346,6 @@ static int grant_references(struct xen_drm_front_shbuf *buf)
 
 static int alloc_storage(struct xen_drm_front_shbuf *buf)
 {
-	if (buf->sgt) {
-		buf->pages = kvmalloc_array(buf->num_pages,
-					    sizeof(struct page *), GFP_KERNEL);
-		if (!buf->pages)
-			return -ENOMEM;
-
-		if (drm_prime_sg_to_page_addr_arrays(buf->sgt, buf->pages,
-						     NULL, buf->num_pages) < 0)
-			return -EINVAL;
-	}
-
 	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
 	if (!buf->grefs)
 		return -ENOMEM;
@@ -396,12 +381,6 @@ xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
 	struct xen_drm_front_shbuf *buf;
 	int ret;
 
-	/* either pages or sgt, not both */
-	if (unlikely(cfg->pages && cfg->sgt)) {
-		DRM_ERROR("Cannot handle buffer allocation with both pages and sg table provided\n");
-		return NULL;
-	}
-
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 	if (!buf)
 		return NULL;
@@ -413,7 +392,6 @@ xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
 
 	buf->xb_dev = cfg->xb_dev;
 	buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
-	buf->sgt = cfg->sgt;
 	buf->pages = cfg->pages;
 
 	buf->ops->calc_num_grefs(buf);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
index 8c037fd7608b..7545c692539e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
@@ -29,16 +29,9 @@ struct xen_drm_front_shbuf {
 	grant_ref_t *grefs;
 	unsigned char *directory;
 
-	/*
-	 * there are 2 ways to provide backing storage for this shared buffer:
-	 * either pages or sgt. if buffer created from sgt then we own
-	 * the pages and must free those ourselves on closure
-	 */
 	int num_pages;
 	struct page **pages;
 
-	struct sg_table *sgt;
-
 	struct xenbus_device *xb_dev;
 
 	/* these are the ops used internally depending on be_alloc mode */
@@ -52,7 +45,6 @@ struct xen_drm_front_shbuf_cfg {
 	struct xenbus_device *xb_dev;
 	size_t size;
 	struct page **pages;
-	struct sg_table *sgt;
 	bool be_alloc;
 };
 

From 8221229046e862977ae93ec9d34aa583fbd10397 Mon Sep 17 00:00:00 2001
From: Gaurav K Singh <gaurav.k.singh@intel.com>
Date: Tue, 17 Apr 2018 23:52:18 +0530
Subject: [PATCH 0408/1286] drm/i915/audio: Fix audio detection issue on GLK

On Geminilake, the audio card is sometimes not detected after a
reboot. This is a sporadic issue on Geminilake: the HW codec and the
HD audio controller link go out of sync. The i915 driver already
carries a fix for this, but it was not being invoked for GLK. Extend
that fix to cover GLK as well.

Tested by Du, Wenkai on a GLK board.

Bspec: 21829

v2: Instead of checking the GEN9_BC, BXT and GLK macros, use the IS_GEN9 macro (Jani N)

Cc: <stable@vger.kernel.org> # b651bd2a3ae3 ("drm/i915/audio: Fix audio enumeration issue on BXT")
Cc: <stable@vger.kernel.org>
Signed-off-by: Gaurav K Singh <gaurav.k.singh@intel.com>
Reviewed-by: Abhay Kumar <abhay.Kumar@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1523989338-29677-1-git-send-email-gaurav.k.singh@intel.com
---
 drivers/gpu/drm/i915/intel_audio.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 656f6c931341..3ea566f99450 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
-	if (!IS_GEN9_BC(dev_priv) && !IS_BROXTON(dev_priv))
+	if (!IS_GEN9(dev_priv))
 		return;
 
 	i915_audio_component_get_power(kdev);

From 98ff5c78307b4177b7e44783a04b208189e21418 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 18 Apr 2018 19:40:50 +0100
Subject: [PATCH 0409/1286] drm/i915: Move the priotree struct to its own
 headers

Over time the priotree has grown from a sorted list to a more
complicated structure for propagating constraints along the dependency
chain to try and resolve priority inversion. Start to segregate this
information from the rest of the request/fence tracking.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418184052.7129-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.h   | 39 +-----------------
 drivers/gpu/drm/i915/i915_scheduler.h | 57 +++++++++++++++++++++++++++
 2 files changed, 58 insertions(+), 38 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_scheduler.h

diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7d6eb82eeb91..e6f7c5f4ec7f 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,6 +28,7 @@
 #include <linux/dma-fence.h>
 
 #include "i915_gem.h"
+#include "i915_scheduler.h"
 #include "i915_sw_fence.h"
 
 #include <uapi/drm/i915_drm.h>
@@ -48,44 +49,6 @@ struct intel_signal_node {
 	struct list_head link;
 };
 
-struct i915_dependency {
-	struct i915_priotree *signaler;
-	struct list_head signal_link;
-	struct list_head wait_link;
-	struct list_head dfs_link;
-	unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- */
-struct i915_priotree {
-	struct list_head signalers_list; /* those before us, we depend upon */
-	struct list_head waiters_list; /* those after us, they depend upon us */
-	struct list_head link;
-	int priority;
-};
-
-enum {
-	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
-	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
-	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
-	I915_PRIORITY_INVALID = INT_MIN
-};
-
 struct i915_capture_list {
 	struct i915_capture_list *next;
 	struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
new file mode 100644
index 000000000000..9d6ea9fa6e59
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_H_
+#define _I915_SCHEDULER_H_
+
+#include <linux/bitops.h>
+
+#include <uapi/drm/i915_drm.h>
+
+enum {
+	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+	I915_PRIORITY_INVALID = INT_MIN
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendering into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ *
+ * There is no active component to the "scheduler". As we know the dependency
+ * DAG of each request, we are able to insert it into a sorted queue when it
+ * is ready, and are able to reorder its portion of the graph to accommodate
+ * dynamic priority changes.
+ */
+struct i915_priotree {
+	struct list_head signalers_list; /* those before us, we depend upon */
+	struct list_head waiters_list; /* those after us, they depend upon us */
+	struct list_head link;
+	int priority;
+};
+
+struct i915_dependency {
+	struct i915_priotree *signaler;
+	struct list_head signal_link;
+	struct list_head wait_link;
+	struct list_head dfs_link;
+	unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+#endif /* _I915_SCHEDULER_H_ */

From 0c7112a00272c633a79cad91ea9c1a0f40330f5d Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 18 Apr 2018 19:40:51 +0100
Subject: [PATCH 0410/1286] drm/i915: Rename priotree to sched

Having moved the priotree struct into i915_scheduler.h, identify it as
the scheduling element and rebrand into i915_sched. This becomes more
useful as we start attaching more information we require to propagate
through the scheduler.

v2: Use i915_sched_node for future distinctiveness

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418184052.7129-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gpu_error.c       |  2 +-
 drivers/gpu/drm/i915/i915_request.c         | 66 +++++++++---------
 drivers/gpu/drm/i915/i915_request.h         |  6 +-
 drivers/gpu/drm/i915/i915_scheduler.h       |  4 +-
 drivers/gpu/drm/i915/intel_engine_cs.c      |  4 +-
 drivers/gpu/drm/i915/intel_guc_submission.c |  8 +--
 drivers/gpu/drm/i915/intel_lrc.c            | 77 +++++++++++----------
 7 files changed, 85 insertions(+), 82 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index effaf982b19b..6b5b9b3ded02 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1278,7 +1278,7 @@ static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
-	erq->priority = request->priotree.priority;
+	erq->priority = request->sched.priority;
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9ca9c24b4421..dfcc6a0df3fb 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -125,22 +125,22 @@ i915_dependency_free(struct drm_i915_private *i915,
 }
 
 static void
-__i915_priotree_add_dependency(struct i915_priotree *pt,
-			       struct i915_priotree *signal,
-			       struct i915_dependency *dep,
-			       unsigned long flags)
+__i915_sched_node_add_dependency(struct i915_sched_node *node,
+				 struct i915_sched_node *signal,
+				 struct i915_dependency *dep,
+				 unsigned long flags)
 {
 	INIT_LIST_HEAD(&dep->dfs_link);
 	list_add(&dep->wait_link, &signal->waiters_list);
-	list_add(&dep->signal_link, &pt->signalers_list);
+	list_add(&dep->signal_link, &node->signalers_list);
 	dep->signaler = signal;
 	dep->flags = flags;
 }
 
 static int
-i915_priotree_add_dependency(struct drm_i915_private *i915,
-			     struct i915_priotree *pt,
-			     struct i915_priotree *signal)
+i915_sched_node_add_dependency(struct drm_i915_private *i915,
+			       struct i915_sched_node *node,
+			       struct i915_sched_node *signal)
 {
 	struct i915_dependency *dep;
 
@@ -148,16 +148,18 @@ i915_priotree_add_dependency(struct drm_i915_private *i915,
 	if (!dep)
 		return -ENOMEM;
 
-	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
+	__i915_sched_node_add_dependency(node, signal, dep,
+					 I915_DEPENDENCY_ALLOC);
 	return 0;
 }
 
 static void
-i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
+i915_sched_node_fini(struct drm_i915_private *i915,
+		     struct i915_sched_node *node)
 {
-	struct i915_dependency *dep, *next;
+	struct i915_dependency *dep, *tmp;
 
-	GEM_BUG_ON(!list_empty(&pt->link));
+	GEM_BUG_ON(!list_empty(&node->link));
 
 	/*
 	 * Everyone we depended upon (the fences we wait to be signaled)
@@ -165,8 +167,8 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 	 * However, retirement is run independently on each timeline and
 	 * so we may be called out-of-order.
 	 */
-	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
-		GEM_BUG_ON(!i915_priotree_signaled(dep->signaler));
+	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+		GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
 		list_del(&dep->wait_link);
@@ -175,8 +177,8 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 	}
 
 	/* Remove ourselves from everyone who depends upon us */
-	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
-		GEM_BUG_ON(dep->signaler != pt);
+	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+		GEM_BUG_ON(dep->signaler != node);
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
 		list_del(&dep->signal_link);
@@ -186,12 +188,12 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 }
 
 static void
-i915_priotree_init(struct i915_priotree *pt)
+i915_sched_node_init(struct i915_sched_node *node)
 {
-	INIT_LIST_HEAD(&pt->signalers_list);
-	INIT_LIST_HEAD(&pt->waiters_list);
-	INIT_LIST_HEAD(&pt->link);
-	pt->priority = I915_PRIORITY_INVALID;
+	INIT_LIST_HEAD(&node->signalers_list);
+	INIT_LIST_HEAD(&node->waiters_list);
+	INIT_LIST_HEAD(&node->link);
+	node->priority = I915_PRIORITY_INVALID;
 }
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -422,7 +424,7 @@ static void i915_request_retire(struct i915_request *request)
 	}
 	spin_unlock_irq(&request->lock);
 
-	i915_priotree_fini(request->i915, &request->priotree);
+	i915_sched_node_fini(request->i915, &request->sched);
 	i915_request_put(request);
 }
 
@@ -725,7 +727,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
 	init_waitqueue_head(&rq->execute);
 
-	i915_priotree_init(&rq->priotree);
+	i915_sched_node_init(&rq->sched);
 
 	INIT_LIST_HEAD(&rq->active_list);
 	rq->i915 = i915;
@@ -777,8 +779,8 @@ err_unwind:
 
 	/* Make sure we didn't add ourselves to external state before freeing */
 	GEM_BUG_ON(!list_empty(&rq->active_list));
-	GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list));
-	GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list));
+	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
+	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
 
 	kmem_cache_free(i915->requests, rq);
 err_unreserve:
@@ -800,9 +802,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 		return 0;
 
 	if (to->engine->schedule) {
-		ret = i915_priotree_add_dependency(to->i915,
-						   &to->priotree,
-						   &from->priotree);
+		ret = i915_sched_node_add_dependency(to->i915,
+						     &to->sched,
+						     &from->sched);
 		if (ret < 0)
 			return ret;
 	}
@@ -1033,10 +1035,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
 					     &request->submitq);
 		if (engine->schedule)
-			__i915_priotree_add_dependency(&request->priotree,
-						       &prev->priotree,
-						       &request->dep,
-						       0);
+			__i915_sched_node_add_dependency(&request->sched,
+							 &prev->sched,
+							 &request->dep,
+							 0);
 	}
 
 	spin_lock_irq(&timeline->lock);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index e6f7c5f4ec7f..35b8a9856daa 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -117,7 +117,7 @@ struct i915_request {
 	 * to retirement), i.e. bidirectional dependency information for the
 	 * request not tied to individual fences.
 	 */
-	struct i915_priotree priotree;
+	struct i915_sched_node sched;
 	struct i915_dependency dep;
 
 	/**
@@ -306,10 +306,10 @@ static inline bool i915_request_started(const struct i915_request *rq)
 				 seqno - 1);
 }
 
-static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
+static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
 {
 	const struct i915_request *rq =
-		container_of(pt, const struct i915_request, priotree);
+		container_of(node, const struct i915_request, sched);
 
 	return i915_request_completed(rq);
 }
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 9d6ea9fa6e59..754243e0f955 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -38,7 +38,7 @@ enum {
  * is ready, and are able to reorder its portion of the graph to accommodate
  * dynamic priority changes.
  */
-struct i915_priotree {
+struct i915_sched_node {
 	struct list_head signalers_list; /* those before us, we depend upon */
 	struct list_head waiters_list; /* those after us, they depend upon us */
 	struct list_head link;
@@ -46,7 +46,7 @@ struct i915_priotree {
 };
 
 struct i915_dependency {
-	struct i915_priotree *signaler;
+	struct i915_sched_node *signaler;
 	struct list_head signal_link;
 	struct list_head wait_link;
 	struct list_head dfs_link;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 1a8370779bbb..b542b1a4dddc 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1123,7 +1123,7 @@ static void print_request(struct drm_printer *m,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
 		   rq->fence.context, rq->fence.seqno,
-		   rq->priotree.priority,
+		   rq->sched.priority,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
 }
@@ -1367,7 +1367,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		struct i915_priolist *p =
 			rb_entry(rb, typeof(*p), node);
 
-		list_for_each_entry(rq, &p->requests, priotree.link)
+		list_for_each_entry(rq, &p->requests, sched.link)
 			print_request(m, rq, "\t\tQ ");
 	}
 	spin_unlock_irq(&engine->timeline->lock);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 97121230656c..0755f5cae950 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -659,7 +659,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->sched.priority;
 }
 
 static inline int port_prio(const struct execlist_port *port)
@@ -706,11 +706,11 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
 			if (last && rq->ctx != last->ctx) {
 				if (port == last_port) {
 					__list_del_many(&p->requests,
-							&rq->priotree.link);
+							&rq->sched.link);
 					goto done;
 				}
 
@@ -719,7 +719,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 				port++;
 			}
 
-			INIT_LIST_HEAD(&rq->priotree.link);
+			INIT_LIST_HEAD(&rq->sched.link);
 
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4f728587a756..062ed6e54420 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -177,7 +177,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->sched.priority;
 }
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
@@ -258,7 +258,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 
 static struct i915_priolist *
 lookup_priolist(struct intel_engine_cs *engine,
-		struct i915_priotree *pt,
+		struct i915_sched_node *node,
 		int prio)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -344,10 +344,10 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
 		if (rq_prio(rq) != last_prio) {
 			last_prio = rq_prio(rq);
-			p = lookup_priolist(engine, &rq->priotree, last_prio);
+			p = lookup_priolist(engine, &rq->sched, last_prio);
 		}
 
-		list_add(&rq->priotree.link, &p->requests);
+		list_add(&rq->sched.link, &p->requests);
 	}
 }
 
@@ -654,7 +654,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not
@@ -674,7 +674,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 */
 				if (port == last_port) {
 					__list_del_many(&p->requests,
-							&rq->priotree.link);
+							&rq->sched.link);
 					goto done;
 				}
 
@@ -688,7 +688,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				if (ctx_single_port_submission(last->ctx) ||
 				    ctx_single_port_submission(rq->ctx)) {
 					__list_del_many(&p->requests,
-							&rq->priotree.link);
+							&rq->sched.link);
 					goto done;
 				}
 
@@ -701,7 +701,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(port_isset(port));
 			}
 
-			INIT_LIST_HEAD(&rq->priotree.link);
+			INIT_LIST_HEAD(&rq->sched.link);
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
 			last = rq;
@@ -882,8 +882,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	while (rb) {
 		struct i915_priolist *p = to_priolist(rb);
 
-		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
-			INIT_LIST_HEAD(&rq->priotree.link);
+		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+			INIT_LIST_HEAD(&rq->sched.link);
 
 			dma_fence_set_error(&rq->fence, -EIO);
 			__i915_request_submit(rq);
@@ -1116,10 +1116,11 @@ static void execlists_submission_tasklet(unsigned long data)
 }
 
 static void queue_request(struct intel_engine_cs *engine,
-			  struct i915_priotree *pt,
+			  struct i915_sched_node *node,
 			  int prio)
 {
-	list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests);
+	list_add_tail(&node->link,
+		      &lookup_priolist(engine, node, prio)->requests);
 }
 
 static void __submit_queue(struct intel_engine_cs *engine, int prio)
@@ -1142,24 +1143,24 @@ static void execlists_submit_request(struct i915_request *request)
 	/* Will be called from irq-context when using foreign fences. */
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
-	queue_request(engine, &request->priotree, rq_prio(request));
+	queue_request(engine, &request->sched, rq_prio(request));
 	submit_queue(engine, rq_prio(request));
 
 	GEM_BUG_ON(!engine->execlists.first);
-	GEM_BUG_ON(list_empty(&request->priotree.link));
+	GEM_BUG_ON(list_empty(&request->sched.link));
 
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
-static struct i915_request *pt_to_request(struct i915_priotree *pt)
+static struct i915_request *sched_to_request(struct i915_sched_node *node)
 {
-	return container_of(pt, struct i915_request, priotree);
+	return container_of(node, struct i915_request, sched);
 }
 
 static struct intel_engine_cs *
-pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 {
-	struct intel_engine_cs *engine = pt_to_request(pt)->engine;
+	struct intel_engine_cs *engine = sched_to_request(node)->engine;
 
 	GEM_BUG_ON(!locked);
 
@@ -1183,23 +1184,23 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	if (i915_request_completed(request))
 		return;
 
-	if (prio <= READ_ONCE(request->priotree.priority))
+	if (prio <= READ_ONCE(request->sched.priority))
 		return;
 
 	/* Need BKL in order to use the temporary link inside i915_dependency */
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 
-	stack.signaler = &request->priotree;
+	stack.signaler = &request->sched;
 	list_add(&stack.dfs_link, &dfs);
 
 	/*
 	 * Recursively bump all dependent priorities to match the new request.
 	 *
 	 * A naive approach would be to use recursion:
-	 * static void update_priorities(struct i915_priotree *pt, prio) {
-	 *	list_for_each_entry(dep, &pt->signalers_list, signal_link)
+	 * static void update_priorities(struct i915_sched_node *node, prio) {
+	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
 	 *		update_priorities(dep->signal, prio)
-	 *	queue_request(pt);
+	 *	queue_request(node);
 	 * }
 	 * but that may have unlimited recursion depth and so runs a very
 	 * real risk of overunning the kernel stack. Instead, we build
@@ -1211,7 +1212,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	 * last element in the list is the request we must execute first.
 	 */
 	list_for_each_entry(dep, &dfs, dfs_link) {
-		struct i915_priotree *pt = dep->signaler;
+		struct i915_sched_node *node = dep->signaler;
 
 		/*
 		 * Within an engine, there can be no cycle, but we may
@@ -1219,13 +1220,13 @@ static void execlists_schedule(struct i915_request *request, int prio)
 		 * (redundant dependencies are not eliminated) and across
 		 * engines.
 		 */
-		list_for_each_entry(p, &pt->signalers_list, signal_link) {
+		list_for_each_entry(p, &node->signalers_list, signal_link) {
 			GEM_BUG_ON(p == dep); /* no cycles! */
 
-			if (i915_priotree_signaled(p->signaler))
+			if (i915_sched_node_signaled(p->signaler))
 				continue;
 
-			GEM_BUG_ON(p->signaler->priority < pt->priority);
+			GEM_BUG_ON(p->signaler->priority < node->priority);
 			if (prio > READ_ONCE(p->signaler->priority))
 				list_move_tail(&p->dfs_link, &dfs);
 		}
@@ -1237,9 +1238,9 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (request->priotree.priority == I915_PRIORITY_INVALID) {
-		GEM_BUG_ON(!list_empty(&request->priotree.link));
-		request->priotree.priority = prio;
+	if (request->sched.priority == I915_PRIORITY_INVALID) {
+		GEM_BUG_ON(!list_empty(&request->sched.link));
+		request->sched.priority = prio;
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
 		__list_del_entry(&stack.dfs_link);
@@ -1250,23 +1251,23 @@ static void execlists_schedule(struct i915_request *request, int prio)
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
-		struct i915_priotree *pt = dep->signaler;
+		struct i915_sched_node *node = dep->signaler;
 
 		INIT_LIST_HEAD(&dep->dfs_link);
 
-		engine = pt_lock_engine(pt, engine);
+		engine = sched_lock_engine(node, engine);
 
-		if (prio <= pt->priority)
+		if (prio <= node->priority)
 			continue;
 
-		pt->priority = prio;
-		if (!list_empty(&pt->link)) {
-			__list_del_entry(&pt->link);
-			queue_request(engine, pt, prio);
+		node->priority = prio;
+		if (!list_empty(&node->link)) {
+			__list_del_entry(&node->link);
+			queue_request(engine, node, prio);
 		}
 
 		if (prio > engine->execlists.queue_priority &&
-		    i915_sw_fence_done(&pt_to_request(pt)->submit))
+		    i915_sw_fence_done(&sched_to_request(node)->submit))
 			__submit_queue(engine, prio);
 	}
 

From b7268c5eed0ab4f052d614b4b0e3fe8a51c9d5a1 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 18 Apr 2018 19:40:52 +0100
Subject: [PATCH 0411/1286] drm/i915: Pack params to engine->schedule() into a
 struct

Today we only want to pass along the priority to engine->schedule(), but
in the future we want to have much more control over the various aspects
of the GPU during a context's execution, for example controlling the
frequency allowed. As we need an ever-growing number of parameters for
scheduling, move them into a struct for convenience.

v2: Move the anonymous struct into its own function for legibility and
ye olde gcc.
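
As a usage sketch only (the helper name below is hypothetical; the
pattern mirrors __fence_set_priority() and fb_obj_bump_render_priority()
in the diff that follows), callers now fill in a struct i915_sched_attr
on the stack and pass it by pointer instead of a bare integer priority:

	static void bump_request_priority(struct i915_request *rq, int prio)
	{
		struct i915_sched_attr attr = {
			.priority = prio,	/* currently the only parameter */
		};
		struct intel_engine_cs *engine = rq->engine;

		/* Not every engine provides a scheduler hook. */
		rcu_read_lock();
		if (engine->schedule)
			engine->schedule(rq, &attr);
		rcu_read_unlock();
	}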

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418184052.7129-3-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gvt/scheduler.c          |  2 +-
 drivers/gpu/drm/i915/i915_drv.h               |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c               | 18 +++++++++--------
 drivers/gpu/drm/i915/i915_gem_context.c       |  8 ++++----
 drivers/gpu/drm/i915/i915_gem_context.h       | 13 +-----------
 drivers/gpu/drm/i915/i915_gpu_error.c         |  8 ++++----
 drivers/gpu/drm/i915/i915_gpu_error.h         |  5 +++--
 drivers/gpu/drm/i915/i915_request.c           |  4 ++--
 drivers/gpu/drm/i915/i915_request.h           |  1 +
 drivers/gpu/drm/i915/i915_scheduler.h         | 17 +++++++++++++++-
 drivers/gpu/drm/i915/intel_display.c          | 11 +++++++++-
 drivers/gpu/drm/i915/intel_engine_cs.c        | 18 ++++++++++++++---
 drivers/gpu/drm/i915/intel_guc_submission.c   |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c              | 20 ++++++++++---------
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  4 +++-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  4 ++--
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  8 +++++---
 17 files changed, 91 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 638abe84857c..f3d21849b0cb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1135,7 +1135,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		return PTR_ERR(s->shadow_ctx);
 
 	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
-		s->shadow_ctx->priority = INT_MAX;
+		s->shadow_ctx->sched.priority = INT_MAX;
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8e8667d9b084..028691108125 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -75,6 +75,7 @@
 #include "i915_gem_timeline.h"
 #include "i915_gpu_error.h"
 #include "i915_request.h"
+#include "i915_scheduler.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -3158,7 +3159,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 struct intel_rps_client *rps);
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
-				  int priority);
+				  const struct i915_sched_attr *attr);
 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
 
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4c9d2a6f7d28..795ca83aed7a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -564,7 +564,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	return timeout;
 }
 
-static void __fence_set_priority(struct dma_fence *fence, int prio)
+static void __fence_set_priority(struct dma_fence *fence,
+				 const struct i915_sched_attr *attr)
 {
 	struct i915_request *rq;
 	struct intel_engine_cs *engine;
@@ -577,11 +578,12 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(rq, prio);
+		engine->schedule(rq, attr);
 	rcu_read_unlock();
 }
 
-static void fence_set_priority(struct dma_fence *fence, int prio)
+static void fence_set_priority(struct dma_fence *fence,
+			       const struct i915_sched_attr *attr)
 {
 	/* Recurse once into a fence-array */
 	if (dma_fence_is_array(fence)) {
@@ -589,16 +591,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
 		int i;
 
 		for (i = 0; i < array->num_fences; i++)
-			__fence_set_priority(array->fences[i], prio);
+			__fence_set_priority(array->fences[i], attr);
 	} else {
-		__fence_set_priority(fence, prio);
+		__fence_set_priority(fence, attr);
 	}
 }
 
 int
 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			      unsigned int flags,
-			      int prio)
+			      const struct i915_sched_attr *attr)
 {
 	struct dma_fence *excl;
 
@@ -613,7 +615,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			return ret;
 
 		for (i = 0; i < count; i++) {
-			fence_set_priority(shared[i], prio);
+			fence_set_priority(shared[i], attr);
 			dma_fence_put(shared[i]);
 		}
 
@@ -623,7 +625,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	}
 
 	if (excl) {
-		fence_set_priority(excl, prio);
+		fence_set_priority(excl, attr);
 		dma_fence_put(excl);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 9b3834a846e8..74435affe23f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -281,7 +281,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
-	ctx->priority = I915_PRIORITY_NORMAL;
+	ctx->sched.priority = I915_PRIORITY_NORMAL;
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
@@ -431,7 +431,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 		return ctx;
 
 	i915_gem_context_clear_bannable(ctx);
-	ctx->priority = prio;
+	ctx->sched.priority = prio;
 	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -753,7 +753,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
-		args->value = ctx->priority;
+		args->value = ctx->sched.priority;
 		break;
 	default:
 		ret = -EINVAL;
@@ -826,7 +826,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				 !capable(CAP_SYS_NICE))
 				ret = -EPERM;
 			else
-				ctx->priority = priority;
+				ctx->sched.priority = priority;
 		}
 		break;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 7854262ddfd9..b12a8a8c5af9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -137,18 +137,7 @@ struct i915_gem_context {
 	 */
 	u32 user_handle;
 
-	/**
-	 * @priority: execution and service priority
-	 *
-	 * All clients are equal, but some are more equal than others!
-	 *
-	 * Requests from a context with a greater (more positive) value of
-	 * @priority will be executed before those with a lower @priority
-	 * value, forming a simple QoS.
-	 *
-	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
-	 */
-	int priority;
+	struct i915_sched_attr sched;
 
 	/** ggtt_offset_bias: placement restriction for context objects */
 	u32 ggtt_offset_bias;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6b5b9b3ded02..671ffa37614e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 
 	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->ban_score,
-		   erq->context, erq->seqno, erq->priority,
+		   erq->context, erq->seqno, erq->sched_attr.priority,
 		   jiffies_to_msecs(jiffies - erq->jiffies),
 		   erq->head, erq->tail);
 }
@@ -422,7 +422,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 {
 	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
 		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-		   ctx->priority, ctx->ban_score, bannable(ctx),
+		   ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
 		   ctx->guilty, ctx->active);
 }
 
@@ -1278,7 +1278,7 @@ static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
-	erq->priority = request->sched.priority;
+	erq->sched_attr = request->sched.attr;
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
@@ -1372,7 +1372,7 @@ static void record_context(struct drm_i915_error_context *e,
 
 	e->handle = ctx->user_handle;
 	e->hw_id = ctx->hw_id;
-	e->priority = ctx->priority;
+	e->sched_attr = ctx->sched;
 	e->ban_score = atomic_read(&ctx->ban_score);
 	e->bannable = i915_gem_context_is_bannable(ctx);
 	e->guilty = atomic_read(&ctx->guilty_count);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index c05b6034d718..5d6fdcbc092c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -20,6 +20,7 @@
 #include "i915_gem.h"
 #include "i915_gem_gtt.h"
 #include "i915_params.h"
+#include "i915_scheduler.h"
 
 struct drm_i915_private;
 struct intel_overlay_error_state;
@@ -122,11 +123,11 @@ struct i915_gpu_state {
 			pid_t pid;
 			u32 handle;
 			u32 hw_id;
-			int priority;
 			int ban_score;
 			int active;
 			int guilty;
 			bool bannable;
+			struct i915_sched_attr sched_attr;
 		} context;
 
 		struct drm_i915_error_object {
@@ -147,11 +148,11 @@ struct i915_gpu_state {
 			long jiffies;
 			pid_t pid;
 			u32 context;
-			int priority;
 			int ban_score;
 			u32 seqno;
 			u32 head;
 			u32 tail;
+			struct i915_sched_attr sched_attr;
 		} *requests, execlist[EXECLIST_MAX_PORTS];
 		unsigned int num_ports;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index dfcc6a0df3fb..b692a9f7c357 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -193,7 +193,7 @@ i915_sched_node_init(struct i915_sched_node *node)
 	INIT_LIST_HEAD(&node->signalers_list);
 	INIT_LIST_HEAD(&node->waiters_list);
 	INIT_LIST_HEAD(&node->link);
-	node->priority = I915_PRIORITY_INVALID;
+	node->attr.priority = I915_PRIORITY_INVALID;
 }
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -1064,7 +1064,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 */
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(request, request->ctx->priority);
+		engine->schedule(request, &request->ctx->sched);
 	rcu_read_unlock();
 
 	local_bh_disable();
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 35b8a9856daa..8f31ca8272f8 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -30,6 +30,7 @@
 #include "i915_gem.h"
 #include "i915_scheduler.h"
 #include "i915_sw_fence.h"
+#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 754243e0f955..70a42220358d 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -19,6 +19,21 @@ enum {
 	I915_PRIORITY_INVALID = INT_MIN
 };
 
+struct i915_sched_attr {
+	/**
+	 * @priority: execution and service priority
+	 *
+	 * All clients are equal, but some are more equal than others!
+	 *
+	 * Requests from a context with a greater (more positive) value of
+	 * @priority will be executed before those with a lower @priority
+	 * value, forming a simple QoS.
+	 *
+	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
+	 */
+	int priority;
+};
+
 /*
  * "People assume that time is a strict progression of cause to effect, but
  * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
@@ -42,7 +57,7 @@ struct i915_sched_node {
 	struct list_head signalers_list; /* those before us, we depend upon */
 	struct list_head waiters_list; /* those after us, they depend upon us */
 	struct list_head link;
-	int priority;
+	struct i915_sched_attr attr;
 };
 
 struct i915_dependency {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e04050ea3e28..43d54c7231ff 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12761,6 +12761,15 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
 		intel_unpin_fb_vma(vma, old_plane_state->flags);
 }
 
+static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
+{
+	struct i915_sched_attr attr = {
+		.priority = I915_PRIORITY_DISPLAY,
+	};
+
+	i915_gem_object_wait_priority(obj, 0, &attr);
+}
+
 /**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
@@ -12837,7 +12846,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+	fb_obj_bump_render_priority(obj);
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index b542b1a4dddc..be608f7111f5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1113,17 +1113,29 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
 	return which;
 }
 
+static void print_sched_attr(struct drm_printer *m,
+			     const struct drm_i915_private *i915,
+			     const struct i915_sched_attr *attr)
+{
+	if (attr->priority == I915_PRIORITY_INVALID)
+		return;
+
+	drm_printf(m, "prio=%d", attr->priority);
+}
+
 static void print_request(struct drm_printer *m,
 			  struct i915_request *rq,
 			  const char *prefix)
 {
 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
 
-	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
+	drm_printf(m, "%s%x%s [%llx:%x] ",
+		   prefix,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
-		   rq->fence.context, rq->fence.seqno,
-		   rq->sched.priority,
+		   rq->fence.context, rq->fence.seqno);
+	print_sched_attr(m, rq->i915, &rq->sched.attr);
+	drm_printf(m, " @ %dms: %s\n",
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 0755f5cae950..02da05875aa7 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -659,7 +659,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->sched.priority;
+	return rq->sched.attr.priority;
 }
 
 static inline int port_prio(const struct execlist_port *port)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 062ed6e54420..029901a8fa38 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -177,7 +177,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->sched.priority;
+	return rq->sched.attr.priority;
 }
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
@@ -1172,11 +1172,13 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 	return engine;
 }
 
-static void execlists_schedule(struct i915_request *request, int prio)
+static void execlists_schedule(struct i915_request *request,
+			       const struct i915_sched_attr *attr)
 {
 	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
+	const int prio = attr->priority;
 	LIST_HEAD(dfs);
 
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
@@ -1184,7 +1186,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	if (i915_request_completed(request))
 		return;
 
-	if (prio <= READ_ONCE(request->sched.priority))
+	if (prio <= READ_ONCE(request->sched.attr.priority))
 		return;
 
 	/* Need BKL in order to use the temporary link inside i915_dependency */
@@ -1226,8 +1228,8 @@ static void execlists_schedule(struct i915_request *request, int prio)
 			if (i915_sched_node_signaled(p->signaler))
 				continue;
 
-			GEM_BUG_ON(p->signaler->priority < node->priority);
-			if (prio > READ_ONCE(p->signaler->priority))
+			GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
+			if (prio > READ_ONCE(p->signaler->attr.priority))
 				list_move_tail(&p->dfs_link, &dfs);
 		}
 	}
@@ -1238,9 +1240,9 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (request->sched.priority == I915_PRIORITY_INVALID) {
+	if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
 		GEM_BUG_ON(!list_empty(&request->sched.link));
-		request->sched.priority = prio;
+		request->sched.attr = *attr;
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
 		__list_del_entry(&stack.dfs_link);
@@ -1257,10 +1259,10 @@ static void execlists_schedule(struct i915_request *request, int prio)
 
 		engine = sched_lock_engine(node, engine);
 
-		if (prio <= node->priority)
+		if (prio <= node->attr.priority)
 			continue;
 
-		node->priority = prio;
+		node->attr.priority = prio;
 		if (!list_empty(&node->link)) {
 			__list_del_entry(&node->link);
 			queue_request(engine, node, prio);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 717041640135..c5e27905b0e1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,6 +14,7 @@
 #include "intel_gpu_commands.h"
 
 struct drm_printer;
+struct i915_sched_attr;
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -460,7 +461,8 @@ struct intel_engine_cs {
 	 *
 	 * Called under the struct_mutex.
 	 */
-	void		(*schedule)(struct i915_request *request, int priority);
+	void		(*schedule)(struct i915_request *request,
+				    const struct i915_sched_attr *attr);
 
 	/*
 	 * Cancel all requests on the hardware, or queued for execution.
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 24f913f26a7b..f7ee54e109ae 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -628,7 +628,7 @@ static int active_engine(void *data)
 		}
 
 		if (arg->flags & TEST_PRIORITY)
-			ctx[idx]->priority =
+			ctx[idx]->sched.priority =
 				i915_prandom_u32_max_state(512, &prng);
 
 		rq[idx] = i915_request_get(new);
@@ -683,7 +683,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 			return err;
 
 		if (flags & TEST_PRIORITY)
-			h.ctx->priority = 1024;
+			h.ctx->sched.priority = 1024;
 	}
 
 	for_each_engine(engine, i915, id) {
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 0481e2e01146..ee7e22d18ff8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -335,12 +335,12 @@ static int live_preempt(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
@@ -407,6 +407,7 @@ static int live_late_preempt(void *arg)
 	struct i915_gem_context *ctx_hi, *ctx_lo;
 	struct spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
+	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
 	int err = -ENOMEM;
 
@@ -458,7 +459,8 @@ static int live_late_preempt(void *arg)
 			goto err_wedged;
 		}
 
-		engine->schedule(rq, I915_PRIORITY_MAX);
+		attr.priority = I915_PRIORITY_MAX;
+		engine->schedule(rq, &attr);
 
 		if (!wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");

From 2a5b95b448485e143ec3e004eabe53b31db78eb3 Mon Sep 17 00:00:00 2001
From: Abhay Kumar <abhay.kumar@intel.com>
Date: Wed, 18 Apr 2018 13:37:07 +0300
Subject: [PATCH 0412/1286] drm/i915/audio: set minimum CD clock to twice the
 BCLK
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On GLK, when the device boots with only a 1366x768 panel and no audio, the
HDA codec doesn't come up. In this case, the CDCLK is less than twice the
BCLK. Even though audio isn't being enabled, having too low a CDCLK
leads to the audio probe failing altogether.

Require CDCLK to be at least twice the BCLK regardless of audio. This is
a minimal fix to improve things. Unfortunately, this a) leads to too
high CDCLK being used when audio is not used, and b) is still not enough
to fix audio probe when no outputs are connected at probe time.

The proper fix would be to increase CDCLK dynamically from the audio
component hooks.
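
As a rough illustration (mirroring the hunk below; treating the default
96 MHz BCLK from the BSpec quote as an assumption), the clamp boils
down to:

	/* gen9+: at least 2 * 96000 kHz = 192 MHz, now independent of
	 * crtc_state->has_audio
	 */
	if (INTEL_GEN(dev_priv) >= 9)
		min_cdclk = max(2 * 96000, min_cdclk);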

v2:
    - Address comment (Jani)
    - New design approach
v3:
    - Typo fix on top of v1

v4 by Jani: rewrite commit message, add comment in code

Cc: stable@vger.kernel.org
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@gmail.com>
Cc: Wenkai Du <wenkai.du@intel.com>
Reviewed-by: Wenkai Du <wenkai.du@intel.com>
Tested-by: Wenkai Du <wenkai.du@intel.com>
Acked-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102937
Signed-off-by: Abhay Kumar <abhay.kumar@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418103707.14645-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_cdclk.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index dc7db8a2caf8..ebca83a44d9b 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 		}
 	}
 
-	/* According to BSpec, "The CD clock frequency must be at least twice
+	/*
+	 * According to BSpec, "The CD clock frequency must be at least twice
 	 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+	 *
+	 * FIXME: Check the actual, not default, BCLK being used.
+	 *
+	 * FIXME: This does not depend on ->has_audio because the higher CDCLK
+	 * is required for audio probe, also when there are no audio capable
+	 * displays connected at probe time. This leads to unnecessarily high
+	 * CDCLK when audio is not required.
+	 *
+	 * FIXME: This limit is only applied when there are displays connected
+	 * at probe time. If we probe without displays, we'll still end up using
+	 * the platform minimum CDCLK, failing audio probe.
 	 */
-	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		min_cdclk = max(2 * 96000, min_cdclk);
 
 	/*

From c8da819478e8149656ff12e40fde2446f9034e81 Mon Sep 17 00:00:00 2001
From: Philippe CORNU <philippe.cornu@st.com>
Date: Sat, 7 Apr 2018 23:29:37 +0200
Subject: [PATCH 0413/1286] drm/stm: move enable/disable_vblank to crtc

The enable/disable_vblank() functions at the drm_driver level
are deprecated. Move them to the ltdc drm_crtc_funcs
structure.

Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Reviewed-by: Vincent Abriou <vincent.abriou@st.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180407212937.30407-1-philippe.cornu@st.com
---
 drivers/gpu/drm/stm/drv.c  |  2 --
 drivers/gpu/drm/stm/ltdc.c | 10 ++++++----
 drivers/gpu/drm/stm/ltdc.h |  2 --
 3 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 9ab00a87f7cc..8698e08313e1 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -72,8 +72,6 @@ static struct drm_driver drv_driver = {
 	.gem_prime_vmap = drm_gem_cma_prime_vmap,
 	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
 	.gem_prime_mmap = drm_gem_cma_prime_mmap,
-	.enable_vblank = ltdc_crtc_enable_vblank,
-	.disable_vblank = ltdc_crtc_disable_vblank,
 };
 
 static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 1a3277e483d5..2b745cfc9000 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -569,9 +569,9 @@ static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
 	.atomic_disable = ltdc_crtc_atomic_disable,
 };
 
-int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe)
+static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
 {
-	struct ltdc_device *ldev = ddev->dev_private;
+	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
 
 	DRM_DEBUG_DRIVER("\n");
 	reg_set(ldev->regs, LTDC_IER, IER_LIE);
@@ -579,9 +579,9 @@ int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe)
 	return 0;
 }
 
-void ltdc_crtc_disable_vblank(struct drm_device *ddev, unsigned int pipe)
+static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
 {
-	struct ltdc_device *ldev = ddev->dev_private;
+	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
 
 	DRM_DEBUG_DRIVER("\n");
 	reg_clear(ldev->regs, LTDC_IER, IER_LIE);
@@ -594,6 +594,8 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank = ltdc_crtc_enable_vblank,
+	.disable_vblank = ltdc_crtc_disable_vblank,
 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 };
 
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index edb268129c54..61a80d00bc3b 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -29,8 +29,6 @@ struct ltdc_device {
 	u32 irq_status;
 };
 
-int ltdc_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void ltdc_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
 int ltdc_load(struct drm_device *ddev);
 void ltdc_unload(struct drm_device *ddev);
 

From 25bb1a9de3cc36ea1fd2dc10f4a375571be0ff37 Mon Sep 17 00:00:00 2001
From: Philippe CORNU <philippe.cornu@st.com>
Date: Sat, 7 Apr 2018 23:35:03 +0200
Subject: [PATCH 0414/1286] drm/stm: ltdc: add user update info in plane print
 state

This patch adds the user plane update rate, in frames per second,
to the drm debugfs plane state.
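
A hedged sketch of how the printed value is derived (the names follow
the fps_info struct added below; the numbers are made up):

	/* e.g. 300 plane updates counted over a 5000 ms window:
	 * DIV_ROUND_CLOSEST(300 * 1000, 5000) = 60 -> "user_updates=60fps".
	 * The counter and timestamp are reset on each print, so every
	 * debugfs read reports the rate since the previous read.
	 */
	fps = DIV_ROUND_CLOSEST(fpsi->counter * 1000, ms_since_last);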

Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Reviewed-by: Vincent Abriou <vincent.abriou@st.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180407213503.30932-1-philippe.cornu@st.com
---
 drivers/gpu/drm/stm/ltdc.c | 22 ++++++++++++++++++++++
 drivers/gpu/drm/stm/ltdc.h |  8 ++++++++
 2 files changed, 30 insertions(+)

diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 2b745cfc9000..061d2b6e5157 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -729,6 +729,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
 	reg_update_bits(ldev->regs, LTDC_L1CR + lofs,
 			LXCR_LEN | LXCR_CLUTEN, val);
 
+	ldev->plane_fpsi[plane->index].counter++;
+
 	mutex_lock(&ldev->err_lock);
 	if (ldev->error_status & ISR_FUIF) {
 		DRM_DEBUG_DRIVER("Fifo underrun\n");
@@ -754,6 +756,25 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
 			 oldstate->crtc->base.id, plane->base.id);
 }
 
+static void ltdc_plane_atomic_print_state(struct drm_printer *p,
+					  const struct drm_plane_state *state)
+{
+	struct drm_plane *plane = state->plane;
+	struct ltdc_device *ldev = plane_to_ltdc(plane);
+	struct fps_info *fpsi = &ldev->plane_fpsi[plane->index];
+	int ms_since_last;
+	ktime_t now;
+
+	now = ktime_get();
+	ms_since_last = ktime_to_ms(ktime_sub(now, fpsi->last_timestamp));
+
+	drm_printf(p, "\tuser_updates=%dfps\n",
+		   DIV_ROUND_CLOSEST(fpsi->counter * 1000, ms_since_last));
+
+	fpsi->last_timestamp = now;
+	fpsi->counter = 0;
+}
+
 static const struct drm_plane_funcs ltdc_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
@@ -761,6 +782,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
 	.reset = drm_atomic_helper_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.atomic_print_state = ltdc_plane_atomic_print_state,
 };
 
 static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index 61a80d00bc3b..1e16d6afb0d2 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -20,6 +20,13 @@ struct ltdc_caps {
 	bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
 };
 
+#define LTDC_MAX_LAYER	4
+
+struct fps_info {
+	unsigned int counter;
+	ktime_t last_timestamp;
+};
+
 struct ltdc_device {
 	void __iomem *regs;
 	struct clk *pixel_clk;	/* lcd pixel clock */
@@ -27,6 +34,7 @@ struct ltdc_device {
 	struct ltdc_caps caps;
 	u32 error_status;
 	u32 irq_status;
+	struct fps_info plane_fpsi[LTDC_MAX_LAYER];
 };
 
 int ltdc_load(struct drm_device *ddev);

From c20f5f69c898899027c3e573afaab837195895b6 Mon Sep 17 00:00:00 2001
From: Philippe CORNU <philippe.cornu@st.com>
Date: Tue, 10 Apr 2018 15:53:12 +0200
Subject: [PATCH 0415/1286] drm/stm: ltdc: fix warning in
 ltdc_crtc_update_clut()

Fix the warning
"warn: variable dereferenced before check 'crtc' (see line 390)"
by removing unnecessary checks as ltdc_crtc_update_clut() is
only called from ltdc_crtc_atomic_flush() where crtc and
crtc->state are not NULL.

Many thanks to Dan Carpenter for the bug report
https://lists.freedesktop.org/archives/dri-devel/2018-February/166918.html

Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: yannick fertre <yannick.fertre@st.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180410135312.3553-1-philippe.cornu@st.com
---
 drivers/gpu/drm/stm/ltdc.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 061d2b6e5157..e3121d9e4230 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -392,9 +392,6 @@ static void ltdc_crtc_update_clut(struct drm_crtc *crtc)
 	u32 val;
 	int i;
 
-	if (!crtc || !crtc->state)
-		return;
-
 	if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut)
 		return;
 

From 7f497cc7fc34bc471f770b80bb2115e6646d6fb7 Mon Sep 17 00:00:00 2001
From: Philippe CORNU <philippe.cornu@st.com>
Date: Thu, 8 Feb 2018 15:58:05 +0100
Subject: [PATCH 0416/1286] drm/bridge/synopsys: dsi: Adopt SPDX identifiers

Add SPDX identifiers to the Synopsys DesignWare MIPI DSI
host controller driver.

Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Acked-by: Philippe Ombredanne <pombredanne@nexB.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180208145805.24762-1-philippe.cornu@st.com
---
 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 226171a3ece1..0c7ecf798874 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
  * Copyright (C) STMicroelectronics SA 2017
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  * Modified by Philippe Cornu <philippe.cornu@st.com>
  * This generic Synopsys DesignWare MIPI DSI host driver is based on the
  * Rockchip version from rockchip/dw-mipi-dsi.c with phy & bridge APIs.

From e307126a2c8e792a4b426ee3ab827d1285544e12 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 19 Apr 2018 11:59:39 +0300
Subject: [PATCH 0417/1286] drm/i915/dsi: improve dphy param limits logging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Move the limit checks near the calculations for each field, and actually
log the values that exceed limits.

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180419085940.21505-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_dsi_vbt.c | 34 +++++++++++++++-------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 91c07b0c8db9..4d6ffa7b3e7b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -647,6 +647,11 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 	/* prepare count */
 	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
 
+	if (prepare_cnt > PREPARE_CNT_MAX) {
+		DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
+		prepare_cnt = PREPARE_CNT_MAX;
+	}
+
 	/* exit zero count */
 	exit_zero_cnt = DIV_ROUND_UP(
 				(ths_prepare_hszero - ths_prepare_ns) * ui_den,
@@ -662,32 +667,29 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 	if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
 		exit_zero_cnt += 1;
 
+	if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
+		DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
+		exit_zero_cnt = EXIT_ZERO_CNT_MAX;
+	}
+
 	/* clk zero count */
 	clk_zero_cnt = DIV_ROUND_UP(
 				(tclk_prepare_clkzero -	ths_prepare_ns)
 				* ui_den, ui_num * mul);
 
+	if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
+		DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
+		clk_zero_cnt = CLK_ZERO_CNT_MAX;
+	}
+
 	/* trail count */
 	tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
 	trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
 
-	if (prepare_cnt > PREPARE_CNT_MAX ||
-		exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
-		clk_zero_cnt > CLK_ZERO_CNT_MAX ||
-		trail_cnt > TRAIL_CNT_MAX)
-		DRM_DEBUG_DRIVER("Values crossing maximum limits, restricting to max values\n");
-
-	if (prepare_cnt > PREPARE_CNT_MAX)
-		prepare_cnt = PREPARE_CNT_MAX;
-
-	if (exit_zero_cnt > EXIT_ZERO_CNT_MAX)
-		exit_zero_cnt = EXIT_ZERO_CNT_MAX;
-
-	if (clk_zero_cnt > CLK_ZERO_CNT_MAX)
-		clk_zero_cnt = CLK_ZERO_CNT_MAX;
-
-	if (trail_cnt > TRAIL_CNT_MAX)
+	if (trail_cnt > TRAIL_CNT_MAX) {
+		DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
 		trail_cnt = TRAIL_CNT_MAX;
+	}
 
 	/* B080 */
 	intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |

From eadd2721d080e276f35abc1daab15cddd121e40f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Fri, 16 Mar 2018 20:36:25 +0200
Subject: [PATCH 0418/1286] drm/i915: Protect PIPE_CONF_CHECK macros with do {}
 while(0)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make the PIPE_CONF_CHECK macros a bit more robust by wrapping them
in do {} while(0). Avoids funky surprises when you try to put an 'else'
after a PIPE_CONF_CHECK invocation...
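
A minimal sketch (hypothetical names, not from the patch) of the
failure mode being avoided:

	#define CHECK_BARE(x)    if (!(x)) { ret = false; }
	#define CHECK_WRAPPED(x) do { if (!(x)) { ret = false; } } while (0)

	if (enabled)
		CHECK_BARE(val);	/* expands to if (...) { ... }; so the   */
	else				/* 'else' follows a stray ';' and fails  */
		ret = true;		/* to compile (or, without the ';',      */
					/* binds to the macro's inner if)        */

	if (enabled)
		CHECK_WRAPPED(val);	/* trailing ';' is harmless; the 'else' */
	else				/* pairs with the outer if as intended  */
		ret = true;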

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180316183625.16316-1-ville.syrjala@linux.intel.com
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> #irc
---
 drivers/gpu/drm/i915/intel_display.c | 45 +++++++++++++++++-----------
 1 file changed, 27 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 43d54c7231ff..687e70110800 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11141,39 +11141,42 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
 		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
 
-#define PIPE_CONF_CHECK_X(name)	\
+#define PIPE_CONF_CHECK_X(name) do { \
 	if (current_config->name != pipe_config->name) { \
 		pipe_config_err(adjust, __stringify(name), \
 			  "(expected 0x%08x, found 0x%08x)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
-#define PIPE_CONF_CHECK_I(name)	\
+#define PIPE_CONF_CHECK_I(name) do { \
 	if (current_config->name != pipe_config->name) { \
 		pipe_config_err(adjust, __stringify(name), \
 			  "(expected %i, found %i)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
-#define PIPE_CONF_CHECK_BOOL(name)	\
+#define PIPE_CONF_CHECK_BOOL(name) do { \
 	if (current_config->name != pipe_config->name) { \
 		pipe_config_err(adjust, __stringify(name), \
 			  "(expected %s, found %s)\n", \
 			  yesno(current_config->name), \
 			  yesno(pipe_config->name)); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
 /*
  * Checks state where we only read out the enabling, but not the entire
  * state itself (like full infoframes or ELD for audio). These states
  * require a full modeset on bootup to fix up.
  */
-#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) \
+#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
 		PIPE_CONF_CHECK_BOOL(name); \
 	} else { \
@@ -11182,18 +11185,20 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 			  yesno(current_config->name), \
 			  yesno(pipe_config->name)); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
-#define PIPE_CONF_CHECK_P(name)	\
+#define PIPE_CONF_CHECK_P(name) do { \
 	if (current_config->name != pipe_config->name) { \
 		pipe_config_err(adjust, __stringify(name), \
 			  "(expected %p, found %p)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
-#define PIPE_CONF_CHECK_M_N(name) \
+#define PIPE_CONF_CHECK_M_N(name) do { \
 	if (!intel_compare_link_m_n(&current_config->name, \
 				    &pipe_config->name,\
 				    adjust)) { \
@@ -11211,14 +11216,15 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 			  pipe_config->name.link_m, \
 			  pipe_config->name.link_n); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
 /* This is required for BDW+ where there is only one set of registers for
  * switching between high and low RR.
  * This macro can be used whenever a comparison has to be made between one
  * hw state and multiple sw state variables.
  */
-#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
 	if (!intel_compare_link_m_n(&current_config->name, \
 				    &pipe_config->name, adjust) && \
 	    !intel_compare_link_m_n(&current_config->alt_name, \
@@ -11243,9 +11249,10 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 			  pipe_config->name.link_m, \
 			  pipe_config->name.link_n); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
-#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
+#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
 		pipe_config_err(adjust, __stringify(name), \
 			  "(%x) (expected %i, found %i)\n", \
@@ -11253,16 +11260,18 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 			  current_config->name & (mask), \
 			  pipe_config->name & (mask)); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
-#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
 		pipe_config_err(adjust, __stringify(name), \
 			  "(expected %i, found %i)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
 		ret = false; \
-	}
+	} \
+} while (0)
 
 #define PIPE_CONF_QUIRK(quirk)	\
 	((current_config->quirks | pipe_config->quirks) & (quirk))

From b49be6622f08187129561cff0409f7b06b33de57 Mon Sep 17 00:00:00 2001
From: Imre Deak <imre.deak@intel.com>
Date: Thu, 19 Apr 2018 18:51:09 +0300
Subject: [PATCH 0419/1286] drm/i915: Enable display WA#1183 from its correct
 spot
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The DMC FW specific part of display WA#1183 is supposed to be enabled
whenever enabling DC5 or DC6, so move it to the DC6 enable function
from the DC6 disable function.

I noticed this after Daniel's patch to remove the unused
skl_disable_dc6() function.

Fixes: 53421c2fe99c ("drm/i915: Apply Display WA #1183 on skl, kbl, and cfl")
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: <stable@vger.kernel.org>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180419155109.29451-1-imre.deak@intel.com
---
 drivers/gpu/drm/i915/intel_runtime_pm.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53ea564f971e..66de4b2dc8b7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
 
 	DRM_DEBUG_KMS("Enabling DC6\n");
 
-	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+	/* Wa Display #1183: skl,kbl,cfl */
+	if (IS_GEN9_BC(dev_priv))
+		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+			   SKL_SELECT_ALTERNATE_DC_EXIT);
 
+	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 }
 
 void skl_disable_dc6(struct drm_i915_private *dev_priv)
 {
 	DRM_DEBUG_KMS("Disabling DC6\n");
 
-	/* Wa Display #1183: skl,kbl,cfl */
-	if (IS_GEN9_BC(dev_priv))
-		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-			   SKL_SELECT_ALTERNATE_DC_EXIT);
-
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 

From f2ce15c72ae8aae06ba22b23f90ccc7d5b5e087c Mon Sep 17 00:00:00 2001
From: Jacopo Mondi <jacopo+renesas@jmondi.org>
Date: Wed, 18 Apr 2018 16:40:28 +0200
Subject: [PATCH 0420/1286] dt-bindings: display: bridge: Document THC63LVD1024
 LVDS decoder
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Document Thine THC63LVD1024 LVDS decoder device tree bindings.

Signed-off-by: Jacopo Mondi <jacopo+renesas@jmondi.org>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Rob Herring <robh@kernel.org>
Reviewed-by: Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1524062429-325-2-git-send-email-jacopo+renesas@jmondi.org
---
 .../display/bridge/thine,thc63lvd1024.txt     | 60 +++++++++++++++++++
 1 file changed, 60 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt

diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
new file mode 100644
index 000000000000..37f0c04d5a28
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
@@ -0,0 +1,60 @@
+Thine Electronics THC63LVD1024 LVDS decoder
+-------------------------------------------
+
+The THC63LVD1024 is a dual link LVDS receiver designed to convert LVDS streams
+to parallel data outputs. The chip supports single/dual input/output modes,
+handling up to two LVDS input streams and up to two digital CMOS/TTL outputs.
+
+Single or dual operation mode, output data mapping and DDR output modes are
+configured through input signals and the chip does not expose any control bus.
+
+Required properties:
+- compatible: Shall be "thine,thc63lvd1024"
+- vcc-supply: Power supply for TTL output, TTL CLOCKOUT signal, LVDS input,
+  PPL and digital circuitry
+
+Optional properties:
+- powerdown-gpios: Power down GPIO signal, pin name "/PDWN". Active low
+- oe-gpios: Output enable GPIO signal, pin name "OE". Active high
+
+The THC63LVD1024 video port connections are modeled according
+to OF graph bindings specified by Documentation/devicetree/bindings/graph.txt
+
+Required video port nodes:
+- port@0: First LVDS input port
+- port@2: First digital CMOS/TTL parallel output
+
+Optional video port nodes:
+- port@1: Second LVDS input port
+- port@3: Second digital CMOS/TTL parallel output
+
+Example:
+--------
+
+	thc63lvd1024: lvds-decoder {
+		compatible = "thine,thc63lvd1024";
+
+		vcc-supply = <&reg_lvds_vcc>;
+		powerdown-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				lvds_dec_in_0: endpoint {
+					remote-endpoint = <&lvds_out>;
+				};
+			};
+
+			port@2{
+				reg = <2>;
+
+				lvds_dec_out_2: endpoint {
+					remote-endpoint = <&adv7511_in>;
+				};
+			};
+		};
+	};

From cdc33b8672d2075dbf6e1aaaf9d24fa9b86e05c2 Mon Sep 17 00:00:00 2001
From: Jacopo Mondi <jacopo+renesas@jmondi.org>
Date: Wed, 18 Apr 2018 16:40:29 +0200
Subject: [PATCH 0421/1286] drm: bridge: Add thc63lvd1024 LVDS decoder driver
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add DRM bridge driver for Thine THC63LVD1024 LVDS to digital parallel
output converter.

Signed-off-by: Jacopo Mondi <jacopo+renesas@jmondi.org>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1524062429-325-3-git-send-email-jacopo+renesas@jmondi.org
---
 drivers/gpu/drm/bridge/Kconfig        |   6 +
 drivers/gpu/drm/bridge/Makefile       |   1 +
 drivers/gpu/drm/bridge/thc63lvd1024.c | 206 ++++++++++++++++++++++++++
 3 files changed, 213 insertions(+)
 create mode 100644 drivers/gpu/drm/bridge/thc63lvd1024.c

diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 3aa65bdecb0e..42c9c2d13752 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -93,6 +93,12 @@ config DRM_SII9234
 	  It is an I2C driver, that detects connection of MHL bridge
 	  and starts encapsulation of HDMI signal.
 
+config DRM_THINE_THC63LVD1024
+	tristate "Thine THC63LVD1024 LVDS decoder bridge"
+	depends on OF
+	---help---
+	  Thine THC63LVD1024 LVDS/parallel converter driver.
+
 config DRM_TOSHIBA_TC358767
 	tristate "Toshiba TC358767 eDP bridge"
 	depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 373eb28f31ed..fd90b16a65c0 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
 obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
 obj-$(CONFIG_DRM_SII902X) += sii902x.o
 obj-$(CONFIG_DRM_SII9234) += sii9234.o
+obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
new file mode 100644
index 000000000000..c8b9edd5a7f4
--- /dev/null
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * THC63LVD1024 LVDS to parallel data DRM bridge driver.
+ *
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+enum thc63_ports {
+	THC63_LVDS_IN0,
+	THC63_LVDS_IN1,
+	THC63_RGB_OUT0,
+	THC63_RGB_OUT1,
+};
+
+struct thc63_dev {
+	struct device *dev;
+
+	struct regulator *vcc;
+
+	struct gpio_desc *pdwn;
+	struct gpio_desc *oe;
+
+	struct drm_bridge bridge;
+	struct drm_bridge *next;
+};
+
+static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct thc63_dev, bridge);
+}
+
+static int thc63_attach(struct drm_bridge *bridge)
+{
+	struct thc63_dev *thc63 = to_thc63(bridge);
+
+	return drm_bridge_attach(bridge->encoder, thc63->next, bridge);
+}
+
+static void thc63_enable(struct drm_bridge *bridge)
+{
+	struct thc63_dev *thc63 = to_thc63(bridge);
+	int ret;
+
+	ret = regulator_enable(thc63->vcc);
+	if (ret) {
+		dev_err(thc63->dev,
+			"Failed to enable regulator \"vcc\": %d\n", ret);
+		return;
+	}
+
+	gpiod_set_value(thc63->pdwn, 0);
+	gpiod_set_value(thc63->oe, 1);
+}
+
+static void thc63_disable(struct drm_bridge *bridge)
+{
+	struct thc63_dev *thc63 = to_thc63(bridge);
+	int ret;
+
+	gpiod_set_value(thc63->oe, 0);
+	gpiod_set_value(thc63->pdwn, 1);
+
+	ret = regulator_disable(thc63->vcc);
+	if (ret)
+		dev_err(thc63->dev,
+			"Failed to disable regulator \"vcc\": %d\n", ret);
+}
+
+static const struct drm_bridge_funcs thc63_bridge_func = {
+	.attach	= thc63_attach,
+	.enable = thc63_enable,
+	.disable = thc63_disable,
+};
+
+static int thc63_parse_dt(struct thc63_dev *thc63)
+{
+	struct device_node *thc63_out;
+	struct device_node *remote;
+
+	thc63_out = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
+						  THC63_RGB_OUT0, -1);
+	if (!thc63_out) {
+		dev_err(thc63->dev, "Missing endpoint in port@%u\n",
+			THC63_RGB_OUT0);
+		return -ENODEV;
+	}
+
+	remote = of_graph_get_remote_port_parent(thc63_out);
+	of_node_put(thc63_out);
+	if (!remote) {
+		dev_err(thc63->dev, "Endpoint in port@%u unconnected\n",
+			THC63_RGB_OUT0);
+		return -ENODEV;
+	}
+
+	if (!of_device_is_available(remote)) {
+		dev_err(thc63->dev, "port@%u remote endpoint is disabled\n",
+			THC63_RGB_OUT0);
+		of_node_put(remote);
+		return -ENODEV;
+	}
+
+	thc63->next = of_drm_find_bridge(remote);
+	of_node_put(remote);
+	if (!thc63->next)
+		return -EPROBE_DEFER;
+
+	return 0;
+}
+
+static int thc63_gpio_init(struct thc63_dev *thc63)
+{
+	thc63->oe = devm_gpiod_get_optional(thc63->dev, "oe", GPIOD_OUT_LOW);
+	if (IS_ERR(thc63->oe)) {
+		dev_err(thc63->dev, "Unable to get \"oe-gpios\": %ld\n",
+			PTR_ERR(thc63->oe));
+		return PTR_ERR(thc63->oe);
+	}
+
+	thc63->pdwn = devm_gpiod_get_optional(thc63->dev, "powerdown",
+					      GPIOD_OUT_HIGH);
+	if (IS_ERR(thc63->pdwn)) {
+		dev_err(thc63->dev, "Unable to get \"powerdown-gpios\": %ld\n",
+			PTR_ERR(thc63->pdwn));
+		return PTR_ERR(thc63->pdwn);
+	}
+
+	return 0;
+}
+
+static int thc63_probe(struct platform_device *pdev)
+{
+	struct thc63_dev *thc63;
+	int ret;
+
+	thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
+	if (!thc63)
+		return -ENOMEM;
+
+	thc63->dev = &pdev->dev;
+	platform_set_drvdata(pdev, thc63);
+
+	thc63->vcc = devm_regulator_get_optional(thc63->dev, "vcc");
+	if (IS_ERR(thc63->vcc)) {
+		if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		dev_err(thc63->dev, "Unable to get \"vcc\" supply: %ld\n",
+			PTR_ERR(thc63->vcc));
+		return PTR_ERR(thc63->vcc);
+	}
+
+	ret = thc63_gpio_init(thc63);
+	if (ret)
+		return ret;
+
+	ret = thc63_parse_dt(thc63);
+	if (ret)
+		return ret;
+
+	thc63->bridge.driver_private = thc63;
+	thc63->bridge.of_node = pdev->dev.of_node;
+	thc63->bridge.funcs = &thc63_bridge_func;
+
+	drm_bridge_add(&thc63->bridge);
+
+	return 0;
+}
+
+static int thc63_remove(struct platform_device *pdev)
+{
+	struct thc63_dev *thc63 = platform_get_drvdata(pdev);
+
+	drm_bridge_remove(&thc63->bridge);
+
+	return 0;
+}
+
+static const struct of_device_id thc63_match[] = {
+	{ .compatible = "thine,thc63lvd1024", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, thc63_match);
+
+static struct platform_driver thc63_driver = {
+	.probe	= thc63_probe,
+	.remove	= thc63_remove,
+	.driver	= {
+		.name		= "thc63lvd1024",
+		.of_match_table	= thc63_match,
+	},
+};
+module_platform_driver(thc63_driver);
+
+MODULE_AUTHOR("Jacopo Mondi <jacopo@jmondi.org>");
+MODULE_DESCRIPTION("Thine THC63LVD1024 LVDS decoder DRM bridge driver");
+MODULE_LICENSE("GPL v2");

From c4c252590951704947d216a2565ee9dec21f704d Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Tue, 17 Apr 2018 12:02:25 +0200
Subject: [PATCH 0422/1286] drm/i915: Remove skl dc6 enable/disable functions

One is outright unused, the other can be made static.

Drive-by cleanup while accidentally reading dc code.

Cc: Imre Deak <imre.deak@intel.com>
Acked-by: Imre Deak <imre.deak@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180417100225.12286-1-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/i915/intel_drv.h        | 2 --
 drivers/gpu/drm/i915/intel_runtime_pm.c | 9 +--------
 2 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5bd2263407b2..8b20824e806e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1580,8 +1580,6 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 unsigned int skl_cdclk_get_vco(unsigned int freq);
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void skl_disable_dc6(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
 		      struct intel_crtc_state *pipe_config);
 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 66de4b2dc8b7..ec59992cf87a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -635,7 +635,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 	assert_csr_loaded(dev_priv);
 }
 
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc6(dev_priv);
 
@@ -649,13 +649,6 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 }
 
-void skl_disable_dc6(struct drm_i915_private *dev_priv)
-{
-	DRM_DEBUG_KMS("Disabling DC6\n");
-
-	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-}
-
 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
 				   struct i915_power_well *power_well)
 {

From fc34044248b611ea3f8b6e55b4ed404192a4f295 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 5 Apr 2018 15:00:23 -0700
Subject: [PATCH 0423/1286] drm/i915: Enable edp psr error interrupts on hsw
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The definitions for the error register should be valid on bdw/skl too,
but there we haven't even enabled DE_MISC handling yet.

Somewhat confusingly, the moved register offset on bdw only applies to
the _CTL/_AUX registers, while the _IIR/_IMR registers stayed where
they have been.

v2: Fixes from Ville.

v3: From DK
 * Rebased on drm-tip
 * Removed BDW IIR bit definition, looks like an unintentional change that
should be in the following patch.

v4: From DK
 * Don't mask REG_WRITE.

References: bspec/11974 [SRD Interrupt Bit Definition DevHSW]
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Jose Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405220023.9449-1-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_irq.c | 34 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_reg.h |  8 ++++++++
 2 files changed, 42 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b03d18561b55..630fc6f514d8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2452,6 +2452,26 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 		ironlake_rps_change_irq_handler(dev_priv);
 }
 
+static void hsw_edp_psr_irq_handler(struct drm_i915_private *dev_priv)
+{
+	u32 edp_psr_iir = I915_READ(EDP_PSR_IIR);
+
+	if (edp_psr_iir & EDP_PSR_ERROR)
+		DRM_DEBUG_KMS("PSR error\n");
+
+	if (edp_psr_iir & EDP_PSR_PRE_ENTRY) {
+		DRM_DEBUG_KMS("PSR prepare entry in 2 vblanks\n");
+		I915_WRITE(EDP_PSR_IMR, EDP_PSR_PRE_ENTRY);
+	}
+
+	if (edp_psr_iir & EDP_PSR_POST_EXIT) {
+		DRM_DEBUG_KMS("PSR exit completed\n");
+		I915_WRITE(EDP_PSR_IMR, 0);
+	}
+
+	I915_WRITE(EDP_PSR_IIR, edp_psr_iir);
+}
+
 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 				    u32 de_iir)
 {
@@ -2464,6 +2484,9 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 	if (de_iir & DE_ERR_INT_IVB)
 		ivb_err_int_handler(dev_priv);
 
+	if (de_iir & DE_EDP_PSR_INT_HSW)
+		hsw_edp_psr_irq_handler(dev_priv);
+
 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
 		dp_aux_irq_handler(dev_priv);
 
@@ -3348,6 +3371,11 @@ static void ironlake_irq_reset(struct drm_device *dev)
 	if (IS_GEN7(dev_priv))
 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
+	if (IS_HASWELL(dev_priv)) {
+		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+	}
+
 	gen5_gt_irq_reset(dev_priv);
 
 	ibx_irq_reset(dev_priv);
@@ -3762,6 +3790,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 			      DE_DP_A_HOTPLUG);
 	}
 
+	if (IS_HASWELL(dev_priv)) {
+		gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+		I915_WRITE(EDP_PSR_IMR, 0);
+		display_mask |= DE_EDP_PSR_INT_HSW;
+	}
+
 	dev_priv->irq_mask = ~display_mask;
 
 	ibx_irq_pre_postinstall(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index fb106026a1f4..9f61d381fec5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4026,6 +4026,13 @@ enum {
 #define   EDP_PSR_TP1_TIME_0us			(3<<4)
 #define   EDP_PSR_IDLE_FRAME_SHIFT		0
 
+/* Bspec claims those aren't shifted but stay at 0x64800 */
+#define EDP_PSR_IMR				_MMIO(0x64834)
+#define EDP_PSR_IIR				_MMIO(0x64838)
+#define   EDP_PSR_ERROR				(1<<2)
+#define   EDP_PSR_POST_EXIT			(1<<1)
+#define   EDP_PSR_PRE_ENTRY			(1<<0)
+
 #define EDP_PSR_AUX_CTL				_MMIO(dev_priv->psr_mmio_base + 0x10)
 #define   EDP_PSR_AUX_CTL_TIME_OUT_MASK		(3 << 26)
 #define   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK	(0x1f << 20)
@@ -6837,6 +6844,7 @@ enum {
 #define DE_PCH_EVENT_IVB		(1<<28)
 #define DE_DP_A_HOTPLUG_IVB		(1<<27)
 #define DE_AUX_CHANNEL_A_IVB		(1<<26)
+#define DE_EDP_PSR_INT_HSW		(1<<19)
 #define DE_SPRITEC_FLIP_DONE_IVB	(1<<14)
 #define DE_PLANEC_FLIP_DONE_IVB		(1<<13)
 #define DE_PIPEC_VBLANK_IVB		(1<<10)

From e04f7ece1c4530b4f0db182b5596fadf48628f22 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 3 Apr 2018 14:24:18 -0700
Subject: [PATCH 0424/1286] drm/i915: Enable edp psr error interrupts on bdw+
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Plug in the bdw+ irq handling for PSR interrupts. bdw+ supports PSR on
any transcoder in theory, though we don't currently enable PSR except
on the EDP transcoder.
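
For reference, a worked example of the per-transcoder bit macros added
below, assuming the usual enum transcoder values (TRANSCODER_A..C = 0..2,
TRANSCODER_EDP = 3):

	#define EDP_PSR_ERROR(trans)	(1 << (((trans) * 8 + 10) & 31))

	/* TRANSCODER_A -> bit 10, TRANSCODER_B -> bit 18,
	 * TRANSCODER_C -> bit 26, and TRANSCODER_EDP ->
	 * (3 * 8 + 10) & 31 = 2, i.e. bit 2 -- the same bit the old
	 * fixed hsw-only EDP_PSR_ERROR definition used.
	 */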

v2: From DK
 * Rebased on drm-tip
v3: Switched author to Ville based on IRC discussion.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Jose Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180403212420.25007-2-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_irq.c      | 57 +++++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_reg.h      |  7 ++--
 drivers/gpu/drm/i915/intel_display.h |  4 ++
 3 files changed, 52 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 630fc6f514d8..ab9aac88a00b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2455,20 +2455,34 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 static void hsw_edp_psr_irq_handler(struct drm_i915_private *dev_priv)
 {
 	u32 edp_psr_iir = I915_READ(EDP_PSR_IIR);
+	u32 edp_psr_imr = I915_READ(EDP_PSR_IMR);
+	u32 mask = BIT(TRANSCODER_EDP);
+	enum transcoder cpu_transcoder;
 
-	if (edp_psr_iir & EDP_PSR_ERROR)
-		DRM_DEBUG_KMS("PSR error\n");
+	if (INTEL_GEN(dev_priv) >= 8)
+		mask |= BIT(TRANSCODER_A) |
+			BIT(TRANSCODER_B) |
+			BIT(TRANSCODER_C);
 
-	if (edp_psr_iir & EDP_PSR_PRE_ENTRY) {
-		DRM_DEBUG_KMS("PSR prepare entry in 2 vblanks\n");
-		I915_WRITE(EDP_PSR_IMR, EDP_PSR_PRE_ENTRY);
-	}
-
-	if (edp_psr_iir & EDP_PSR_POST_EXIT) {
-		DRM_DEBUG_KMS("PSR exit completed\n");
-		I915_WRITE(EDP_PSR_IMR, 0);
+	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, mask) {
+		if (edp_psr_iir & EDP_PSR_ERROR(cpu_transcoder))
+			DRM_DEBUG_KMS("Transcoder %s PSR error\n",
+				      transcoder_name(cpu_transcoder));
+
+		if (edp_psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+			DRM_DEBUG_KMS("Transcoder %s PSR prepare entry in 2 vblanks\n",
+				      transcoder_name(cpu_transcoder));
+			edp_psr_imr |= EDP_PSR_PRE_ENTRY(cpu_transcoder);
+		}
+
+		if (edp_psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+			DRM_DEBUG_KMS("Transcoder %s PSR exit completed\n",
+				      transcoder_name(cpu_transcoder));
+			edp_psr_imr &= ~EDP_PSR_PRE_ENTRY(cpu_transcoder);
+		}
 	}
 
+	I915_WRITE(EDP_PSR_IMR, edp_psr_imr);
 	I915_WRITE(EDP_PSR_IIR, edp_psr_iir);
 }
 
@@ -2616,11 +2630,22 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 	if (master_ctl & GEN8_DE_MISC_IRQ) {
 		iir = I915_READ(GEN8_DE_MISC_IIR);
 		if (iir) {
+			bool found = false;
+
 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
 			ret = IRQ_HANDLED;
-			if (iir & GEN8_DE_MISC_GSE)
+
+			if (iir & GEN8_DE_MISC_GSE) {
 				intel_opregion_asle_intr(dev_priv);
-			else
+				found = true;
+			}
+
+			if (iir & GEN8_DE_EDP_PSR) {
+				hsw_edp_psr_irq_handler(dev_priv);
+				found = true;
+			}
+
+			if (!found)
 				DRM_ERROR("Unexpected DE Misc interrupt\n");
 		}
 		else
@@ -3414,6 +3439,9 @@ static void gen8_irq_reset(struct drm_device *dev)
 
 	gen8_gt_irq_reset(dev_priv);
 
+	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
 	for_each_pipe(dev_priv, pipe)
 		if (intel_display_power_is_enabled(dev_priv,
 						   POWER_DOMAIN_PIPE(pipe)))
@@ -3906,7 +3934,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	uint32_t de_pipe_enables;
 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
 	u32 de_port_enables;
-	u32 de_misc_masked = GEN8_DE_MISC_GSE;
+	u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
 	enum pipe pipe;
 
 	if (INTEL_GEN(dev_priv) >= 9) {
@@ -3931,6 +3959,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	else if (IS_BROADWELL(dev_priv))
 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
 
+	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+	I915_WRITE(EDP_PSR_IMR, 0);
+
 	for_each_pipe(dev_priv, pipe) {
 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9f61d381fec5..2dad655a710c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4029,9 +4029,9 @@ enum {
 /* Bspec claims those aren't shifted but stay at 0x64800 */
 #define EDP_PSR_IMR				_MMIO(0x64834)
 #define EDP_PSR_IIR				_MMIO(0x64838)
-#define   EDP_PSR_ERROR				(1<<2)
-#define   EDP_PSR_POST_EXIT			(1<<1)
-#define   EDP_PSR_PRE_ENTRY			(1<<0)
+#define   EDP_PSR_ERROR(trans)			(1 << (((trans) * 8 + 10) & 31))
+#define   EDP_PSR_POST_EXIT(trans)		(1 << (((trans) * 8 + 9) & 31))
+#define   EDP_PSR_PRE_ENTRY(trans)		(1 << (((trans) * 8 + 8) & 31))
 
 #define EDP_PSR_AUX_CTL				_MMIO(dev_priv->psr_mmio_base + 0x10)
 #define   EDP_PSR_AUX_CTL_TIME_OUT_MASK		(3 << 26)
@@ -6969,6 +6969,7 @@ enum {
 #define GEN8_DE_MISC_IIR _MMIO(0x44468)
 #define GEN8_DE_MISC_IER _MMIO(0x4446c)
 #define  GEN8_DE_MISC_GSE		(1 << 27)
+#define  GEN8_DE_EDP_PSR		(1 << 19)
 
 #define GEN8_PCU_ISR _MMIO(0x444e0)
 #define GEN8_PCU_IMR _MMIO(0x444e4)
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 4e7418b345bc..2ef31617614a 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -218,6 +218,10 @@ struct intel_link_m_n {
 	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
 		for_each_if((__mask) & BIT(__p))
 
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+	for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++)	\
+		for_each_if ((__mask) & (1 << (__t)))
+
 #define for_each_universal_plane(__dev_priv, __pipe, __p)		\
 	for ((__p) = 0;							\
 	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\

From 54fd3149598cc2f74cf0708d614470da2331a374 Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Wed, 4 Apr 2018 18:37:17 -0700
Subject: [PATCH 0425/1286] drm/i915/psr: Control PSR interrupts via debugfs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Interrupts other than the one for AUX errors are required only for debug,
so unmask them via debugfs when the user requests debug.

User can make such a request with
echo 1 > <DEBUG_FS>/dri/0/i915_edp_psr_debug

There are no locks to serialize PSR debug enabling from
irq_postinstall() and debugfs for simplicity. As irq_postinstall() is
called only during module initialization/resume and IGT subtests
aren't expected to modify PSR debug at those times, we should be safe.

v2: Unroll loops (Ville)
    Avoid resetting error mask bits.

v3: Unmask interrupts in postinstall() if debug was still enabled.
    Avoid RMW (Ville)

v4: Avoid extra IMR write introduced in the previous version.(Jose)
    Style changes, renames (Jose).

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Jose Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405013717.24254-1-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c | 36 +++++++++++++++++-
 drivers/gpu/drm/i915/i915_drv.h     |  1 +
 drivers/gpu/drm/i915/i915_irq.c     | 51 ++++++-------------------
 drivers/gpu/drm/i915/intel_drv.h    |  2 +
 drivers/gpu/drm/i915/intel_psr.c    | 58 +++++++++++++++++++++++++++++
 5 files changed, 108 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e0274f41bc76..a6c70ff4ae5a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2690,6 +2690,39 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+	struct drm_i915_private *dev_priv = data;
+
+	if (!CAN_PSR(dev_priv))
+		return -ENODEV;
+
+	DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
+
+	intel_runtime_pm_get(dev_priv);
+	intel_psr_irq_control(dev_priv, !!val);
+	intel_runtime_pm_put(dev_priv);
+
+	return 0;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+	struct drm_i915_private *dev_priv = data;
+
+	if (!CAN_PSR(dev_priv))
+		return -ENODEV;
+
+	*val = READ_ONCE(dev_priv->psr.debug);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+			"%llu\n");
+
 static int i915_sink_crc(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4862,7 +4895,8 @@ static const struct i915_debugfs_files {
 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
 	{"i915_ipc_status", &i915_ipc_status_fops},
-	{"i915_drrs_ctl", &i915_drrs_ctl_fops}
+	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
+	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
 };
 
 int i915_debugfs_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 028691108125..e67e21799f37 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -610,6 +610,7 @@ struct i915_psr {
 	bool has_hw_tracking;
 	bool psr2_enabled;
 	u8 sink_sync_latency;
+	bool debug;
 
 	void (*enable_source)(struct intel_dp *,
 			      const struct intel_crtc_state *);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ab9aac88a00b..96547e091e23 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2452,40 +2452,6 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 		ironlake_rps_change_irq_handler(dev_priv);
 }
 
-static void hsw_edp_psr_irq_handler(struct drm_i915_private *dev_priv)
-{
-	u32 edp_psr_iir = I915_READ(EDP_PSR_IIR);
-	u32 edp_psr_imr = I915_READ(EDP_PSR_IMR);
-	u32 mask = BIT(TRANSCODER_EDP);
-	enum transcoder cpu_transcoder;
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		mask |= BIT(TRANSCODER_A) |
-			BIT(TRANSCODER_B) |
-			BIT(TRANSCODER_C);
-
-	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, mask) {
-		if (edp_psr_iir & EDP_PSR_ERROR(cpu_transcoder))
-			DRM_DEBUG_KMS("Transcoder %s PSR error\n",
-				      transcoder_name(cpu_transcoder));
-
-		if (edp_psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
-			DRM_DEBUG_KMS("Transcoder %s PSR prepare entry in 2 vblanks\n",
-				      transcoder_name(cpu_transcoder));
-			edp_psr_imr |= EDP_PSR_PRE_ENTRY(cpu_transcoder);
-		}
-
-		if (edp_psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
-			DRM_DEBUG_KMS("Transcoder %s PSR exit completed\n",
-				      transcoder_name(cpu_transcoder));
-			edp_psr_imr &= ~EDP_PSR_PRE_ENTRY(cpu_transcoder);
-		}
-	}
-
-	I915_WRITE(EDP_PSR_IMR, edp_psr_imr);
-	I915_WRITE(EDP_PSR_IIR, edp_psr_iir);
-}
-
 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 				    u32 de_iir)
 {
@@ -2498,8 +2464,12 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 	if (de_iir & DE_ERR_INT_IVB)
 		ivb_err_int_handler(dev_priv);
 
-	if (de_iir & DE_EDP_PSR_INT_HSW)
-		hsw_edp_psr_irq_handler(dev_priv);
+	if (de_iir & DE_EDP_PSR_INT_HSW) {
+		u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+		intel_psr_irq_handler(dev_priv, psr_iir);
+		I915_WRITE(EDP_PSR_IIR, psr_iir);
+	}
 
 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
 		dp_aux_irq_handler(dev_priv);
@@ -2641,7 +2611,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			}
 
 			if (iir & GEN8_DE_EDP_PSR) {
-				hsw_edp_psr_irq_handler(dev_priv);
+				u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+				intel_psr_irq_handler(dev_priv, psr_iir);
+				I915_WRITE(EDP_PSR_IIR, psr_iir);
 				found = true;
 			}
 
@@ -3820,7 +3793,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 
 	if (IS_HASWELL(dev_priv)) {
 		gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
-		I915_WRITE(EDP_PSR_IMR, 0);
+		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
 		display_mask |= DE_EDP_PSR_INT_HSW;
 	}
 
@@ -3960,7 +3933,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
 
 	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
-	I915_WRITE(EDP_PSR_IMR, 0);
+	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
 
 	for_each_pipe(dev_priv, pipe) {
 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8b20824e806e..44ed248f1fe9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1899,6 +1899,8 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits);
 void intel_psr_compute_config(struct intel_dp *intel_dp,
 			      struct intel_crtc_state *crtc_state);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
 
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 69a5b276f4d8..ae6a916523c2 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -93,6 +93,64 @@ static void psr_aux_io_power_put(struct intel_dp *intel_dp)
 	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
 }
 
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
+{
+	u32 debug_mask, mask;
+
+	/* No PSR interrupts on VLV/CHV */
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		return;
+
+	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
+	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
+		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
+
+	if (INTEL_GEN(dev_priv) >= 8) {
+		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
+			EDP_PSR_ERROR(TRANSCODER_B) |
+			EDP_PSR_ERROR(TRANSCODER_C);
+
+		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
+			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
+			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
+			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
+			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
+			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+	}
+
+	if (debug)
+		mask |= debug_mask;
+
+	WRITE_ONCE(dev_priv->psr.debug, debug);
+	I915_WRITE(EDP_PSR_IMR, ~mask);
+}
+
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+{
+	u32 transcoders = BIT(TRANSCODER_EDP);
+	enum transcoder cpu_transcoder;
+
+	if (INTEL_GEN(dev_priv) >= 8)
+		transcoders |= BIT(TRANSCODER_A) |
+			       BIT(TRANSCODER_B) |
+			       BIT(TRANSCODER_C);
+
+	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+		/* FIXME: Exit PSR and link train manually when this happens. */
+		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
+			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
+				      transcoder_name(cpu_transcoder));
+
+		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder))
+			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
+				      transcoder_name(cpu_transcoder));
+
+		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder))
+			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
+				      transcoder_name(cpu_transcoder));
+	}
+}
+
 static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
 {
 	uint8_t psr_caps = 0;

From 3f983e54fdad452582843b9fbc22df0eb67daacd Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Tue, 3 Apr 2018 14:24:20 -0700
Subject: [PATCH 0426/1286] drm/i915/psr: Timestamps for PSR entry and exit
 interrupts.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Timestamps are useful for IGT tests that trigger PSR exit and/or wait for
PSR entry.

v2: Removed seqlock (Ville)
    Removed erroneous warning in irq loop (Chris)

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Reviewed-by: Jose Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180403212420.25007-4-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c | 7 +++++++
 drivers/gpu/drm/i915/i915_drv.h     | 2 ++
 drivers/gpu/drm/i915/intel_psr.c    | 9 +++++++--
 3 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6c70ff4ae5a..2f05f5262bba 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2686,6 +2686,13 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	}
 	mutex_unlock(&dev_priv->psr.lock);
 
+	if (READ_ONCE(dev_priv->psr.debug)) {
+		seq_printf(m, "Last attempted entry at: %lld\n",
+			   dev_priv->psr.last_entry_attempt);
+		seq_printf(m, "Last exit at: %lld\n",
+			   dev_priv->psr.last_exit);
+	}
+
 	intel_runtime_pm_put(dev_priv);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e67e21799f37..8444ca8d5aa3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -611,6 +611,8 @@ struct i915_psr {
 	bool psr2_enabled;
 	u8 sink_sync_latency;
 	bool debug;
+	ktime_t last_entry_attempt;
+	ktime_t last_exit;
 
 	void (*enable_source)(struct intel_dp *,
 			      const struct intel_crtc_state *);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index ae6a916523c2..0d548292dd09 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -129,6 +129,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 {
 	u32 transcoders = BIT(TRANSCODER_EDP);
 	enum transcoder cpu_transcoder;
+	ktime_t time_ns =  ktime_get();
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		transcoders |= BIT(TRANSCODER_A) |
@@ -141,13 +142,17 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
 				      transcoder_name(cpu_transcoder));
 
-		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder))
+		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+			dev_priv->psr.last_entry_attempt = time_ns;
 			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
 				      transcoder_name(cpu_transcoder));
+		}
 
-		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder))
+		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+			dev_priv->psr.last_exit = time_ns;
 			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
 				      transcoder_name(cpu_transcoder));
+		}
 	}
 }
 

From 011f22eb545a35f972036bb6a245c95c2e7e15a0 Mon Sep 17 00:00:00 2001
From: Hans de Goede <j.w.r.degoede@gmail.com>
Date: Fri, 20 Apr 2018 11:59:33 +0200
Subject: [PATCH 0427/1286] drm/i915: Do NOT skip the first 4k of stolen memory
 for pre-allocated buffers v2

Before this commit, the WaSkipStolenMemoryFirstPage workaround code was
skipping the first 4k by passing 4096 as the start of the address range
passed to drm_mm_init(). This meant that calling drm_mm_reserve_node()
to try to reserve the firmware framebuffer, so that we can inherit it,
would always fail, as the firmware framebuffer starts at address 0.

Commit d43537610470 ("drm/i915: skip the first 4k of stolen memory on
everything >= gen8") says in its commit message: "This is confirmed to fix
Skylake screen flickering issues (probably caused by the fact that we
initialized a ring in the first page of stolen, but I didn't 100% confirm
this theory)."

Which suggests that it is safe to use the first page for a linear
framebuffer as the firmware is doing (see note below).

This commit always passes 0 as the start to drm_mm_init() and works
around WaSkipStolenMemoryFirstPage in i915_gem_stolen_insert_node_in_range()
by ensuring the start address passed to drm_mm_insert_node_in_range()
is always 4k or more. All entry points to i915_gem_stolen.c go through
i915_gem_stolen_insert_node_in_range(), so any newly allocated objects
such as ring buffers will not be placed in the first 4k.

The one exception is i915_gem_object_create_stolen_for_preallocated(),
which directly calls drm_mm_reserve_node() and will now be able to use
the first 4k.
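
As an illustration (hypothetical sketch, not code from this patch),
the reservation that used to fail now has a chance to succeed because
the drm_mm range starts at 0 again:

struct drm_mm_node node = {};
int ret;

node.start = 0;       /* firmware framebuffer base within stolen */
node.size = fb_size;  /* hypothetical size of the BIOS framebuffer */
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, &node);
/* before this patch the range began at 4096, so this returned -ENOSPC */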

This fixes the i915 driver being unable to inherit the firmware
framebuffer on gen8+, which in turn fixes the video output changing
from the vendor logo to a black screen as soon as the i915 driver is
loaded (on systems without fbcon).

Some notes about the mapping of the BIOS framebuffer:

v1 led to some discussion about whether the intel_display.c code's
assumption that the firmware framebuffer is a linear mapping of the
stolen memory starting at offset 0 is still correct, because that would
mean that the GOP does not implement the WaSkipStolenMemoryFirstPage
workaround.

To verify this, the following code was added at the end of
i915_gem_object_create_stolen_for_preallocated():

pr_err("first ggtt entry before bind: 0x%016llx\n",
       readq(dev_priv->ggtt.gsm));
ret = i915_vma_bind(vma,
            HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE,
            PIN_UPDATE);
pr_err("i915_vma_bind ret %d\n", ret);
pr_err("first ggtt entry after bind: 0x%016llx\n",
       readq(dev_priv->ggtt.gsm));

This prints the mapping of the first page, then does a vma_bind() to
force-update the mapping with our linear view of the framebuffer and
then prints the mapping of the first page again.

On an Asrock B150M Pro4S/D3 mainboard with i5-6500 CPU this prints:

[    1.651141] first ggtt entry before bind: 0x0000000078c00001
[    1.651151] i915_vma_bind ret 0
[    1.651152] first ggtt entry after bind: 0x0000000078c00083

And "sudo cat /proc/iomem | grep Stolen" gives:
  78c00000-88bfffff : Graphics Stolen Memory

There are no visual changes with this patch (BIOS vendor logo still
stays in place when we inherit the BIOS framebuffer), so the vma_bind()
does not impact which memory is being scanned out.

The address of the first ggtt entry matches with the start of stolen
and the i915_vma_bind call only changes the first gtt entry's flags,
or-ing in _PAGE_RW (BIT(1)) and PPAT_CACHED (BIT(7)), which perfectly
matches what we would expect based on gen8_pte_encode()'s behavior.

So it seems that the GOP indeed does NOT implement the workaround and
the i915 code's assumption of a linear mapping at the start of stolen
for the BIOS fb still holds true for gen8+.

I've also tested this on a Cherry Trail based device (a GPD Win)
with identical results (the flags are 0x1b after the vma_bind
on CHT, which matches I915_CACHE_NONE).

Changed in v2: No code changes, extended the commit message with the
verification that the intel_display.c BIOS framebuffer mapping is still
correct.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180420095933.16442-1-hdegoede@redhat.com
---
 drivers/gpu/drm/i915/i915_gem_stolen.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index af915d041281..ad949cc30928 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -51,6 +51,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return -ENODEV;
 
+	/* WaSkipStolenMemoryFirstPage:bdw+ */
+	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
+		start = 4096;
+
 	mutex_lock(&dev_priv->mm.stolen_lock);
 	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
 					  size, alignment, 0,
@@ -343,7 +347,6 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
 	resource_size_t reserved_base, stolen_top;
 	resource_size_t reserved_total, reserved_size;
-	resource_size_t stolen_usable_start;
 
 	mutex_init(&dev_priv->mm.stolen_lock);
 
@@ -435,17 +438,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 			 (u64)resource_size(&dev_priv->dsm) >> 10,
 			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
 
-	stolen_usable_start = 0;
-	/* WaSkipStolenMemoryFirstPage:bdw+ */
-	if (INTEL_GEN(dev_priv) >= 8)
-		stolen_usable_start = 4096;
-
 	dev_priv->stolen_usable_size =
-		resource_size(&dev_priv->dsm) - reserved_total - stolen_usable_start;
+		resource_size(&dev_priv->dsm) - reserved_total;
 
 	/* Basic memrange allocator for stolen space. */
-	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
-		    dev_priv->stolen_usable_size);
+	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);
 
 	return 0;
 }

From 292bb0d38a5714440b59ef910404408d5e9a8017 Mon Sep 17 00:00:00 2001
From: Zhipeng Gong <zhipeng.gong@intel.com>
Date: Wed, 4 Apr 2018 08:43:52 +0800
Subject: [PATCH 0428/1286] drm/i915/gvt: Use real time to do timer check

intel_gvt_schedule checks the timer through a counter and is supposed
to wake up and increment the counter every ms.
On a system with a heavy workload, gvt_service_thread cannot get a
chance to run right after waking up and will be delayed by several
milliseconds. As a result, one hundred counter intervals can mean
several hundred milliseconds in real time.

This patch uses real time instead of a counter to do the timer check.

v2: remove static variable. (Zhenyu)
v3: correct expire_time update. (Zhenyu)

Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Min He <min.he@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/sched_policy.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 75b7bc7b344c..8876a57f407c 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -66,6 +66,7 @@ struct gvt_sched_data {
 	struct hrtimer timer;
 	unsigned long period;
 	struct list_head lru_runq_head;
+	ktime_t expire_time;
 };
 
 static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
@@ -226,14 +227,18 @@ out:
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
-	static uint64_t timer_check;
 
 	mutex_lock(&gvt->lock);
 
 	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
 				(void *)&gvt->service_request)) {
-		if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+		ktime_t cur_time = ktime_get();
+
+		if (cur_time >= sched_data->expire_time) {
 			gvt_balance_timeslice(sched_data);
+			sched_data->expire_time = ktime_add_ms(
+				cur_time, GVT_TS_BALANCE_PERIOD_MS);
+		}
 	}
 	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
 

From 89babe7cf18e4f93c6ba1e6abfe2e5aa5e4fc66c Mon Sep 17 00:00:00 2001
From: Zhipeng Gong <zhipeng.gong@intel.com>
Date: Wed, 4 Apr 2018 08:43:53 +0800
Subject: [PATCH 0429/1286] drm/i915/gvt: Update time slice more frequently

When there is only one vGPU in GVT-g and it submits workloads
continuously, it is never scheduled out, so vgpu_update_timeslice is
not called and its sched_in_time is not updated for a long time,
which can be several seconds or longer.
Once GVT-g pauses submitting workloads for this vGPU due to heavy
host CPU workload, the vGPU gets scheduled out, vgpu_update_timeslice
is called, and its left_ts is decreased by a large value
(sched_out_time - sched_in_time).
When GVT-g is going to submit workloads for this vGPU again, it will
not be scheduled in until gvt_balance_timeslice reaches stage 0 and
resets its left_ts, which introduces several hundred milliseconds of
latency.

This patch updates the time slice every ms so that sched_in_time is
updated in a timely manner.

v2: revise commit message
v3: use more concise expr. (Zhenyu)

Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Min He <min.he@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/sched_policy.c | 26 ++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 8876a57f407c..d053cbe1dc94 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -53,7 +53,6 @@ struct vgpu_sched_data {
 	bool active;
 
 	ktime_t sched_in_time;
-	ktime_t sched_out_time;
 	ktime_t sched_time;
 	ktime_t left_ts;
 	ktime_t allocated_ts;
@@ -69,15 +68,19 @@ struct gvt_sched_data {
 	ktime_t expire_time;
 };
 
-static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
 {
 	ktime_t delta_ts;
-	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+	struct vgpu_sched_data *vgpu_data;
 
-	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
+		return;
 
-	vgpu_data->sched_time += delta_ts;
-	vgpu_data->left_ts -= delta_ts;
+	vgpu_data = vgpu->sched_data;
+	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
+	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
+	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
+	vgpu_data->sched_in_time = cur_time;
 }
 
 #define GVT_TS_BALANCE_PERIOD_MS 100
@@ -151,11 +154,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	}
 
 	cur_time = ktime_get();
-	if (scheduler->current_vgpu) {
-		vgpu_data = scheduler->current_vgpu->sched_data;
-		vgpu_data->sched_out_time = cur_time;
-		vgpu_update_timeslice(scheduler->current_vgpu);
-	}
+	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
 	vgpu_data = scheduler->next_vgpu->sched_data;
 	vgpu_data->sched_in_time = cur_time;
 
@@ -227,13 +226,13 @@ out:
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+	ktime_t cur_time;
 
 	mutex_lock(&gvt->lock);
+	cur_time = ktime_get();
 
 	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
 				(void *)&gvt->service_request)) {
-		ktime_t cur_time = ktime_get();
-
 		if (cur_time >= sched_data->expire_time) {
 			gvt_balance_timeslice(sched_data);
 			sched_data->expire_time = ktime_add_ms(
@@ -242,6 +241,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	}
 	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
 
+	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
 	tbs_sched_func(sched_data);
 
 	mutex_unlock(&gvt->lock);

From 96bebe39b2f4533af14c509061cd2b551ca81e8d Mon Sep 17 00:00:00 2001
From: Zhao Yan <yan.y.zhao@intel.com>
Date: Wed, 4 Apr 2018 13:57:09 +0800
Subject: [PATCH 0430/1286] drm/i915/gvt: scan non-privileged batch buffer for
 debug purpose

For performance reasons, scanning of non-privileged batch buffers is
turned off by default, but it can be turned on via debugfs for
debugging purposes. After scanning, we still submit the original
non-privileged batch buffer to the hardware, so the scan is only a
peek at the guest-submitted commands and does not affect the
execution results.
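
As an illustrative sketch (not part of this patch; the real check is
the one added to batch_buffer_needs_scan() below), the per-engine
control boils down to a bit test against the ring id:

static bool scan_nonprivbb_enabled(struct intel_vgpu *vgpu, int ring_id)
{
	/* bit N of scan_nonprivbb enables scanning on engine N */
	return vgpu->scan_nonprivbb & (1 << ring_id);
}

so a value of 3 written to the per-vGPU "scan_nonprivbb" debugfs file
enables scanning of non-privileged batch buffers on engines 0 and 1
only.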

v4:
- refine debugfs print format & content (zhenyu wang)
- print engine id instead of engine name to prevent a potential memory
  leak in the debugfs warning message. (zhenyu wang)

v3:
- change vgpu->scan_nonprivbb from bool to u32, so that scanning of
  non-privileged batch buffers can be selectively turned on/off per
  engine, e.g. if vgpu->scan_nonprivbb is 3, non-privileged batch
  buffers are scanned on engines 0 and 1.
- in the debugfs interface that sets vgpu->scan_nonprivbb, print a
  warning message to warn the user and explicitly report the state
  change in the kernel log (zhenyu wang)
v2:
- rebase
- update comments for start_gma_offset (henry)

Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 55 +++++++++++++++++-----
 drivers/gpu/drm/i915/gvt/debugfs.c    | 67 ++++++++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/gvt.h        |  1 +
 drivers/gpu/drm/i915/gvt/scheduler.c  | 68 ++++++++++++++++++---------
 drivers/gpu/drm/i915/gvt/scheduler.h  |  1 +
 drivers/gpu/drm/i915/gvt/trace.h      | 24 ++++++++--
 6 files changed, 175 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index db6b94dda5df..9ec2cd982705 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1603,7 +1603,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
 		|| IS_KABYLAKE(gvt->dev_priv)) {
 		/* BDW decides privilege based on address space */
-		if (cmd_val(s, 0) & (1 << 8))
+		if (cmd_val(s, 0) & (1 << 8) &&
+			!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
 			return 0;
 	}
 	return 1;
@@ -1617,6 +1618,8 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 	bool bb_end = false;
 	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
+	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
+		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
 
 	*bb_size = 0;
 
@@ -1628,18 +1631,22 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 	cmd = cmd_val(s, 0);
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
-		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
-				cmd, get_opcode(cmd, s->ring_id));
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+				cmd, get_opcode(cmd, s->ring_id),
+				(s->buf_addr_type == PPGTT_BUFFER) ?
+				"ppgtt" : "ggtt", s->ring_id, s->workload);
 		return -EBADRQC;
 	}
 	do {
-		if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+		if (copy_gma_to_hva(s->vgpu, mm,
 				gma, gma + 4, &cmd) < 0)
 			return -EFAULT;
 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 		if (info == NULL) {
-			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
-				cmd, get_opcode(cmd, s->ring_id));
+			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+				cmd, get_opcode(cmd, s->ring_id),
+				(s->buf_addr_type == PPGTT_BUFFER) ?
+				"ppgtt" : "ggtt", s->ring_id, s->workload);
 			return -EBADRQC;
 		}
 
@@ -1665,6 +1672,9 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	unsigned long gma = 0;
 	unsigned long bb_size;
 	int ret = 0;
+	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
+		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
+	unsigned long gma_start_offset = 0;
 
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
@@ -1679,8 +1689,24 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	if (!bb)
 		return -ENOMEM;
 
+	bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
+
+	/* The gma_start_offset stores the batch buffer's start gma's
+	 * offset relative to the page boundary. So for a non-privileged
+	 * batch buffer, the shadowed gem object holds exactly the same
+	 * page layout as the original gem object. This is for the
+	 * convenience of replacing the whole non-privileged batch buffer
+	 * page with this shadowed one in PPGTT at the same gma address
+	 * (this replacement is not implemented yet, but may be necessary
+	 * in the future).
+	 * For a privileged batch buffer, we just change the start gma
+	 * address to that of the shadowed page.
+	 */
+	if (bb->ppgtt)
+		gma_start_offset = gma & ~I915_GTT_PAGE_MASK;
+
 	bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
-					 roundup(bb_size, PAGE_SIZE));
+			 roundup(bb_size + gma_start_offset, PAGE_SIZE));
 	if (IS_ERR(bb->obj)) {
 		ret = PTR_ERR(bb->obj);
 		goto err_free_bb;
@@ -1701,9 +1727,9 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 		bb->clflush &= ~CLFLUSH_BEFORE;
 	}
 
-	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+	ret = copy_gma_to_hva(s->vgpu, mm,
 			      gma, gma + bb_size,
-			      bb->va);
+			      bb->va + gma_start_offset);
 	if (ret < 0) {
 		gvt_vgpu_err("fail to copy guest ring buffer\n");
 		ret = -EFAULT;
@@ -1729,7 +1755,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	 * buffer's gma in pair. After all, we don't want to pin the shadow
 	 * buffer here (too early).
 	 */
-	s->ip_va = bb->va;
+	s->ip_va = bb->va + gma_start_offset;
 	s->ip_gma = gma;
 	return 0;
 err_unmap:
@@ -2468,15 +2494,18 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
-		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
-				cmd, get_opcode(cmd, s->ring_id));
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+				cmd, get_opcode(cmd, s->ring_id),
+				(s->buf_addr_type == PPGTT_BUFFER) ?
+				"ppgtt" : "ggtt", s->ring_id, s->workload);
 		return -EBADRQC;
 	}
 
 	s->info = info;
 
 	trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
-			  cmd_length(s), s->buf_type);
+			  cmd_length(s), s->buf_type, s->buf_addr_type,
+			  s->workload, info->name);
 
 	if (info->handler) {
 		ret = info->handler(s);
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index f7d0078eb61b..2ec89bcb59f1 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -124,6 +124,68 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(vgpu_mmio_diff);
 
+static int
+vgpu_scan_nonprivbb_get(void *data, u64 *val)
+{
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+	*val = vgpu->scan_nonprivbb;
+	return 0;
+}
+
+/*
+ * Set/unset bit engine_id of vgpu->scan_nonprivbb to turn on/off scanning
+ * of non-privileged batch buffers on that engine. E.g. if
+ * vgpu->scan_nonprivbb = 3, non-privileged batch buffers are scanned on
+ * engines 0 and 1.
+ */
+static int
+vgpu_scan_nonprivbb_set(void *data, u64 val)
+{
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	enum intel_engine_id id;
+	char buf[128], *s;
+	int len;
+
+	val &= (1 << I915_NUM_ENGINES) - 1;
+
+	if (vgpu->scan_nonprivbb == val)
+		return 0;
+
+	if (!val)
+		goto done;
+
+	len = sprintf(buf,
+		"gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:",
+		vgpu->id);
+
+	s = buf + len;
+
+	for (id = 0; id < I915_NUM_ENGINES; id++) {
+		struct intel_engine_cs *engine;
+
+		engine = dev_priv->engine[id];
+		if (engine && (val & (1 << id))) {
+			len = snprintf(s, 4, "%d, ", engine->id);
+			s += len;
+		} else
+			val &=  ~(1 << id);
+	}
+
+	if (val)
+		sprintf(s, "low performance expected.");
+
+	pr_warn("%s\n", buf);
+
+done:
+	vgpu->scan_nonprivbb = val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
+			vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
+			"0x%llx\n");
+
 /**
  * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
  * @vgpu: a vGPU
@@ -151,6 +213,11 @@ int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
 	if (!ent)
 		return -ENOMEM;
 
+	ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs,
+				 vgpu, &vgpu_scan_nonprivbb_fops);
+	if (!ent)
+		return -ENOMEM;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index efacd8abbedc..6ec888822a0f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -226,6 +226,7 @@ struct intel_vgpu {
 
 	struct completion vblank_done;
 
+	u32 scan_nonprivbb;
 };
 
 /* validating GM healthy status*/
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 638abe84857c..1bd7aa0c694a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -452,12 +452,6 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	int ret;
 
 	list_for_each_entry(bb, &workload->shadow_bb, list) {
-		bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
-		if (IS_ERR(bb->vma)) {
-			ret = PTR_ERR(bb->vma);
-			goto err;
-		}
-
 		/* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
 		 * is only updated into ring_scan_buffer, not real ring address
 		 * allocated in later copy_workload_to_ring_buffer. pls be noted
@@ -469,25 +463,53 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
 				+ bb->bb_offset;
 
-		/* relocate shadow batch buffer */
-		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
-		if (gmadr_bytes == 8)
-			bb->bb_start_cmd_va[2] = 0;
+		if (bb->ppgtt) {
+			/* for non-priv bb, scan&shadow is only for
+			 * debugging purpose, so the content of shadow bb
+			 * is the same as original bb. Therefore,
+			 * here, rather than switch to shadow bb's gma
+			 * address, we directly use original batch buffer's
+			 * gma address, and send original bb to hardware
+			 * directly
+			 */
+			if (bb->clflush & CLFLUSH_AFTER) {
+				drm_clflush_virt_range(bb->va,
+						bb->obj->base.size);
+				bb->clflush &= ~CLFLUSH_AFTER;
+			}
+			i915_gem_obj_finish_shmem_access(bb->obj);
+			bb->accessing = false;
 
-		/* No one is going to touch shadow bb from now on. */
-		if (bb->clflush & CLFLUSH_AFTER) {
-			drm_clflush_virt_range(bb->va, bb->obj->base.size);
-			bb->clflush &= ~CLFLUSH_AFTER;
+		} else {
+			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
+					NULL, 0, 0, 0);
+			if (IS_ERR(bb->vma)) {
+				ret = PTR_ERR(bb->vma);
+				goto err;
+			}
+
+			/* relocate shadow batch buffer */
+			bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
+			if (gmadr_bytes == 8)
+				bb->bb_start_cmd_va[2] = 0;
+
+			/* No one is going to touch shadow bb from now on. */
+			if (bb->clflush & CLFLUSH_AFTER) {
+				drm_clflush_virt_range(bb->va,
+						bb->obj->base.size);
+				bb->clflush &= ~CLFLUSH_AFTER;
+			}
+
+			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
+					false);
+			if (ret)
+				goto err;
+
+			i915_gem_obj_finish_shmem_access(bb->obj);
+			bb->accessing = false;
+
+			i915_vma_move_to_active(bb->vma, workload->req, 0);
 		}
-
-		ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
-		if (ret)
-			goto err;
-
-		i915_gem_obj_finish_shmem_access(bb->obj);
-		bb->accessing = false;
-
-		i915_vma_move_to_active(bb->vma, workload->req, 0);
 	}
 	return 0;
 err:
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 486ed57a4ad1..6c644782193e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -125,6 +125,7 @@ struct intel_vgpu_shadow_bb {
 	unsigned int clflush;
 	bool accessing;
 	unsigned long bb_offset;
+	bool ppgtt;
 };
 
 #define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 82093f1e8612..1fd64202d74e 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -224,19 +224,25 @@ TRACE_EVENT(oos_sync,
 	TP_printk("%s", __entry->buf)
 );
 
+#define GVT_CMD_STR_LEN 40
 TRACE_EVENT(gvt_command,
-	TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len,
-		 u32 buf_type),
+	TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
+		u32 cmd_len,  u32 buf_type, u32 buf_addr_type,
+		void *workload, char *cmd_name),
 
-	TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type),
+	TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
+		buf_addr_type, workload, cmd_name),
 
 	TP_STRUCT__entry(
 		__field(u8, vgpu_id)
 		__field(u8, ring_id)
 		__field(u32, ip_gma)
 		__field(u32, buf_type)
+		__field(u32, buf_addr_type)
 		__field(u32, cmd_len)
+		__field(void*, workload)
 		__dynamic_array(u32, raw_cmd, cmd_len)
+		__array(char, cmd_name, GVT_CMD_STR_LEN)
 	),
 
 	TP_fast_assign(
@@ -244,17 +250,25 @@ TRACE_EVENT(gvt_command,
 		__entry->ring_id = ring_id;
 		__entry->ip_gma = ip_gma;
 		__entry->buf_type = buf_type;
+		__entry->buf_addr_type = buf_addr_type;
 		__entry->cmd_len = cmd_len;
+		__entry->workload = workload;
+		snprintf(__entry->cmd_name, GVT_CMD_STR_LEN, "%s", cmd_name);
 		memcpy(__get_dynamic_array(raw_cmd), cmd_va, cmd_len * sizeof(*cmd_va));
 	),
 
 
-	TP_printk("vgpu%d ring %d: buf_type %u, ip_gma %08x, raw cmd %s",
+	TP_printk("vgpu%d ring %d: address_type %u, buf_type %u, ip_gma %08x,cmd (name=%s,len=%u,raw cmd=%s), workload=%p\n",
 		__entry->vgpu_id,
 		__entry->ring_id,
+		__entry->buf_addr_type,
 		__entry->buf_type,
 		__entry->ip_gma,
-		__print_array(__get_dynamic_array(raw_cmd), __entry->cmd_len, 4))
+		__entry->cmd_name,
+		__entry->cmd_len,
+		__print_array(__get_dynamic_array(raw_cmd),
+			__entry->cmd_len, 4),
+		__entry->workload)
 );
 
 #define GVT_TEMP_STR_LEN 10

From 41e7ccc19c2a52b13b77f2a489a466b140d2d235 Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
Date: Thu, 22 Mar 2018 13:21:54 -0500
Subject: [PATCH 0431/1286] drm/i915/gvt/scheduler: Remove unnecessary NULL
 checks in sr_oa_regs

The checks are misleading and not required [1].

[1] https://lkml.org/lkml/2018/3/19/1792

Addresses-Coverity-ID: 1466017
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bd7aa0c694a..d1a8fd88eed9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -97,7 +97,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
 		i915_mmio_reg_offset(EU_PERF_CNTL6),
 	};
 
-	if (!workload || !reg_state || workload->ring_id != RCS)
+	if (workload->ring_id != RCS)
 		return;
 
 	if (save) {

From 3eda0d22ead04f81ea59c9584bcbf5b496745e92 Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
Date: Thu, 22 Mar 2018 12:27:54 -0500
Subject: [PATCH 0432/1286] drm/i915/gvt: Mark expected switch fall-through in
 handle_g2v_notification

In preparation for enabling -Wimplicit-fallthrough, mark switch cases
where we are expecting to fall through.

Addresses-Coverity-ID: 1466154 ("Missing break in switch")
Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/handlers.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8c5d5d005854..a33c1c3e4a21 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 	switch (notification) {
 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
 		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+		/* fall through */
 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
 		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
 		return PTR_ERR_OR_ZERO(mm);

From ccaf509037f1995fd428fddf56aa3068a0e06d2e Mon Sep 17 00:00:00 2001
From: Fabio Estevam <fabio.estevam@nxp.com>
Date: Thu, 15 Mar 2018 15:04:17 -0300
Subject: [PATCH 0433/1286] drm: dw-hdmi-i2s: Remove owner assignment from
 platform_driver

platform_driver does not need to set the owner field, as this will
be populated by the driver core.

Generated by scripts/coccinelle/api/platform_no_drv_owner.cocci.

Signed-off-by: Fabio Estevam <fabio.estevam@nxp.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1521137057-14773-1-git-send-email-festevam@gmail.com
---
 drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 3b7e5c59a5e9..8f9c8a6b46de 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -152,7 +152,6 @@ static struct platform_driver snd_dw_hdmi_driver = {
 	.remove	= snd_dw_hdmi_remove,
 	.driver	= {
 		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
 	},
 };
 module_platform_driver(snd_dw_hdmi_driver);

From e19233955d9e9a9ae202723b9a38ef38e755b5c0 Mon Sep 17 00:00:00 2001
From: Boris Brezillon <boris.brezillon@bootlin.com>
Date: Sat, 21 Apr 2018 09:08:45 +0200
Subject: [PATCH 0434/1286] drm/bridge: Add Cadence DSI driver

Add a driver for Cadence DPI -> DSI bridge.

This driver only supports a subset of the Cadence DSI bridge's capabilities.

This driver has been tested/debugged in a simulated environment, which
explains why some features are missing. Here is a non-exhaustive list
of missing features:
 * burst mode
 * DPHY init/configuration steps
 * support for additional input interfaces (SDI input)

DSI commands and non-burst video mode have been tested.

Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Acked-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180421070846.10330-1-boris.brezillon@bootlin.com
---
 drivers/gpu/drm/bridge/Kconfig    |   10 +
 drivers/gpu/drm/bridge/Makefile   |    1 +
 drivers/gpu/drm/bridge/cdns-dsi.c | 1623 +++++++++++++++++++++++++++++
 3 files changed, 1634 insertions(+)
 create mode 100644 drivers/gpu/drm/bridge/cdns-dsi.c

diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 42c9c2d13752..1d75d3a1f951 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -25,6 +25,16 @@ config DRM_ANALOGIX_ANX78XX
 	  the HDMI output of an application processor to MyDP
 	  or DisplayPort.
 
+config DRM_CDNS_DSI
+	tristate "Cadence DPI/DSI bridge"
+	select DRM_KMS_HELPER
+	select DRM_MIPI_DSI
+	select DRM_PANEL_BRIDGE
+	depends on OF
+	help
+	  Support Cadence DPI to DSI bridge. This is an internal
+	  bridge and is meant to be directly embedded in a SoC.
+
 config DRM_DUMB_VGA_DAC
 	tristate "Dumb VGA DAC Bridge support"
 	depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index fd90b16a65c0..35f88d48ec20 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
+obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
 obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
 obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
 obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
new file mode 100644
index 000000000000..c255fc3e1be5
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -0,0 +1,1623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright: 2017 Cadence Design Systems, Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#define IP_CONF				0x0
+#define SP_HS_FIFO_DEPTH(x)		(((x) & GENMASK(30, 26)) >> 26)
+#define SP_LP_FIFO_DEPTH(x)		(((x) & GENMASK(25, 21)) >> 21)
+#define VRS_FIFO_DEPTH(x)		(((x) & GENMASK(20, 16)) >> 16)
+#define DIRCMD_FIFO_DEPTH(x)		(((x) & GENMASK(15, 13)) >> 13)
+#define SDI_IFACE_32			BIT(12)
+#define INTERNAL_DATAPATH_32		(0 << 10)
+#define INTERNAL_DATAPATH_16		(1 << 10)
+#define INTERNAL_DATAPATH_8		(3 << 10)
+#define INTERNAL_DATAPATH_SIZE		((x) & GENMASK(11, 10))
+#define NUM_IFACE(x)			((((x) & GENMASK(9, 8)) >> 8) + 1)
+#define MAX_LANE_NB(x)			(((x) & GENMASK(7, 6)) >> 6)
+#define RX_FIFO_DEPTH(x)		((x) & GENMASK(5, 0))
+
+#define MCTL_MAIN_DATA_CTL		0x4
+#define TE_MIPI_POLLING_EN		BIT(25)
+#define TE_HW_POLLING_EN		BIT(24)
+#define DISP_EOT_GEN			BIT(18)
+#define HOST_EOT_GEN			BIT(17)
+#define DISP_GEN_CHECKSUM		BIT(16)
+#define DISP_GEN_ECC			BIT(15)
+#define BTA_EN				BIT(14)
+#define READ_EN				BIT(13)
+#define REG_TE_EN			BIT(12)
+#define IF_TE_EN(x)			BIT(8 + (x))
+#define TVG_SEL				BIT(6)
+#define VID_EN				BIT(5)
+#define IF_VID_SELECT(x)		((x) << 2)
+#define IF_VID_SELECT_MASK		GENMASK(3, 2)
+#define IF_VID_MODE			BIT(1)
+#define LINK_EN				BIT(0)
+
+#define MCTL_MAIN_PHY_CTL		0x8
+#define HS_INVERT_DAT(x)		BIT(19 + ((x) * 2))
+#define SWAP_PINS_DAT(x)		BIT(18 + ((x) * 2))
+#define HS_INVERT_CLK			BIT(17)
+#define SWAP_PINS_CLK			BIT(16)
+#define HS_SKEWCAL_EN			BIT(15)
+#define WAIT_BURST_TIME(x)		((x) << 10)
+#define DATA_ULPM_EN(x)			BIT(6 + (x))
+#define CLK_ULPM_EN			BIT(5)
+#define CLK_CONTINUOUS			BIT(4)
+#define DATA_LANE_EN(x)			BIT((x) - 1)
+
+#define MCTL_MAIN_EN			0xc
+#define DATA_FORCE_STOP			BIT(17)
+#define CLK_FORCE_STOP			BIT(16)
+#define IF_EN(x)			BIT(13 + (x))
+#define DATA_LANE_ULPM_REQ(l)		BIT(9 + (l))
+#define CLK_LANE_ULPM_REQ		BIT(8)
+#define DATA_LANE_START(x)		BIT(4 + (x))
+#define CLK_LANE_EN			BIT(3)
+#define PLL_START			BIT(0)
+
+#define MCTL_DPHY_CFG0			0x10
+#define DPHY_C_RSTB			BIT(20)
+#define DPHY_D_RSTB(x)			GENMASK(15 + (x), 16)
+#define DPHY_PLL_PDN			BIT(10)
+#define DPHY_CMN_PDN			BIT(9)
+#define DPHY_C_PDN			BIT(8)
+#define DPHY_D_PDN(x)			GENMASK(3 + (x), 4)
+#define DPHY_ALL_D_PDN			GENMASK(7, 4)
+#define DPHY_PLL_PSO			BIT(1)
+#define DPHY_CMN_PSO			BIT(0)
+
+#define MCTL_DPHY_TIMEOUT1		0x14
+#define HSTX_TIMEOUT(x)			((x) << 4)
+#define HSTX_TIMEOUT_MAX		GENMASK(17, 0)
+#define CLK_DIV(x)			(x)
+#define CLK_DIV_MAX			GENMASK(3, 0)
+
+#define MCTL_DPHY_TIMEOUT2		0x18
+#define LPRX_TIMEOUT(x)			(x)
+
+#define MCTL_ULPOUT_TIME		0x1c
+#define DATA_LANE_ULPOUT_TIME(x)	((x) << 9)
+#define CLK_LANE_ULPOUT_TIME(x)		(x)
+
+#define MCTL_3DVIDEO_CTL		0x20
+#define VID_VSYNC_3D_EN			BIT(7)
+#define VID_VSYNC_3D_LR			BIT(5)
+#define VID_VSYNC_3D_SECOND_EN		BIT(4)
+#define VID_VSYNC_3DFORMAT_LINE		(0 << 2)
+#define VID_VSYNC_3DFORMAT_FRAME	(1 << 2)
+#define VID_VSYNC_3DFORMAT_PIXEL	(2 << 2)
+#define VID_VSYNC_3DMODE_OFF		0
+#define VID_VSYNC_3DMODE_PORTRAIT	1
+#define VID_VSYNC_3DMODE_LANDSCAPE	2
+
+#define MCTL_MAIN_STS			0x24
+#define MCTL_MAIN_STS_CTL		0x130
+#define MCTL_MAIN_STS_CLR		0x150
+#define MCTL_MAIN_STS_FLAG		0x170
+#define HS_SKEWCAL_DONE			BIT(11)
+#define IF_UNTERM_PKT_ERR(x)		BIT(8 + (x))
+#define LPRX_TIMEOUT_ERR		BIT(7)
+#define HSTX_TIMEOUT_ERR		BIT(6)
+#define DATA_LANE_RDY(l)		BIT(2 + (l))
+#define CLK_LANE_RDY			BIT(1)
+#define PLL_LOCKED			BIT(0)
+
+#define MCTL_DPHY_ERR			0x28
+#define MCTL_DPHY_ERR_CTL1		0x148
+#define MCTL_DPHY_ERR_CLR		0x168
+#define MCTL_DPHY_ERR_FLAG		0x188
+#define ERR_CONT_LP(x, l)		BIT(18 + ((x) * 4) + (l))
+#define ERR_CONTROL(l)			BIT(14 + (l))
+#define ERR_SYNESC(l)			BIT(10 + (l))
+#define ERR_ESC(l)			BIT(6 + (l))
+
+#define MCTL_DPHY_ERR_CTL2		0x14c
+#define ERR_CONT_LP_EDGE(x, l)		BIT(12 + ((x) * 4) + (l))
+#define ERR_CONTROL_EDGE(l)		BIT(8 + (l))
+#define ERR_SYN_ESC_EDGE(l)		BIT(4 + (l))
+#define ERR_ESC_EDGE(l)			BIT(0 + (l))
+
+#define MCTL_LANE_STS			0x2c
+#define PPI_C_TX_READY_HS		BIT(18)
+#define DPHY_PLL_LOCK			BIT(17)
+#define PPI_D_RX_ULPS_ESC(x)		(((x) & GENMASK(15, 12)) >> 12)
+#define LANE_STATE_START		0
+#define LANE_STATE_IDLE			1
+#define LANE_STATE_WRITE		2
+#define LANE_STATE_ULPM			3
+#define LANE_STATE_READ			4
+#define DATA_LANE_STATE(l, val)		\
+	(((val) >> (2 + 2 * (l) + ((l) ? 1 : 0))) & GENMASK((l) ? 1 : 2, 0))
+#define CLK_LANE_STATE_HS		2
+#define CLK_LANE_STATE(val)		((val) & GENMASK(1, 0))
+
+#define DSC_MODE_CTL			0x30
+#define DSC_MODE_EN			BIT(0)
+
+#define DSC_CMD_SEND			0x34
+#define DSC_SEND_PPS			BIT(0)
+#define DSC_EXECUTE_QUEUE		BIT(1)
+
+#define DSC_PPS_WRDAT			0x38
+
+#define DSC_MODE_STS			0x3c
+#define DSC_PPS_DONE			BIT(1)
+#define DSC_EXEC_DONE			BIT(2)
+
+#define CMD_MODE_CTL			0x70
+#define IF_LP_EN(x)			BIT(9 + (x))
+#define IF_VCHAN_ID(x, c)		((c) << ((x) * 2))
+
+#define CMD_MODE_CTL2			0x74
+#define TE_TIMEOUT(x)			((x) << 11)
+#define FILL_VALUE(x)			((x) << 3)
+#define ARB_IF_WITH_HIGHEST_PRIORITY(x)	((x) << 1)
+#define ARB_ROUND_ROBIN_MODE		BIT(0)
+
+#define CMD_MODE_STS			0x78
+#define CMD_MODE_STS_CTL		0x134
+#define CMD_MODE_STS_CLR		0x154
+#define CMD_MODE_STS_FLAG		0x174
+#define ERR_IF_UNDERRUN(x)		BIT(4 + (x))
+#define ERR_UNWANTED_READ		BIT(3)
+#define ERR_TE_MISS			BIT(2)
+#define ERR_NO_TE			BIT(1)
+#define CSM_RUNNING			BIT(0)
+
+#define DIRECT_CMD_SEND			0x80
+
+#define DIRECT_CMD_MAIN_SETTINGS	0x84
+#define TRIGGER_VAL(x)			((x) << 25)
+#define CMD_LP_EN			BIT(24)
+#define CMD_SIZE(x)			((x) << 16)
+#define CMD_VCHAN_ID(x)			((x) << 14)
+#define CMD_DATATYPE(x)			((x) << 8)
+#define CMD_LONG			BIT(3)
+#define WRITE_CMD			0
+#define READ_CMD			1
+#define TE_REQ				4
+#define TRIGGER_REQ			5
+#define BTA_REQ				6
+
+#define DIRECT_CMD_STS			0x88
+#define DIRECT_CMD_STS_CTL		0x138
+#define DIRECT_CMD_STS_CLR		0x158
+#define DIRECT_CMD_STS_FLAG		0x178
+#define RCVD_ACK_VAL(val)		((val) >> 16)
+#define RCVD_TRIGGER_VAL(val)		(((val) & GENMASK(14, 11)) >> 11)
+#define READ_COMPLETED_WITH_ERR		BIT(10)
+#define BTA_FINISHED			BIT(9)
+#define BTA_COMPLETED			BIT(8)
+#define TE_RCVD				BIT(7)
+#define TRIGGER_RCVD			BIT(6)
+#define ACK_WITH_ERR_RCVD		BIT(5)
+#define ACK_RCVD			BIT(4)
+#define READ_COMPLETED			BIT(3)
+#define TRIGGER_COMPLETED		BIT(2)
+#define WRITE_COMPLETED			BIT(1)
+#define SENDING_CMD			BIT(0)
+
+#define DIRECT_CMD_STOP_READ		0x8c
+
+#define DIRECT_CMD_WRDATA		0x90
+
+#define DIRECT_CMD_FIFO_RST		0x94
+
+#define DIRECT_CMD_RDDATA		0xa0
+
+#define DIRECT_CMD_RD_PROPS		0xa4
+#define RD_DCS				BIT(18)
+#define RD_VCHAN_ID(val)		(((val) >> 16) & GENMASK(1, 0))
+#define RD_SIZE(val)			((val) & GENMASK(15, 0))
+
+#define DIRECT_CMD_RD_STS		0xa8
+#define DIRECT_CMD_RD_STS_CTL		0x13c
+#define DIRECT_CMD_RD_STS_CLR		0x15c
+#define DIRECT_CMD_RD_STS_FLAG		0x17c
+#define ERR_EOT_WITH_ERR		BIT(8)
+#define ERR_MISSING_EOT			BIT(7)
+#define ERR_WRONG_LENGTH		BIT(6)
+#define ERR_OVERSIZE			BIT(5)
+#define ERR_RECEIVE			BIT(4)
+#define ERR_UNDECODABLE			BIT(3)
+#define ERR_CHECKSUM			BIT(2)
+#define ERR_UNCORRECTABLE		BIT(1)
+#define ERR_FIXED			BIT(0)
+
+#define VID_MAIN_CTL			0xb0
+#define VID_IGNORE_MISS_VSYNC		BIT(31)
+#define VID_FIELD_SW			BIT(28)
+#define VID_INTERLACED_EN		BIT(27)
+#define RECOVERY_MODE(x)		((x) << 25)
+#define RECOVERY_MODE_NEXT_HSYNC	0
+#define RECOVERY_MODE_NEXT_STOP_POINT	2
+#define RECOVERY_MODE_NEXT_VSYNC	3
+#define REG_BLKEOL_MODE(x)		((x) << 23)
+#define REG_BLKLINE_MODE(x)		((x) << 21)
+#define REG_BLK_MODE_NULL_PKT		0
+#define REG_BLK_MODE_BLANKING_PKT	1
+#define REG_BLK_MODE_LP			2
+#define SYNC_PULSE_HORIZONTAL		BIT(20)
+#define SYNC_PULSE_ACTIVE		BIT(19)
+#define BURST_MODE			BIT(18)
+#define VID_PIXEL_MODE_MASK		GENMASK(17, 14)
+#define VID_PIXEL_MODE_RGB565		(0 << 14)
+#define VID_PIXEL_MODE_RGB666_PACKED	(1 << 14)
+#define VID_PIXEL_MODE_RGB666		(2 << 14)
+#define VID_PIXEL_MODE_RGB888		(3 << 14)
+#define VID_PIXEL_MODE_RGB101010	(4 << 14)
+#define VID_PIXEL_MODE_RGB121212	(5 << 14)
+#define VID_PIXEL_MODE_YUV420		(8 << 14)
+#define VID_PIXEL_MODE_YUV422_PACKED	(9 << 14)
+#define VID_PIXEL_MODE_YUV422		(10 << 14)
+#define VID_PIXEL_MODE_YUV422_24B	(11 << 14)
+#define VID_PIXEL_MODE_DSC_COMP		(12 << 14)
+#define VID_DATATYPE(x)			((x) << 8)
+#define VID_VIRTCHAN_ID(iface, x)	((x) << (4 + (iface) * 2))
+#define STOP_MODE(x)			((x) << 2)
+#define START_MODE(x)			(x)
+
+#define VID_VSIZE1			0xb4
+#define VFP_LEN(x)			((x) << 12)
+#define VBP_LEN(x)			((x) << 6)
+#define VSA_LEN(x)			(x)
+
+#define VID_VSIZE2			0xb8
+#define VACT_LEN(x)			(x)
+
+#define VID_HSIZE1			0xc0
+#define HBP_LEN(x)			((x) << 16)
+#define HSA_LEN(x)			(x)
+
+#define VID_HSIZE2			0xc4
+#define HFP_LEN(x)			((x) << 16)
+#define HACT_LEN(x)			(x)
+
+#define VID_BLKSIZE1			0xcc
+#define BLK_EOL_PKT_LEN(x)		((x) << 15)
+#define BLK_LINE_EVENT_PKT_LEN(x)	(x)
+
+#define VID_BLKSIZE2			0xd0
+#define BLK_LINE_PULSE_PKT_LEN(x)	(x)
+
+#define VID_PKT_TIME			0xd8
+#define BLK_EOL_DURATION(x)		(x)
+
+#define VID_DPHY_TIME			0xdc
+#define REG_WAKEUP_TIME(x)		((x) << 17)
+#define REG_LINE_DURATION(x)		(x)
+
+#define VID_ERR_COLOR1			0xe0
+#define COL_GREEN(x)			((x) << 12)
+#define COL_RED(x)			(x)
+
+#define VID_ERR_COLOR2			0xe4
+#define PAD_VAL(x)			((x) << 12)
+#define COL_BLUE(x)			(x)
+
+#define VID_VPOS			0xe8
+#define LINE_VAL(val)			(((val) & GENMASK(14, 2)) >> 2)
+#define LINE_POS(val)			((val) & GENMASK(1, 0))
+
+#define VID_HPOS			0xec
+#define HORIZ_VAL(val)			(((val) & GENMASK(17, 3)) >> 3)
+#define HORIZ_POS(val)			((val) & GENMASK(2, 0))
+
+#define VID_MODE_STS			0xf0
+#define VID_MODE_STS_CTL		0x140
+#define VID_MODE_STS_CLR		0x160
+#define VID_MODE_STS_FLAG		0x180
+#define VSG_RECOVERY			BIT(10)
+#define ERR_VRS_WRONG_LEN		BIT(9)
+#define ERR_LONG_READ			BIT(8)
+#define ERR_LINE_WRITE			BIT(7)
+#define ERR_BURST_WRITE			BIT(6)
+#define ERR_SMALL_HEIGHT		BIT(5)
+#define ERR_SMALL_LEN			BIT(4)
+#define ERR_MISSING_VSYNC		BIT(3)
+#define ERR_MISSING_HSYNC		BIT(2)
+#define ERR_MISSING_DATA		BIT(1)
+#define VSG_RUNNING			BIT(0)
+
+#define VID_VCA_SETTING1		0xf4
+#define BURST_LP			BIT(16)
+#define MAX_BURST_LIMIT(x)		(x)
+
+#define VID_VCA_SETTING2		0xf8
+#define MAX_LINE_LIMIT(x)		((x) << 16)
+#define EXACT_BURST_LIMIT(x)		(x)
+
+#define TVG_CTL				0xfc
+#define TVG_STRIPE_SIZE(x)		((x) << 5)
+#define TVG_MODE_MASK			GENMASK(4, 3)
+#define TVG_MODE_SINGLE_COLOR		(0 << 3)
+#define TVG_MODE_VSTRIPES		(2 << 3)
+#define TVG_MODE_HSTRIPES		(3 << 3)
+#define TVG_STOPMODE_MASK		GENMASK(2, 1)
+#define TVG_STOPMODE_EOF		(0 << 1)
+#define TVG_STOPMODE_EOL		(1 << 1)
+#define TVG_STOPMODE_NOW		(2 << 1)
+#define TVG_RUN				BIT(0)
+
+#define TVG_IMG_SIZE			0x100
+#define TVG_NBLINES(x)			((x) << 16)
+#define TVG_LINE_SIZE(x)		(x)
+
+#define TVG_COLOR1			0x104
+#define TVG_COL1_GREEN(x)		((x) << 12)
+#define TVG_COL1_RED(x)			(x)
+
+#define TVG_COLOR1_BIS			0x108
+#define TVG_COL1_BLUE(x)		(x)
+
+#define TVG_COLOR2			0x10c
+#define TVG_COL2_GREEN(x)		((x) << 12)
+#define TVG_COL2_RED(x)			(x)
+
+#define TVG_COLOR2_BIS			0x110
+#define TVG_COL2_BLUE(x)		(x)
+
+#define TVG_STS				0x114
+#define TVG_STS_CTL			0x144
+#define TVG_STS_CLR			0x164
+#define TVG_STS_FLAG			0x184
+#define TVG_STS_RUNNING			BIT(0)
+
+#define STS_CTL_EDGE(e)			((e) << 16)
+
+#define DPHY_LANES_MAP			0x198
+#define DAT_REMAP_CFG(b, l)		((l) << ((b) * 8))
+
+#define DPI_IRQ_EN			0x1a0
+#define DPI_IRQ_CLR			0x1a4
+#define DPI_IRQ_STS			0x1a8
+#define PIXEL_BUF_OVERFLOW		BIT(0)
+
+#define DPI_CFG				0x1ac
+#define DPI_CFG_FIFO_DEPTH(x)		((x) >> 16)
+#define DPI_CFG_FIFO_LEVEL(x)		((x) & GENMASK(15, 0))
+
+#define TEST_GENERIC			0x1f0
+#define TEST_STATUS(x)			((x) >> 16)
+#define TEST_CTRL(x)			(x)
+
+#define ID_REG				0x1fc
+#define REV_VENDOR_ID(x)		(((x) & GENMASK(31, 20)) >> 20)
+#define REV_PRODUCT_ID(x)		(((x) & GENMASK(19, 12)) >> 12)
+#define REV_HW(x)			(((x) & GENMASK(11, 8)) >> 8)
+#define REV_MAJOR(x)			(((x) & GENMASK(7, 4)) >> 4)
+#define REV_MINOR(x)			((x) & GENMASK(3, 0))
+
+#define DSI_OUTPUT_PORT			0
+#define DSI_INPUT_PORT(inputid)		(1 + (inputid))
+
+#define DSI_HBP_FRAME_OVERHEAD		12
+#define DSI_HSA_FRAME_OVERHEAD		14
+#define DSI_HFP_FRAME_OVERHEAD		6
+#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD	4
+#define DSI_BLANKING_FRAME_OVERHEAD	6
+#define DSI_NULL_FRAME_OVERHEAD		6
+#define DSI_EOT_PKT_SIZE		4
+
+#define REG_WAKEUP_TIME_NS		800
+#define DPHY_PLL_RATE_HZ		108000000
+
+/* DPHY registers */
+#define DPHY_PMA_CMN(reg)		(reg)
+#define DPHY_PMA_LCLK(reg)		(0x100 + (reg))
+#define DPHY_PMA_LDATA(lane, reg)	(0x200 + ((lane) * 0x100) + (reg))
+#define DPHY_PMA_RCLK(reg)		(0x600 + (reg))
+#define DPHY_PMA_RDATA(lane, reg)	(0x700 + ((lane) * 0x100) + (reg))
+#define DPHY_PCS(reg)			(0xb00 + (reg))
+
+#define DPHY_CMN_SSM			DPHY_PMA_CMN(0x20)
+#define DPHY_CMN_SSM_EN			BIT(0)
+#define DPHY_CMN_TX_MODE_EN		BIT(9)
+
+#define DPHY_CMN_PWM			DPHY_PMA_CMN(0x40)
+#define DPHY_CMN_PWM_DIV(x)		((x) << 20)
+#define DPHY_CMN_PWM_LOW(x)		((x) << 10)
+#define DPHY_CMN_PWM_HIGH(x)		(x)
+
+#define DPHY_CMN_FBDIV			DPHY_PMA_CMN(0x4c)
+#define DPHY_CMN_FBDIV_VAL(low, high)	(((high) << 11) | ((low) << 22))
+#define DPHY_CMN_FBDIV_FROM_REG		(BIT(10) | BIT(21))
+
+#define DPHY_CMN_OPIPDIV		DPHY_PMA_CMN(0x50)
+#define DPHY_CMN_IPDIV_FROM_REG		BIT(0)
+#define DPHY_CMN_IPDIV(x)		((x) << 1)
+#define DPHY_CMN_OPDIV_FROM_REG		BIT(6)
+#define DPHY_CMN_OPDIV(x)		((x) << 7)
+
+#define DPHY_PSM_CFG			DPHY_PCS(0x4)
+#define DPHY_PSM_CFG_FROM_REG		BIT(0)
+#define DPHY_PSM_CLK_DIV(x)		((x) << 1)
+
+struct cdns_dsi_output {
+	struct mipi_dsi_device *dev;
+	struct drm_panel *panel;
+	struct drm_bridge *bridge;
+};
+
+enum cdns_dsi_input_id {
+	CDNS_SDI_INPUT,
+	CDNS_DPI_INPUT,
+	CDNS_DSC_INPUT,
+};
+
+struct cdns_dphy_cfg {
+	u8 pll_ipdiv;
+	u8 pll_opdiv;
+	u16 pll_fbdiv;
+	unsigned long lane_bps;
+	unsigned int nlanes;
+};
+
+struct cdns_dsi_cfg {
+	unsigned int hfp;
+	unsigned int hsa;
+	unsigned int hbp;
+	unsigned int hact;
+	unsigned int htotal;
+};
+
+struct cdns_dphy;
+
+enum cdns_dphy_clk_lane_cfg {
+	DPHY_CLK_CFG_LEFT_DRIVES_ALL = 0,
+	DPHY_CLK_CFG_LEFT_DRIVES_RIGHT = 1,
+	DPHY_CLK_CFG_LEFT_DRIVES_LEFT = 2,
+	DPHY_CLK_CFG_RIGHT_DRIVES_ALL = 3,
+};
+
+struct cdns_dphy_ops {
+	int (*probe)(struct cdns_dphy *dphy);
+	void (*remove)(struct cdns_dphy *dphy);
+	void (*set_psm_div)(struct cdns_dphy *dphy, u8 div);
+	void (*set_clk_lane_cfg)(struct cdns_dphy *dphy,
+				 enum cdns_dphy_clk_lane_cfg cfg);
+	void (*set_pll_cfg)(struct cdns_dphy *dphy,
+			    const struct cdns_dphy_cfg *cfg);
+	unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
+};
+
+struct cdns_dphy {
+	struct cdns_dphy_cfg cfg;
+	void __iomem *regs;
+	struct clk *psm_clk;
+	struct clk *pll_ref_clk;
+	const struct cdns_dphy_ops *ops;
+};
+
+struct cdns_dsi_input {
+	enum cdns_dsi_input_id id;
+	struct drm_bridge bridge;
+};
+
+struct cdns_dsi {
+	struct mipi_dsi_host base;
+	void __iomem *regs;
+	struct cdns_dsi_input input;
+	struct cdns_dsi_output output;
+	unsigned int direct_cmd_fifo_depth;
+	unsigned int rx_fifo_depth;
+	struct completion direct_cmd_comp;
+	struct clk *dsi_p_clk;
+	struct reset_control *dsi_p_rst;
+	struct clk *dsi_sys_clk;
+	bool link_initialized;
+	struct cdns_dphy *dphy;
+};
+
+static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
+{
+	return container_of(input, struct cdns_dsi, input);
+}
+
+static inline struct cdns_dsi *to_cdns_dsi(struct mipi_dsi_host *host)
+{
+	return container_of(host, struct cdns_dsi, base);
+}
+
+static inline struct cdns_dsi_input *
+bridge_to_cdns_dsi_input(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct cdns_dsi_input, bridge);
+}
+
+static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
+				     struct cdns_dphy_cfg *cfg,
+				     unsigned int dpi_htotal,
+				     unsigned int dpi_bpp,
+				     unsigned int dpi_hz,
+				     unsigned int dsi_htotal,
+				     unsigned int dsi_nlanes,
+				     unsigned int *dsi_hfp_ext)
+{
+	u64 dlane_bps, dlane_bps_max, fbdiv, fbdiv_max, adj_dsi_htotal;
+	unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);
+
+	memset(cfg, 0, sizeof(*cfg));
+
+	cfg->nlanes = dsi_nlanes;
+
+	if (pll_ref_hz < 9600000 || pll_ref_hz >= 150000000)
+		return -EINVAL;
+	else if (pll_ref_hz < 19200000)
+		cfg->pll_ipdiv = 1;
+	else if (pll_ref_hz < 38400000)
+		cfg->pll_ipdiv = 2;
+	else if (pll_ref_hz < 76800000)
+		cfg->pll_ipdiv = 4;
+	else
+		cfg->pll_ipdiv = 8;
+
+	/*
+	 * Make sure DSI htotal is aligned on a lane boundary when calculating
+	 * the expected data rate. This is done by extending HFP in case of
+	 * misalignment.
+	 */
+	adj_dsi_htotal = dsi_htotal;
+	if (dsi_htotal % dsi_nlanes)
+		adj_dsi_htotal += dsi_nlanes - (dsi_htotal % dsi_nlanes);
+
+	dlane_bps = (u64)dpi_hz * adj_dsi_htotal;
+
+	/* data rate in bytes/sec is not an integer, refuse the mode. */
+	if (do_div(dlane_bps, dsi_nlanes * dpi_htotal))
+		return -EINVAL;
+
+	/* data rate was in bytes/sec, convert to bits/sec. */
+	dlane_bps *= 8;
+
+	if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
+		return -EINVAL;
+	else if (dlane_bps >= 1250000000)
+		cfg->pll_opdiv = 1;
+	else if (dlane_bps >= 630000000)
+		cfg->pll_opdiv = 2;
+	else if (dlane_bps >= 320000000)
+		cfg->pll_opdiv = 4;
+	else if (dlane_bps >= 160000000)
+		cfg->pll_opdiv = 8;
+
+	/*
+	 * Allow a deviation of 0.2% on the per-lane data rate to try to
+	 * recover a potential mismatch between DPI and PPI clks.
+	 */
+	dlane_bps_max = dlane_bps + DIV_ROUND_DOWN_ULL(dlane_bps, 500);
+	fbdiv_max = DIV_ROUND_DOWN_ULL(dlane_bps_max * 2 *
+				       cfg->pll_opdiv * cfg->pll_ipdiv,
+				       pll_ref_hz);
+	fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
+				 cfg->pll_ipdiv,
+				 pll_ref_hz);
+
+	/*
+	 * Iterate over all acceptable fbdiv and try to find an adjusted DSI
+	 * htotal length providing an exact match.
+	 *
+	 * Note that we could do something even trickier by relying on the fact
+	 * that a new line is not necessarily aligned on a lane boundary, so,
+	 * by making adj_dsi_htotal non-aligned on dsi_nlanes we can improve
+	 * the precision a bit. With this, the step would be
+	 *
+	 *	pll_ref_hz / (2 * opdiv * ipdiv * nlanes)
+	 *
+	 * instead of
+	 *
+	 *	pll_ref_hz / (2 * opdiv * ipdiv)
+	 *
+	 * The drawback of this approach is that we would need to make sure the
+	 * number of lines is a multiple of the realignment periodicity, which is
+	 * a function of the number of lanes and the original misalignment. For
+	 * example, for NLANES = 4 and HTOTAL % NLANES = 3, it takes 4 lines
+	 * to realign on a lane:
+	 * LINE 0: expected number of bytes, starts emitting first byte of
+	 *	   LINE 1 on LANE 3
+	 * LINE 1: expected number of bytes, starts emitting first 2 bytes of
+	 *	   LINE 2 on LANES 2 and 3
+	 * LINE 2: expected number of bytes, starts emitting first 3 bytes of
+	 *	   LINE 3 on LANES 1, 2 and 3
+	 * LINE 3: one byte less, now things are realigned on LANE 0 for LINE 4
+	 *
+	 * I figured this extra complexity was not worth the benefit, but if
+	 * someone really has an unfixable mismatch, that would be something to
+	 * investigate.
+	 */
+	for (; fbdiv <= fbdiv_max; fbdiv++) {
+		u32 rem;
+
+		adj_dsi_htotal = (u64)fbdiv * pll_ref_hz * dsi_nlanes *
+				 dpi_htotal;
+
+		/*
+		 * Do the division in 2 steps to avoid an overflow on the
+		 * divider.
+		 */
+		rem = do_div(adj_dsi_htotal, dpi_hz);
+		if (rem)
+			continue;
+
+		rem = do_div(adj_dsi_htotal,
+			     cfg->pll_opdiv * cfg->pll_ipdiv * 2 * 8);
+		if (rem)
+			continue;
+
+		cfg->pll_fbdiv = fbdiv;
+		*dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
+		break;
+	}
+
+	/* No match, let's just reject the display mode. */
+	if (!cfg->pll_fbdiv)
+		return -EINVAL;
+
+	dlane_bps = DIV_ROUND_DOWN_ULL((u64)dpi_hz * adj_dsi_htotal * 8,
+				       dsi_nlanes * dpi_htotal);
+	cfg->lane_bps = dlane_bps;
+
+	return 0;
+}
+
+static int cdns_dphy_setup_psm(struct cdns_dphy *dphy)
+{
+	unsigned long psm_clk_hz = clk_get_rate(dphy->psm_clk);
+	unsigned long psm_div;
+
+	if (!psm_clk_hz || psm_clk_hz > 100000000)
+		return -EINVAL;
+
+	psm_div = DIV_ROUND_CLOSEST(psm_clk_hz, 1000000);
+	if (dphy->ops->set_psm_div)
+		dphy->ops->set_psm_div(dphy, psm_div);
+
+	return 0;
+}
+
+static void cdns_dphy_set_clk_lane_cfg(struct cdns_dphy *dphy,
+				       enum cdns_dphy_clk_lane_cfg cfg)
+{
+	if (dphy->ops->set_clk_lane_cfg)
+		dphy->ops->set_clk_lane_cfg(dphy, cfg);
+}
+
+static void cdns_dphy_set_pll_cfg(struct cdns_dphy *dphy,
+				  const struct cdns_dphy_cfg *cfg)
+{
+	if (dphy->ops->set_pll_cfg)
+		dphy->ops->set_pll_cfg(dphy, cfg);
+}
+
+static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+	return dphy->ops->get_wakeup_time_ns(dphy);
+}
+
+static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
+				      unsigned int dpi_bpp,
+				      unsigned int dsi_pkt_overhead)
+{
+	unsigned int dsi_timing = DIV_ROUND_UP(dpi_timing * dpi_bpp, 8);
+
+	if (dsi_timing < dsi_pkt_overhead)
+		dsi_timing = 0;
+	else
+		dsi_timing -= dsi_pkt_overhead;
+
+	return dsi_timing;
+}
+
+static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
+			     const struct drm_display_mode *mode,
+			     struct cdns_dsi_cfg *dsi_cfg,
+			     struct cdns_dphy_cfg *dphy_cfg,
+			     bool mode_valid_check)
+{
+	unsigned long dsi_htotal = 0, dsi_hss_hsa_hse_hbp = 0;
+	struct cdns_dsi_output *output = &dsi->output;
+	unsigned int dsi_hfp_ext = 0, dpi_hfp, tmp;
+	bool sync_pulse = false;
+	int bpp, nlanes, ret;
+
+	memset(dsi_cfg, 0, sizeof(*dsi_cfg));
+
+	if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+		sync_pulse = true;
+
+	bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+	nlanes = output->dev->lanes;
+
+	if (mode_valid_check)
+		tmp = mode->htotal -
+		      (sync_pulse ? mode->hsync_end : mode->hsync_start);
+	else
+		tmp = mode->crtc_htotal -
+		      (sync_pulse ?
+		       mode->crtc_hsync_end : mode->crtc_hsync_start);
+
+	dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD);
+	dsi_htotal += dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
+	dsi_hss_hsa_hse_hbp += dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
+
+	if (sync_pulse) {
+		if (mode_valid_check)
+			tmp = mode->hsync_end - mode->hsync_start;
+		else
+			tmp = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+		dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp,
+						 DSI_HSA_FRAME_OVERHEAD);
+		dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+		dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+	}
+
+	dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ?
+					  mode->hdisplay : mode->crtc_hdisplay,
+					  bpp, 0);
+	dsi_htotal += dsi_cfg->hact;
+
+	if (mode_valid_check)
+		dpi_hfp = mode->hsync_start - mode->hdisplay;
+	else
+		dpi_hfp = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+	dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD);
+	dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
+
+	if (mode_valid_check)
+		ret = cdns_dsi_get_dphy_pll_cfg(dsi->dphy, dphy_cfg,
+						mode->htotal, bpp,
+						mode->clock * 1000,
+						dsi_htotal, nlanes,
+						&dsi_hfp_ext);
+	else
+		ret = cdns_dsi_get_dphy_pll_cfg(dsi->dphy, dphy_cfg,
+						mode->crtc_htotal, bpp,
+						mode->crtc_clock * 1000,
+						dsi_htotal, nlanes,
+						&dsi_hfp_ext);
+
+	if (ret)
+		return ret;
+
+	dsi_cfg->hfp += dsi_hfp_ext;
+	dsi_htotal += dsi_hfp_ext;
+	dsi_cfg->htotal = dsi_htotal;
+
+	/*
+	 * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO
+	 * is empty before we start receiving a new line on the DPI
+	 * interface.
+	 */
+	if ((u64)dphy_cfg->lane_bps * dpi_hfp * nlanes <
+	    (u64)dsi_hss_hsa_hse_hbp *
+	    (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
+{
+	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+	struct cdns_dsi *dsi = input_to_dsi(input);
+	struct cdns_dsi_output *output = &dsi->output;
+
+	if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
+		dev_err(dsi->base.dev,
+			"cdns-dsi driver is only compatible with DRM devices supporting atomic updates");
+		return -ENOTSUPP;
+	}
+
+	return drm_bridge_attach(bridge->encoder, output->bridge, bridge);
+}
+
+static enum drm_mode_status
+cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+			   const struct drm_display_mode *mode)
+{
+	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+	struct cdns_dsi *dsi = input_to_dsi(input);
+	struct cdns_dsi_output *output = &dsi->output;
+	struct cdns_dphy_cfg dphy_cfg;
+	struct cdns_dsi_cfg dsi_cfg;
+	int bpp, nlanes, ret;
+
+	/*
+	 * VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
+	 * least 1.
+	 */
+	if (mode->vtotal - mode->vsync_end < 2)
+		return MODE_V_ILLEGAL;
+
+	/* VSA_DSI = VSA_DPI and must be at least 2. */
+	if (mode->vsync_end - mode->vsync_start < 2)
+		return MODE_V_ILLEGAL;
+
+	/* HACT must be 32-bits aligned. */
+	bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+	if ((mode->hdisplay * bpp) % 32)
+		return MODE_H_ILLEGAL;
+
+	nlanes = output->dev->lanes;
+
+	ret = cdns_dsi_mode2cfg(dsi, mode, &dsi_cfg, &dphy_cfg, true);
+	if (ret)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
+{
+	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+	struct cdns_dsi *dsi = input_to_dsi(input);
+	u32 val;
+
+	val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
+	val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
+		 DISP_EOT_GEN);
+	writel(val, dsi->regs + MCTL_MAIN_DATA_CTL);
+
+	val = readl(dsi->regs + MCTL_MAIN_EN) & ~IF_EN(input->id);
+	writel(val, dsi->regs + MCTL_MAIN_EN);
+	pm_runtime_put(dsi->base.dev);
+}
+
+static void cdns_dsi_hs_init(struct cdns_dsi *dsi,
+			     const struct cdns_dphy_cfg *dphy_cfg)
+{
+	u32 status;
+
+	/*
+	 * Power all internal DPHY blocks down and maintain their reset line
+	 * asserted before changing the DPHY config.
+	 */
+	writel(DPHY_CMN_PSO | DPHY_PLL_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN |
+	       DPHY_CMN_PDN | DPHY_PLL_PDN,
+	       dsi->regs + MCTL_DPHY_CFG0);
+
+	/*
+	 * Configure the internal PSM clk divider so that the DPHY has a
+	 * 1MHz clk (or something close).
+	 */
+	WARN_ON_ONCE(cdns_dphy_setup_psm(dsi->dphy));
+
+	/*
+	 * Configure the clk lane to data lanes attachment: the DPHY has 2 clk
+	 * lanes and 8 data lanes, and each clk lane can be attached to a
+	 * different set of data lanes. The 2 groups are named 'left' and
+	 * 'right', so here we
+	 * just say that we want the 'left' clk lane to drive the 'left' data
+	 * lanes.
+	 */
+	cdns_dphy_set_clk_lane_cfg(dsi->dphy, DPHY_CLK_CFG_LEFT_DRIVES_LEFT);
+
+	/*
+	 * Configure the DPHY PLL that will be used to generate the TX byte
+	 * clk.
+	 */
+	cdns_dphy_set_pll_cfg(dsi->dphy, dphy_cfg);
+
+	/* Start TX state machine. */
+	writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+	       dsi->dphy->regs + DPHY_CMN_SSM);
+
+	/* Activate the PLL and wait until it's locked. */
+	writel(PLL_LOCKED, dsi->regs + MCTL_MAIN_STS_CLR);
+	writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN,
+	       dsi->regs + MCTL_DPHY_CFG0);
+	WARN_ON_ONCE(readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
+					status & PLL_LOCKED, 100, 100));
+	/* De-assert data and clock reset lines. */
+	writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN |
+	       DPHY_D_RSTB(dphy_cfg->nlanes) | DPHY_C_RSTB,
+	       dsi->regs + MCTL_DPHY_CFG0);
+}
+
+static void cdns_dsi_init_link(struct cdns_dsi *dsi)
+{
+	struct cdns_dsi_output *output = &dsi->output;
+	unsigned long sysclk_period, ulpout;
+	u32 val;
+	int i;
+
+	if (dsi->link_initialized)
+		return;
+
+	val = 0;
+	for (i = 1; i < output->dev->lanes; i++)
+		val |= DATA_LANE_EN(i);
+
+	if (!(output->dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+		val |= CLK_CONTINUOUS;
+
+	writel(val, dsi->regs + MCTL_MAIN_PHY_CTL);
+
+	/* ULPOUT should be set to 1ms and is expressed in sysclk cycles. */
+	sysclk_period = NSEC_PER_SEC / clk_get_rate(dsi->dsi_sys_clk);
+	ulpout = DIV_ROUND_UP(NSEC_PER_MSEC, sysclk_period);
+	writel(CLK_LANE_ULPOUT_TIME(ulpout) | DATA_LANE_ULPOUT_TIME(ulpout),
+	       dsi->regs + MCTL_ULPOUT_TIME);
+
+	writel(LINK_EN, dsi->regs + MCTL_MAIN_DATA_CTL);
+
+	val = CLK_LANE_EN | PLL_START;
+	for (i = 0; i < output->dev->lanes; i++)
+		val |= DATA_LANE_START(i);
+
+	writel(val, dsi->regs + MCTL_MAIN_EN);
+
+	dsi->link_initialized = true;
+}
+
+static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+	struct cdns_dsi *dsi = input_to_dsi(input);
+	struct cdns_dsi_output *output = &dsi->output;
+	struct drm_display_mode *mode;
+	struct cdns_dphy_cfg dphy_cfg;
+	unsigned long tx_byte_period;
+	struct cdns_dsi_cfg dsi_cfg;
+	u32 tmp, reg_wakeup, div;
+	int bpp, nlanes;
+
+	if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
+		return;
+
+	mode = &bridge->encoder->crtc->state->adjusted_mode;
+	bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+	nlanes = output->dev->lanes;
+
+	WARN_ON_ONCE(cdns_dsi_mode2cfg(dsi, mode, &dsi_cfg, &dphy_cfg, false));
+
+	cdns_dsi_hs_init(dsi, &dphy_cfg);
+	cdns_dsi_init_link(dsi);
+
+	writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
+	       dsi->regs + VID_HSIZE1);
+	writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
+	       dsi->regs + VID_HSIZE2);
+
+	writel(VBP_LEN(mode->crtc_vtotal - mode->crtc_vsync_end - 1) |
+	       VFP_LEN(mode->crtc_vsync_start - mode->crtc_vdisplay) |
+	       VSA_LEN(mode->crtc_vsync_end - mode->crtc_vsync_start + 1),
+	       dsi->regs + VID_VSIZE1);
+	writel(mode->crtc_vdisplay, dsi->regs + VID_VSIZE2);
+
+	tmp = dsi_cfg.htotal -
+	      (dsi_cfg.hsa + DSI_BLANKING_FRAME_OVERHEAD +
+	       DSI_HSA_FRAME_OVERHEAD);
+	writel(BLK_LINE_PULSE_PKT_LEN(tmp), dsi->regs + VID_BLKSIZE2);
+	if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+		writel(MAX_LINE_LIMIT(tmp - DSI_NULL_FRAME_OVERHEAD),
+		       dsi->regs + VID_VCA_SETTING2);
+
+	tmp = dsi_cfg.htotal -
+	      (DSI_HSS_VSS_VSE_FRAME_OVERHEAD + DSI_BLANKING_FRAME_OVERHEAD);
+	writel(BLK_LINE_EVENT_PKT_LEN(tmp), dsi->regs + VID_BLKSIZE1);
+	if (!(output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
+		writel(MAX_LINE_LIMIT(tmp - DSI_NULL_FRAME_OVERHEAD),
+		       dsi->regs + VID_VCA_SETTING2);
+
+	tmp = DIV_ROUND_UP(dsi_cfg.htotal, nlanes) -
+	      DIV_ROUND_UP(dsi_cfg.hsa, nlanes);
+
+	if (!(output->dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+		tmp -= DIV_ROUND_UP(DSI_EOT_PKT_SIZE, nlanes);
+
+	tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
+					    dphy_cfg.lane_bps);
+	reg_wakeup = cdns_dphy_get_wakeup_time_ns(dsi->dphy) /
+		     tx_byte_period;
+	writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
+	       dsi->regs + VID_DPHY_TIME);
+
+	/*
+	 * HSTX and LPRX timeouts are both expressed in TX byte clk cycles and
+	 * both should be set to at least the time it takes to transmit a
+	 * frame.
+	 */
+	tmp = NSEC_PER_SEC / drm_mode_vrefresh(mode);
+	tmp /= tx_byte_period;
+
+	for (div = 0; div <= CLK_DIV_MAX; div++) {
+		if (tmp <= HSTX_TIMEOUT_MAX)
+			break;
+
+		tmp >>= 1;
+	}
+
+	if (tmp > HSTX_TIMEOUT_MAX)
+		tmp = HSTX_TIMEOUT_MAX;
+
+	writel(CLK_DIV(div) | HSTX_TIMEOUT(tmp),
+	       dsi->regs + MCTL_DPHY_TIMEOUT1);
+
+	writel(LPRX_TIMEOUT(tmp), dsi->regs + MCTL_DPHY_TIMEOUT2);
+
+	if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		switch (output->dev->format) {
+		case MIPI_DSI_FMT_RGB888:
+			tmp = VID_PIXEL_MODE_RGB888 |
+			      VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_24);
+			break;
+
+		case MIPI_DSI_FMT_RGB666:
+			tmp = VID_PIXEL_MODE_RGB666 |
+			      VID_DATATYPE(MIPI_DSI_PIXEL_STREAM_3BYTE_18);
+			break;
+
+		case MIPI_DSI_FMT_RGB666_PACKED:
+			tmp = VID_PIXEL_MODE_RGB666_PACKED |
+			      VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_18);
+			break;
+
+		case MIPI_DSI_FMT_RGB565:
+			tmp = VID_PIXEL_MODE_RGB565 |
+			      VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_16);
+			break;
+
+		default:
+			dev_err(dsi->base.dev, "Unsupported DSI format\n");
+			return;
+		}
+
+		if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+			tmp |= SYNC_PULSE_ACTIVE | SYNC_PULSE_HORIZONTAL;
+
+		tmp |= REG_BLKLINE_MODE(REG_BLK_MODE_BLANKING_PKT) |
+		       REG_BLKEOL_MODE(REG_BLK_MODE_BLANKING_PKT) |
+		       RECOVERY_MODE(RECOVERY_MODE_NEXT_HSYNC) |
+		       VID_IGNORE_MISS_VSYNC;
+
+		writel(tmp, dsi->regs + VID_MAIN_CTL);
+	}
+
+	tmp = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
+	tmp &= ~(IF_VID_SELECT_MASK | HOST_EOT_GEN | IF_VID_MODE);
+
+	if (!(output->dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+		tmp |= HOST_EOT_GEN;
+
+	if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+		tmp |= IF_VID_MODE | IF_VID_SELECT(input->id) | VID_EN;
+
+	writel(tmp, dsi->regs + MCTL_MAIN_DATA_CTL);
+
+	tmp = readl(dsi->regs + MCTL_MAIN_EN) | IF_EN(input->id);
+	writel(tmp, dsi->regs + MCTL_MAIN_EN);
+}
+
+static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
+	.attach = cdns_dsi_bridge_attach,
+	.mode_valid = cdns_dsi_bridge_mode_valid,
+	.disable = cdns_dsi_bridge_disable,
+	.enable = cdns_dsi_bridge_enable,
+};
+
+static int cdns_dsi_attach(struct mipi_dsi_host *host,
+			   struct mipi_dsi_device *dev)
+{
+	struct cdns_dsi *dsi = to_cdns_dsi(host);
+	struct cdns_dsi_output *output = &dsi->output;
+	struct cdns_dsi_input *input = &dsi->input;
+	struct drm_bridge *bridge;
+	struct drm_panel *panel;
+	struct device_node *np;
+	int ret;
+
+	/*
+	 * We currently do not support connecting several DSI devices to the
+	 * same host. In order to support that we'd need the DRM bridge
+	 * framework to allow dynamic reconfiguration of the bridge chain.
+	 */
+	if (output->dev)
+		return -EBUSY;
+
+	/* We do not support burst mode yet. */
+	if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+		return -ENOTSUPP;
+
+	/*
+	 * The host <-> device link might be described using an OF-graph
+	 * representation. In this case we extract the device of_node from
+	 * this representation; otherwise we use dev->dev.of_node, which
+	 * should have been filled by the core.
+	 */
+	np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
+				      dev->channel);
+	if (!np)
+		np = of_node_get(dev->dev.of_node);
+
+	panel = of_drm_find_panel(np);
+	if (panel) {
+		bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+	} else {
+		bridge = of_drm_find_bridge(dev->dev.of_node);
+		if (!bridge)
+			bridge = ERR_PTR(-EINVAL);
+	}
+
+	of_node_put(np);
+
+	if (IS_ERR(bridge)) {
+		ret = PTR_ERR(bridge);
+		dev_err(host->dev, "failed to add DSI device %s (err = %d)",
+			dev->name, ret);
+		return ret;
+	}
+
+	output->dev = dev;
+	output->bridge = bridge;
+	output->panel = panel;
+
+	/*
+	 * The DSI output has been properly configured; we can now safely
+	 * register the input to the bridge framework so that it can take part
+	 * in a display pipeline.
+	 */
+	drm_bridge_add(&input->bridge);
+
+	return 0;
+}
+
+static int cdns_dsi_detach(struct mipi_dsi_host *host,
+			   struct mipi_dsi_device *dev)
+{
+	struct cdns_dsi *dsi = to_cdns_dsi(host);
+	struct cdns_dsi_output *output = &dsi->output;
+	struct cdns_dsi_input *input = &dsi->input;
+
+	drm_bridge_remove(&input->bridge);
+	if (output->panel)
+		drm_panel_bridge_remove(output->bridge);
+
+	return 0;
+}
+
+static irqreturn_t cdns_dsi_interrupt(int irq, void *data)
+{
+	struct cdns_dsi *dsi = data;
+	irqreturn_t ret = IRQ_NONE;
+	u32 flag, ctl;
+
+	flag = readl(dsi->regs + DIRECT_CMD_STS_FLAG);
+	if (flag) {
+		ctl = readl(dsi->regs + DIRECT_CMD_STS_CTL);
+		ctl &= ~flag;
+		writel(ctl, dsi->regs + DIRECT_CMD_STS_CTL);
+		complete(&dsi->direct_cmd_comp);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
+				 const struct mipi_dsi_msg *msg)
+{
+	struct cdns_dsi *dsi = to_cdns_dsi(host);
+	u32 cmd, sts, val, wait = WRITE_COMPLETED, ctl = 0;
+	struct mipi_dsi_packet packet;
+	int ret, i, tx_len, rx_len;
+
+	ret = pm_runtime_get_sync(host->dev);
+	if (ret < 0)
+		return ret;
+
+	cdns_dsi_init_link(dsi);
+
+	ret = mipi_dsi_create_packet(&packet, msg);
+	if (ret)
+		goto out;
+
+	tx_len = msg->tx_buf ? msg->tx_len : 0;
+	rx_len = msg->rx_buf ? msg->rx_len : 0;
+
+	/* For read operations, the maximum TX len is 2. */
+	if (rx_len && tx_len > 2) {
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	/* TX len is limited by the CMD FIFO depth. */
+	if (tx_len > dsi->direct_cmd_fifo_depth) {
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	/* RX len is limited by the RX FIFO depth. */
+	if (rx_len > dsi->rx_fifo_depth) {
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	cmd = CMD_SIZE(tx_len) | CMD_VCHAN_ID(msg->channel) |
+	      CMD_DATATYPE(msg->type);
+
+	if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+		cmd |= CMD_LP_EN;
+
+	if (mipi_dsi_packet_format_is_long(msg->type))
+		cmd |= CMD_LONG;
+
+	if (rx_len) {
+		cmd |= READ_CMD;
+		wait = READ_COMPLETED_WITH_ERR | READ_COMPLETED;
+		ctl = READ_EN | BTA_EN;
+	} else if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+		cmd |= BTA_REQ;
+		wait = ACK_WITH_ERR_RCVD | ACK_RCVD;
+		ctl = BTA_EN;
+	}
+
+	writel(readl(dsi->regs + MCTL_MAIN_DATA_CTL) | ctl,
+	       dsi->regs + MCTL_MAIN_DATA_CTL);
+
+	writel(cmd, dsi->regs + DIRECT_CMD_MAIN_SETTINGS);
+
+	for (i = 0; i < tx_len; i += 4) {
+		const u8 *buf = msg->tx_buf;
+		int j;
+
+		val = 0;
+		for (j = 0; j < 4 && j + i < tx_len; j++)
+			val |= (u32)buf[i + j] << (8 * j);
+
+		writel(val, dsi->regs + DIRECT_CMD_WRDATA);
+	}
+
+	/* Clear status flags before sending the command. */
+	writel(wait, dsi->regs + DIRECT_CMD_STS_CLR);
+	writel(wait, dsi->regs + DIRECT_CMD_STS_CTL);
+	reinit_completion(&dsi->direct_cmd_comp);
+	writel(0, dsi->regs + DIRECT_CMD_SEND);
+
+	wait_for_completion_timeout(&dsi->direct_cmd_comp,
+				    msecs_to_jiffies(1000));
+
+	sts = readl(dsi->regs + DIRECT_CMD_STS);
+	writel(wait, dsi->regs + DIRECT_CMD_STS_CLR);
+	writel(0, dsi->regs + DIRECT_CMD_STS_CTL);
+
+	writel(readl(dsi->regs + MCTL_MAIN_DATA_CTL) & ~ctl,
+	       dsi->regs + MCTL_MAIN_DATA_CTL);
+
+	/* We did not receive the events we were waiting for. */
+	if (!(sts & wait)) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* 'READ' or 'WRITE with ACK' failed. */
+	if (sts & (READ_COMPLETED_WITH_ERR | ACK_WITH_ERR_RCVD)) {
+		ret = -EIO;
+		goto out;
+	}
+
+	for (i = 0; i < rx_len; i += 4) {
+		u8 *buf = msg->rx_buf;
+		int j;
+
+		val = readl(dsi->regs + DIRECT_CMD_RDDATA);
+		for (j = 0; j < 4 && j + i < rx_len; j++)
+			buf[i + j] = val >> (8 * j);
+	}
+
+out:
+	pm_runtime_put(host->dev);
+	return ret;
+}
+
+static const struct mipi_dsi_host_ops cdns_dsi_ops = {
+	.attach = cdns_dsi_attach,
+	.detach = cdns_dsi_detach,
+	.transfer = cdns_dsi_transfer,
+};
+
+static int cdns_dsi_resume(struct device *dev)
+{
+	struct cdns_dsi *dsi = dev_get_drvdata(dev);
+
+	reset_control_deassert(dsi->dsi_p_rst);
+	clk_prepare_enable(dsi->dsi_p_clk);
+	clk_prepare_enable(dsi->dsi_sys_clk);
+	clk_prepare_enable(dsi->dphy->psm_clk);
+	clk_prepare_enable(dsi->dphy->pll_ref_clk);
+
+	return 0;
+}
+
+static int cdns_dsi_suspend(struct device *dev)
+{
+	struct cdns_dsi *dsi = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(dsi->dphy->pll_ref_clk);
+	clk_disable_unprepare(dsi->dphy->psm_clk);
+	clk_disable_unprepare(dsi->dsi_sys_clk);
+	clk_disable_unprepare(dsi->dsi_p_clk);
+	reset_control_assert(dsi->dsi_p_rst);
+	dsi->link_initialized = false;
+	return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(cdns_dsi_pm_ops, cdns_dsi_suspend, cdns_dsi_resume,
+			    NULL);
+
+static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+	/* Default wakeup time is 800 ns (in a simulated environment). */
+	return 800;
+}
+
+static void cdns_dphy_ref_set_pll_cfg(struct cdns_dphy *dphy,
+				      const struct cdns_dphy_cfg *cfg)
+{
+	u32 fbdiv_low, fbdiv_high;
+
+	fbdiv_low = (cfg->pll_fbdiv / 4) - 2;
+	fbdiv_high = cfg->pll_fbdiv - fbdiv_low - 2;
+
+	writel(DPHY_CMN_IPDIV_FROM_REG | DPHY_CMN_OPDIV_FROM_REG |
+	       DPHY_CMN_IPDIV(cfg->pll_ipdiv) |
+	       DPHY_CMN_OPDIV(cfg->pll_opdiv),
+	       dphy->regs + DPHY_CMN_OPIPDIV);
+	writel(DPHY_CMN_FBDIV_FROM_REG |
+	       DPHY_CMN_FBDIV_VAL(fbdiv_low, fbdiv_high),
+	       dphy->regs + DPHY_CMN_FBDIV);
+	writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
+	       DPHY_CMN_PWM_DIV(0x8),
+	       dphy->regs + DPHY_CMN_PWM);
+}
+
+static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
+{
+	writel(DPHY_PSM_CFG_FROM_REG | DPHY_PSM_CLK_DIV(div),
+	       dphy->regs + DPHY_PSM_CFG);
+}
+
+/*
+ * This is the reference implementation of the DPHY hooks. Specific
+ * integrations of this IP may have to re-implement some of them depending on
+ * how things were wired in the SoC.
+ */
+static const struct cdns_dphy_ops ref_dphy_ops = {
+	.get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
+	.set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
+	.set_psm_div = cdns_dphy_ref_set_psm_div,
+};
+
+static const struct of_device_id cdns_dphy_of_match[] = {
+	{ .compatible = "cdns,dphy", .data = &ref_dphy_ops },
+	{ /* sentinel */ },
+};
+
+static struct cdns_dphy *cdns_dphy_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct cdns_dphy *dphy;
+	struct of_phandle_args args;
+	struct resource res;
+	int ret;
+
+	ret = of_parse_phandle_with_args(pdev->dev.of_node, "phys",
+					 "#phy-cells", 0, &args);
+	if (ret)
+		return ERR_PTR(-ENOENT);
+
+	match = of_match_node(cdns_dphy_of_match, args.np);
+	if (!match || !match->data)
+		return ERR_PTR(-EINVAL);
+
+	dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
+	if (!dphy)
+		return ERR_PTR(-ENOMEM);
+
+	dphy->ops = match->data;
+
+	ret = of_address_to_resource(args.np, 0, &res);
+	if (ret)
+		return ERR_PTR(ret);
+
+	dphy->regs = devm_ioremap_resource(&pdev->dev, &res);
+	if (IS_ERR(dphy->regs))
+		return ERR_CAST(dphy->regs);
+
+	dphy->psm_clk = of_clk_get_by_name(args.np, "psm");
+	if (IS_ERR(dphy->psm_clk))
+		return ERR_CAST(dphy->psm_clk);
+
+	dphy->pll_ref_clk = of_clk_get_by_name(args.np, "pll_ref");
+	if (IS_ERR(dphy->pll_ref_clk)) {
+		ret = PTR_ERR(dphy->pll_ref_clk);
+		goto err_put_psm_clk;
+	}
+
+	if (dphy->ops->probe) {
+		ret = dphy->ops->probe(dphy);
+		if (ret)
+			goto err_put_pll_ref_clk;
+	}
+
+	return dphy;
+
+err_put_pll_ref_clk:
+	clk_put(dphy->pll_ref_clk);
+
+err_put_psm_clk:
+	clk_put(dphy->psm_clk);
+
+	return ERR_PTR(ret);
+}
+
+static void cdns_dphy_remove(struct cdns_dphy *dphy)
+{
+	if (dphy->ops->remove)
+		dphy->ops->remove(dphy);
+
+	clk_put(dphy->pll_ref_clk);
+	clk_put(dphy->psm_clk);
+}
+
+static int cdns_dsi_drm_probe(struct platform_device *pdev)
+{
+	struct cdns_dsi *dsi;
+	struct cdns_dsi_input *input;
+	struct resource *res;
+	int ret, irq;
+	u32 val;
+
+	dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+	if (!dsi)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, dsi);
+
+	input = &dsi->input;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dsi->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dsi->regs))
+		return PTR_ERR(dsi->regs);
+
+	dsi->dsi_p_clk = devm_clk_get(&pdev->dev, "dsi_p_clk");
+	if (IS_ERR(dsi->dsi_p_clk))
+		return PTR_ERR(dsi->dsi_p_clk);
+
+	dsi->dsi_p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+								"dsi_p_rst");
+	if (IS_ERR(dsi->dsi_p_rst))
+		return PTR_ERR(dsi->dsi_p_rst);
+
+	dsi->dsi_sys_clk = devm_clk_get(&pdev->dev, "dsi_sys_clk");
+	if (IS_ERR(dsi->dsi_sys_clk))
+		return PTR_ERR(dsi->dsi_sys_clk);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	dsi->dphy = cdns_dphy_probe(pdev);
+	if (IS_ERR(dsi->dphy))
+		return PTR_ERR(dsi->dphy);
+
+	ret = clk_prepare_enable(dsi->dsi_p_clk);
+	if (ret)
+		goto err_remove_dphy;
+
+	val = readl(dsi->regs + ID_REG);
+	if (REV_VENDOR_ID(val) != 0xcad) {
+		dev_err(&pdev->dev, "invalid vendor id\n");
+		ret = -EINVAL;
+		goto err_disable_pclk;
+	}
+
+	val = readl(dsi->regs + IP_CONF);
+	dsi->direct_cmd_fifo_depth = 1 << (DIRCMD_FIFO_DEPTH(val) + 2);
+	dsi->rx_fifo_depth = RX_FIFO_DEPTH(val);
+	init_completion(&dsi->direct_cmd_comp);
+
+	writel(0, dsi->regs + MCTL_MAIN_DATA_CTL);
+	writel(0, dsi->regs + MCTL_MAIN_EN);
+	writel(0, dsi->regs + MCTL_MAIN_PHY_CTL);
+
+	/*
+	 * We only support the DPI input, so force input->id to
+	 * CDNS_DPI_INPUT.
+	 */
+	input->id = CDNS_DPI_INPUT;
+	input->bridge.funcs = &cdns_dsi_bridge_funcs;
+	input->bridge.of_node = pdev->dev.of_node;
+
+	/* Mask all interrupts before registering the IRQ handler. */
+	writel(0, dsi->regs + MCTL_MAIN_STS_CTL);
+	writel(0, dsi->regs + MCTL_DPHY_ERR_CTL1);
+	writel(0, dsi->regs + CMD_MODE_STS_CTL);
+	writel(0, dsi->regs + DIRECT_CMD_STS_CTL);
+	writel(0, dsi->regs + DIRECT_CMD_RD_STS_CTL);
+	writel(0, dsi->regs + VID_MODE_STS_CTL);
+	writel(0, dsi->regs + TVG_STS_CTL);
+	writel(0, dsi->regs + DPI_IRQ_EN);
+	ret = devm_request_irq(&pdev->dev, irq, cdns_dsi_interrupt, 0,
+			       dev_name(&pdev->dev), dsi);
+	if (ret)
+		goto err_disable_pclk;
+
+	pm_runtime_enable(&pdev->dev);
+	dsi->base.dev = &pdev->dev;
+	dsi->base.ops = &cdns_dsi_ops;
+
+	ret = mipi_dsi_host_register(&dsi->base);
+	if (ret)
+		goto err_disable_runtime_pm;
+
+	clk_disable_unprepare(dsi->dsi_p_clk);
+
+	return 0;
+
+err_disable_runtime_pm:
+	pm_runtime_disable(&pdev->dev);
+
+err_disable_pclk:
+	clk_disable_unprepare(dsi->dsi_p_clk);
+
+err_remove_dphy:
+	cdns_dphy_remove(dsi->dphy);
+
+	return ret;
+}
+
+static int cdns_dsi_drm_remove(struct platform_device *pdev)
+{
+	struct cdns_dsi *dsi = platform_get_drvdata(pdev);
+
+	mipi_dsi_host_unregister(&dsi->base);
+	pm_runtime_disable(&pdev->dev);
+	cdns_dphy_remove(dsi->dphy);
+
+	return 0;
+}
+
+static const struct of_device_id cdns_dsi_of_match[] = {
+	{ .compatible = "cdns,dsi" },
+	{ },
+};
+
+static struct platform_driver cdns_dsi_platform_driver = {
+	.probe  = cdns_dsi_drm_probe,
+	.remove = cdns_dsi_drm_remove,
+	.driver = {
+		.name   = "cdns-dsi",
+		.of_match_table = cdns_dsi_of_match,
+		.pm = &cdns_dsi_pm_ops,
+	},
+};
+module_platform_driver(cdns_dsi_platform_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("Cadence DSI driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cdns-dsi");
+
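As a rough illustration of the horizontal timing conversion done by dpi_to_dsi_timing() and the lane-boundary alignment done in cdns_dsi_get_dphy_pll_cfg() above, here is a small standalone sketch. The mode timings and overhead constants below are sample values only, not taken from any real panel:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Convert a DPI timing (pixels) to a DSI payload length (bytes),
	 * minus the DSI packet overhead, clamping at zero.
	 */
	static unsigned int dpi_to_dsi(unsigned int dpi_timing, unsigned int bpp,
				       unsigned int overhead)
	{
		unsigned int dsi_timing = DIV_ROUND_UP(dpi_timing * bpp, 8);

		return dsi_timing < overhead ? 0 : dsi_timing - overhead;
	}

	int main(void)
	{
		/* Hypothetical 1080p-like mode, RGB888 (24 bpp), 4 lanes. */
		unsigned int bpp = 24, nlanes = 4;
		unsigned int hbp = dpi_to_dsi(148, bpp, 12);	/* HBP overhead */
		unsigned int hsa = dpi_to_dsi(44, bpp, 14);	/* HSA overhead */
		unsigned int hact = dpi_to_dsi(1920, bpp, 0);
		unsigned int hfp = dpi_to_dsi(88, bpp, 6);	/* HFP overhead */
		unsigned int htotal = (hbp + 12) + (hsa + 14) + hact + (hfp + 6);

		/* Align on a lane boundary by extending HFP, as the driver
		 * does before computing the per-lane data rate.
		 */
		if (htotal % nlanes)
			htotal += nlanes - (htotal % nlanes);

		printf("dsi_htotal = %u bytes (%u bytes per lane)\n",
		       htotal, htotal / nlanes);
		return 0;
	}

With these numbers the per-lane byte count comes out even directly; when it does not, the fbdiv loop above extends HFP until the required per-lane rate lands exactly on one of the achievable PLL steps (pll_ref_hz / (2 * opdiv * ipdiv), per the comment in cdns_dsi_get_dphy_pll_cfg()).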

From 044c59890559928225aeb7bc844a254d01689828 Mon Sep 17 00:00:00 2001
From: Boris Brezillon <boris.brezillon@bootlin.com>
Date: Sat, 21 Apr 2018 09:08:46 +0200
Subject: [PATCH 0435/1286] dt-bindings: drm/bridge: Document Cadence DSI
 bridge bindings

Document the bindings used for the Cadence DSI bridge.

Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
Reviewed-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180421070846.10330-2-boris.brezillon@bootlin.com
---
 .../bindings/display/bridge/cdns,dsi.txt      | 133 ++++++++++++++++++
 1 file changed, 133 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt

diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt
new file mode 100644
index 000000000000..f5725bb6c61c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt
@@ -0,0 +1,133 @@
+Cadence DSI bridge
+==================
+
+The Cadence DSI bridge is a DPI to DSI bridge supporting up to 4 DSI lanes.
+
+Required properties:
+- compatible: should be set to "cdns,dsi".
+- reg: physical base address and length of the controller's registers.
+- interrupts: interrupt line connected to the DSI bridge.
+- clocks: DSI bridge clocks.
+- clock-names: must contain "dsi_p_clk" and "dsi_sys_clk".
+- phys: phandle link to the MIPI D-PHY controller.
+- phy-names: must contain "dphy".
+- #address-cells: must be set to 1.
+- #size-cells: must be set to 0.
+
+Optional properties:
+- resets: DSI reset lines.
+- reset-names: can contain "dsi_p_rst".
+
+Required subnodes:
+- ports: Ports as described in Documentation/devicetree/bindings/graph.txt.
+  2 ports are available:
+  * port 0: this port is only needed if some of your DSI devices are
+	    controlled through an external bus like I2C or SPI. Can have at
+	    most 4 endpoints. The endpoint number directly encodes the
+	    DSI virtual channel used by this device.
+  * port 1: represents the DPI input.
+  Other ports will be added later to support new kinds of inputs.
+
+- one subnode per DSI device connected on the DSI bus. Each DSI device should
+  contain a reg property encoding its virtual channel.
+
+Cadence DPHY
+============
+
+Cadence DPHY block.
+
+Required properties:
+- compatible: should be set to "cdns,dphy".
+- reg: physical base address and length of the DPHY registers.
+- clocks: DPHY reference clocks.
+- clock-names: must contain "psm" and "pll_ref".
+- #phy-cells: must be set to 0.
+
+
+Example:
+	dphy0: dphy@fd0e0000{
+		compatible = "cdns,dphy";
+		reg = <0x0 0xfd0e0000 0x0 0x1000>;
+		clocks = <&psm_clk>, <&pll_ref_clk>;
+		clock-names = "psm", "pll_ref";
+		#phy-cells = <0>;
+	};
+
+	dsi0: dsi@fd0c0000 {
+		compatible = "cdns,dsi";
+		reg = <0x0 0xfd0c0000 0x0 0x1000>;
+		clocks = <&pclk>, <&sysclk>;
+		clock-names = "dsi_p_clk", "dsi_sys_clk";
+		interrupts = <1>;
+		phys = <&dphy0>;
+		phy-names = "dphy";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@1 {
+				reg = <1>;
+				dsi0_dpi_input: endpoint {
+					remote-endpoint = <&xxx_dpi_output>;
+				};
+			};
+		};
+
+		panel: dsi-dev@0 {
+			compatible = "<vendor,panel>";
+			reg = <0>;
+		};
+	};
+
+or
+
+	dsi0: dsi@fd0c0000 {
+		compatible = "cdns,dsi";
+		reg = <0x0 0xfd0c0000 0x0 0x1000>;
+		clocks = <&pclk>, <&sysclk>;
+		clock-names = "dsi_p_clk", "dsi_sys_clk";
+		interrupts = <1>;
+		phys = <&dphy1>;
+		phy-names = "dphy";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				dsi0_output: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&dsi_panel_input>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				dsi0_dpi_input: endpoint {
+					remote-endpoint = <&xxx_dpi_output>;
+				};
+			};
+		};
+	};
+
+	i2c@xxx {
+		panel: panel@59 {
+			compatible = "<vendor,panel>";
+			reg = <0x59>;
+
+			port {
+				dsi_panel_input: endpoint {
+					remote-endpoint = <&dsi0_output>;
+				};
+			};
+		};
+	};

From 9f900de31dcc14b3c5765a383ef52b9efba7f542 Mon Sep 17 00:00:00 2001
From: Wolfram Sang <wsa+renesas@sang-engineering.com>
Date: Thu, 19 Apr 2018 16:05:46 +0200
Subject: [PATCH 0436/1286] gpu: drm: vc4: simplify getting .drvdata

We should get drvdata from struct device directly. Going via
platform_device is an unneeded step back and forth.

Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180419140641.27926-17-wsa+renesas@sang-engineering.com
---
 drivers/gpu/drm/vc4/vc4_drv.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 94b99c90425a..af9515ae0e0a 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -318,8 +318,7 @@ dev_unref:
 
 static void vc4_drm_unbind(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct drm_device *drm = platform_get_drvdata(pdev);
+	struct drm_device *drm = dev_get_drvdata(dev);
 
 	drm_dev_unregister(drm);
 

From 22445f0316a253406472ddcda98c3341b1eebaf4 Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Fri, 20 Apr 2018 17:09:54 -0700
Subject: [PATCH 0437/1286] drm/vc4: Add support for plane alpha

The HVS supports mixing fixed alpha with per-pixel alpha or
setting a fixed plane alpha in case there is no per-pixel information.
This allows us to support the generic DRM plane alpha property.

Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180421000954.18936-1-stschake@gmail.com
---
 drivers/gpu/drm/vc4/vc4_plane.c | 21 +++++++++++++++++----
 drivers/gpu/drm/vc4/vc4_regs.h  |  1 +
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c3a37a99e601..3483c05cc3d6 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -201,6 +201,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
 		return;
 
 	plane->state = &vc4_state->base;
+	plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
 	vc4_state->base.plane = plane;
 }
 
@@ -467,6 +468,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	u32 ctl0_offset = vc4_state->dlist_count;
 	const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
 	int num_planes = drm_format_num_planes(format->drm);
+	bool mix_plane_alpha;
 	bool covers_screen;
 	u32 scl0, scl1, pitch0;
 	u32 lbm_size, tiling;
@@ -552,7 +554,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	/* Position Word 0: Image Positions and Alpha Value */
 	vc4_state->pos0_offset = vc4_state->dlist_count;
 	vc4_dlist_write(vc4_state,
-			VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
+			VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
 			VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
 			VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
 
@@ -565,6 +567,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 					      SCALER_POS1_SCL_HEIGHT));
 	}
 
+	/* Don't waste cycles mixing with plane alpha if the set alpha
+	 * is opaque or there is no per-pixel alpha information.
+	 * In any case we use the alpha property value as the fixed alpha.
+	 */
+	mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
+			  fb->format->has_alpha;
+
 	/* Position Word 2: Source Image Size, Alpha */
 	vc4_state->pos2_offset = vc4_state->dlist_count;
 	vc4_dlist_write(vc4_state,
@@ -572,6 +581,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 				      SCALER_POS2_ALPHA_MODE_PIPELINE :
 				      SCALER_POS2_ALPHA_MODE_FIXED,
 				      SCALER_POS2_ALPHA_MODE) |
+			(mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
 			(fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
 			VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
 			VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
@@ -653,10 +663,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 			vc4_state->crtc_w == state->crtc->mode.hdisplay &&
 			vc4_state->crtc_h == state->crtc->mode.vdisplay;
 	/* Background fill might be necessary when the plane has per-pixel
-	 * alpha content and blends from the background or does not cover
-	 * the entire screen.
+	 * alpha content or a non-opaque plane alpha and could blend from the
+	 * background or does not cover the entire screen.
 	 */
-	vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen;
+	vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
+				   state->alpha != DRM_BLEND_ALPHA_OPAQUE;
 
 	return 0;
 }
@@ -916,5 +927,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
 
 	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
 
+	drm_plane_create_alpha_property(plane);
+
 	return plane;
 }
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 4af3e29d076a..d1fb6fec46eb 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -945,6 +945,7 @@ enum hvs_pixel_format {
 #define SCALER_POS2_ALPHA_MODE_FIXED_NONZERO	2
 #define SCALER_POS2_ALPHA_MODE_FIXED_OVER_0x07	3
 #define SCALER_POS2_ALPHA_PREMULT		BIT(29)
+#define SCALER_POS2_ALPHA_MIX			BIT(28)
 
 #define SCALER_POS2_HEIGHT_MASK			VC4_MASK(27, 16)
 #define SCALER_POS2_HEIGHT_SHIFT		16
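The blending change above boils down to two small pieces of arithmetic: the 16-bit DRM alpha property is truncated into the 8-bit SCALER_POS0_FIXED_ALPHA field, and per-pixel mixing is only enabled when both a non-opaque plane alpha and per-pixel alpha are present. A minimal sketch with made-up values (not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	#define DRM_BLEND_ALPHA_OPAQUE	0xffff

	int main(void)
	{
		unsigned int alpha = 0x8000;		/* ~50% plane alpha from the property */
		bool has_per_pixel_alpha = true;	/* e.g. an ARGB8888 framebuffer */

		/* The HVS fixed-alpha field is 8 bits wide, so the low byte
		 * of the 16-bit property value is dropped.
		 */
		unsigned int fixed_alpha = alpha >> 8;

		/* Mixing costs cycles, so it is only enabled when it matters. */
		bool mix_plane_alpha = alpha != DRM_BLEND_ALPHA_OPAQUE &&
				       has_per_pixel_alpha;

		printf("fixed_alpha = 0x%02x, mix_plane_alpha = %d\n",
		       fixed_alpha, mix_plane_alpha);
		return 0;
	}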

From 766cc6b1f7fc9e10f096a84d147e48dead18ba59 Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Fri, 20 Apr 2018 05:25:44 -0700
Subject: [PATCH 0438/1286] drm/vc4: Add CTM support

The hardware has a single block for applying a CTM prior to gamma lut.
It can be fed with pixels from one of our CRTCs at a time and uses a
matrix with S0.9 scalars. Use private atomic state to reject attempts
from userland to apply CTM for more than one CRTC at a time and reject
matrices with scalars that we can't approximate without integer bits.

Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/218067/
---
 drivers/gpu/drm/vc4/vc4_crtc.c |   5 +
 drivers/gpu/drm/vc4/vc4_drv.c  |   3 +
 drivers/gpu/drm/vc4/vc4_drv.h  |   4 +
 drivers/gpu/drm/vc4/vc4_kms.c  | 204 ++++++++++++++++++++++++++++++++-
 4 files changed, 215 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 08fe8dd7d8df..83d3b7912fc2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -1018,6 +1018,11 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
 	drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
 	drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
 
+	/* We support CTM, but only for one CRTC at a time. It's therefore
+	 * implemented as private driver state in vc4_kms, not here.
+	 */
+	drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size);
+
 	/* Set up some arbitrary number of planes.  We're not limited
 	 * by a set number of physical registers, just the space in
 	 * the HVS (16k) and how small an plane can be (28 bytes).
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index af9515ae0e0a..40ddeaafd65f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -319,6 +319,7 @@ dev_unref:
 static void vc4_drm_unbind(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
+	struct vc4_dev *vc4 = to_vc4_dev(drm);
 
 	drm_dev_unregister(drm);
 
@@ -326,6 +327,8 @@ static void vc4_drm_unbind(struct device *dev)
 
 	drm_mode_config_cleanup(drm);
 
+	drm_atomic_private_obj_fini(&vc4->ctm_manager);
+
 	drm_dev_unref(drm);
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 4288615b66a2..22589d39083c 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -10,6 +10,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_atomic.h>
 
 #include "uapi/drm/vc4_drm.h"
 
@@ -193,6 +194,9 @@ struct vc4_dev {
 	} hangcheck;
 
 	struct semaphore async_modeset;
+
+	struct drm_modeset_lock ctm_state_lock;
+	struct drm_private_obj ctm_manager;
 };
 
 static inline struct vc4_dev *
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index e791e498a3dd..8a411e5f8776 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -23,6 +23,117 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include "vc4_drv.h"
+#include "vc4_regs.h"
+
+struct vc4_ctm_state {
+	struct drm_private_state base;
+	struct drm_color_ctm *ctm;
+	int fifo;
+};
+
+static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
+{
+	return container_of(priv, struct vc4_ctm_state, base);
+}
+
+static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
+					       struct drm_private_obj *manager)
+{
+	struct drm_device *dev = state->dev;
+	struct vc4_dev *vc4 = dev->dev_private;
+	struct drm_private_state *priv_state;
+	int ret;
+
+	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
+	priv_state = drm_atomic_get_private_obj_state(state, manager);
+	if (IS_ERR(priv_state))
+		return ERR_CAST(priv_state);
+
+	return to_vc4_ctm_state(priv_state);
+}
+
+static struct drm_private_state *
+vc4_ctm_duplicate_state(struct drm_private_obj *obj)
+{
+	struct vc4_ctm_state *state;
+
+	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+	return &state->base;
+}
+
+static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
+				  struct drm_private_state *state)
+{
+	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);
+
+	kfree(ctm_state);
+}
+
+static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
+	.atomic_duplicate_state = vc4_ctm_duplicate_state,
+	.atomic_destroy_state = vc4_ctm_destroy_state,
+};
+
+/* Converts a DRM S31.32 value to the HW S0.9 format. */
+static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
+{
+	u16 r;
+
+	/* Sign bit. */
+	r = in & BIT_ULL(63) ? BIT(9) : 0;
+
+	if ((in & GENMASK_ULL(62, 32)) > 0) {
+		/* We have zero integer bits so we can only saturate here. */
+		r |= GENMASK(8, 0);
+	} else {
+		/* Otherwise take the 9 most important fractional bits. */
+		r |= (in >> 23) & GENMASK(8, 0);
+	}
+
+	return r;
+}
+
+static void
+vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
+{
+	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
+	struct drm_color_ctm *ctm = ctm_state->ctm;
+
+	if (ctm_state->fifo) {
+		HVS_WRITE(SCALER_OLEDCOEF2,
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
+					SCALER_OLEDCOEF2_R_TO_R) |
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
+					SCALER_OLEDCOEF2_R_TO_G) |
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
+					SCALER_OLEDCOEF2_R_TO_B));
+		HVS_WRITE(SCALER_OLEDCOEF1,
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
+					SCALER_OLEDCOEF1_G_TO_R) |
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
+					SCALER_OLEDCOEF1_G_TO_G) |
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
+					SCALER_OLEDCOEF1_G_TO_B));
+		HVS_WRITE(SCALER_OLEDCOEF0,
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
+					SCALER_OLEDCOEF0_B_TO_R) |
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
+					SCALER_OLEDCOEF0_B_TO_G) |
+			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
+					SCALER_OLEDCOEF0_B_TO_B));
+	}
+
+	HVS_WRITE(SCALER_OLEDOFFS,
+		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
+}
 
 static void
 vc4_atomic_complete_commit(struct drm_atomic_state *state)
@@ -36,6 +147,8 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
+	vc4_ctm_commit(vc4, state);
+
 	drm_atomic_helper_commit_planes(dev, state, 0);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
@@ -207,9 +320,89 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
 	return drm_gem_fb_create(dev, file_priv, mode_cmd);
 }
 
+/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
+ * at a time and the HW only supports S0.9 scalars. To account for the latter,
+ * we don't allow userland to set a CTM that we have no hope of approximating.
+ */
+static int
+vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_ctm_state *ctm_state = NULL;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_color_ctm *ctm;
+	int i;
+
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		/* CTM is being disabled. */
+		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
+			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
+			if (IS_ERR(ctm_state))
+				return PTR_ERR(ctm_state);
+			ctm_state->fifo = 0;
+		}
+	}
+
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		if (new_crtc_state->ctm == old_crtc_state->ctm)
+			continue;
+
+		if (!ctm_state) {
+			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
+			if (IS_ERR(ctm_state))
+				return PTR_ERR(ctm_state);
+		}
+
+		/* CTM is being enabled or the matrix changed. */
+		if (new_crtc_state->ctm) {
+			/* fifo is 1-based since 0 disables CTM. */
+			int fifo = to_vc4_crtc(crtc)->channel + 1;
+
+			/* Check userland isn't trying to turn on CTM for more
+			 * than one CRTC at a time.
+			 */
+			if (ctm_state->fifo && ctm_state->fifo != fifo) {
+				DRM_DEBUG_DRIVER("Too many CTM configured\n");
+				return -EINVAL;
+			}
+
+			/* Check we can approximate the specified CTM.
+			 * We disallow scalars |c| > 1.0 since the HW has
+			 * no integer bits.
+			 */
+			ctm = new_crtc_state->ctm->data;
+			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
+				u64 val = ctm->matrix[i];
+
+				val &= ~BIT_ULL(63);
+				if (val > BIT_ULL(32))
+					return -EINVAL;
+			}
+
+			ctm_state->fifo = fifo;
+			ctm_state->ctm = ctm;
+		}
+	}
+
+	return 0;
+}
+
+static int
+vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+	int ret;
+
+	ret = vc4_ctm_atomic_check(dev, state);
+	if (ret < 0)
+		return ret;
+
+	return drm_atomic_helper_check(dev, state);
+}
+
 static const struct drm_mode_config_funcs vc4_mode_funcs = {
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
-	.atomic_check = drm_atomic_helper_check,
+	.atomic_check = vc4_atomic_check,
 	.atomic_commit = vc4_atomic_commit,
 	.fb_create = vc4_fb_create,
 };
@@ -217,6 +410,7 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
 int vc4_kms_load(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_ctm_state *ctm_state;
 	int ret;
 
 	sema_init(&vc4->async_modeset, 1);
@@ -237,6 +431,14 @@ int vc4_kms_load(struct drm_device *dev)
 	dev->mode_config.async_page_flip = true;
 	dev->mode_config.allow_fb_modifiers = true;
 
+	drm_modeset_lock_init(&vc4->ctm_state_lock);
+
+	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
+	if (!ctm_state)
+		return -ENOMEM;
+	drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base,
+				    &vc4_ctm_state_funcs);
+
 	drm_mode_config_reset(dev);
 
 	if (dev->mode_config.num_connector)
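The S31.32 to S0.9 conversion is easier to follow with a worked value. Below is a standalone sketch (sample inputs only, not driver code) mirroring vc4_ctm_s31_32_to_s0_9(): 0.5 keeps its top nine fractional bits, while anything with a non-zero integer part saturates (the atomic check above would reject such a matrix before it reaches the hardware):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t s31_32_to_s0_9(uint64_t in)
	{
		/* Sign bit of the S31.32 value becomes bit 9 of the result. */
		uint16_t r = (in & (1ULL << 63)) ? (1 << 9) : 0;

		if ((in >> 32) & 0x7fffffff)
			r |= 0x1ff;			/* no integer bits: saturate */
		else
			r |= (in >> 23) & 0x1ff;	/* 9 most significant fractional bits */

		return r;
	}

	int main(void)
	{
		uint64_t half = 0x80000000ULL;	/* 0.5 in S31.32 */
		uint64_t two = 2ULL << 32;	/* 2.0 in S31.32 */

		printf("0.5 -> 0x%03x\n", s31_32_to_s0_9(half));	/* 0x100 = 256/512 */
		printf("2.0 -> 0x%03x\n", s31_32_to_s0_9(two));	/* 0x1ff (saturated) */
		return 0;
	}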

From c5dc6cf7f91f9e965a528910cf3ab96710d82dcf Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Fri, 20 Apr 2018 05:25:45 -0700
Subject: [PATCH 0439/1286] drm/vc4: Add CTM registers to debugfs

Now that we set the OLED* registers to do CTM, it's helpful to have them
in the register dump.

Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180420122545.40014-2-stschake@gmail.com
---
 drivers/gpu/drm/vc4/vc4_hvs.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 2b62fc5b8d85..5d8c749c9749 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -58,6 +58,10 @@ static const struct {
 	HVS_REG(SCALER_DISPSTAT2),
 	HVS_REG(SCALER_DISPBASE2),
 	HVS_REG(SCALER_DISPALPHA2),
+	HVS_REG(SCALER_OLEDOFFS),
+	HVS_REG(SCALER_OLEDCOEF0),
+	HVS_REG(SCALER_OLEDCOEF1),
+	HVS_REG(SCALER_OLEDCOEF2),
 };
 
 void vc4_hvs_dump_state(struct drm_device *dev)

From 672e314b21dc614894e69bb56a2b55cc7d256810 Mon Sep 17 00:00:00 2001
From: Matt Atwood <matthew.s.atwood@intel.com>
Date: Mon, 23 Apr 2018 15:28:03 -0700
Subject: [PATCH 0440/1286] drm/i915/kbl: Add KBL GT2 sku

Add a missing GT2 SKU discovered on hardware.

Signed-off-by: Matt Atwood <matthew.s.atwood@intel.com>
Reviewed-by: Clint Taylor <clinton.a.taylor@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1524522483-19987-1-git-send-email-matthew.s.atwood@intel.com
---
 include/drm/i915_pciids.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 70f0c2535b87..bab70ff6e78b 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -349,6 +349,7 @@
 #define INTEL_KBL_GT2_IDS(info)	\
 	INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
 	INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
+	INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
 	INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
 	INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
 	INTEL_VGA_DEVICE(0x5912, info), /* DT  GT2 */ \

From 93cba9dab19fd95633e721f0c413629f10e663cd Mon Sep 17 00:00:00 2001
From: Lin Huang <hl@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:37 +0200
Subject: [PATCH 0441/1286] drm/bridge: analogix_dp: Move enable video into
 config_video()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We need to enable video before analogix_dp_is_video_stream_on(), so
we can get the right video stream status.

We also need to increase the delay in the timeout loop because a random
"Timeout of video streamclk ok" message can appear when debugging the eDP
panel; this delay is not defined in the spec.

Cc: 征增 王 <wzz@rock-chips.com>
Cc: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Lin Huang <hl@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-2-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 5c52307146c7..05a0ca4af057 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -819,11 +819,10 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
 		if (analogix_dp_is_slave_video_stream_clock_on(dp) == 0)
 			break;
 		if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
-			dev_err(dp->dev, "Timeout of video streamclk ok\n");
+			dev_err(dp->dev, "Timeout of slave video streamclk ok\n");
 			return -ETIMEDOUT;
 		}
-
-		usleep_range(1, 2);
+		usleep_range(1000, 1001);
 	}
 
 	/* Set to use the register calculated M/N video */
@@ -838,6 +837,9 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
 	/* Configure video slave mode */
 	analogix_dp_enable_video_master(dp, 0);
 
+	/* Enable video */
+	analogix_dp_start_video(dp);
+
 	timeout_loop = 0;
 
 	for (;;) {
@@ -948,9 +950,6 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
 			DRM_ERROR("failed to enable the panel\n");
 	}
 
-	/* Enable video */
-	analogix_dp_start_video(dp);
-
 	dp->psr_enable = analogix_dp_detect_sink_psr(dp);
 	if (dp->psr_enable)
 		analogix_dp_enable_sink_psr(dp);
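
The patched loop is a standard bounded poll: check the status bit, bail out
after DP_TIMEOUT_LOOP_COUNT iterations, and sleep roughly 1 ms between checks.
A minimal userspace sketch of that pattern, with a made-up status function
standing in for the hardware "streamclk ok" bit:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TIMEOUT_LOOP_COUNT 100		/* mirrors DP_TIMEOUT_LOOP_COUNT */

/* Hypothetical status check; stands in for the "streamclk ok" bit. */
static bool stream_clock_ok(int iteration)
{
	return iteration >= 7;		/* pretend it stabilises after ~7 ms */
}

int main(void)
{
	int timeout_loop = 0;

	for (;;) {
		if (stream_clock_ok(timeout_loop))
			break;
		if (++timeout_loop > TIMEOUT_LOOP_COUNT) {
			fprintf(stderr, "timeout waiting for stream clock\n");
			return 1;
		}
		usleep(1000);		/* ~1 ms per iteration, as in the patch */
	}
	printf("stream clock ok after %d iterations\n", timeout_loop);
	return 0;
}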

From c2021db1905ed5b4480882836d8d3631ca786869 Mon Sep 17 00:00:00 2001
From: Lin Huang <hl@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:38 +0200
Subject: [PATCH 0442/1286] drm/bridge: analogix_dp: Check AUX_EN status when
 doing AUX transfer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We should check the AUX_EN bit to confirm that the AUX CH operation has completed.

Cc: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Lin Huang <hl@rock-chips.com>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-3-enric.balletbo@collabora.com
---
 .../gpu/drm/bridge/analogix/analogix_dp_reg.c | 25 +++++++++++--------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 9df2f3ef000c..e78c861b9e06 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1073,9 +1073,9 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 {
 	u32 reg;
 	u8 *buffer = msg->buffer;
-	int timeout_loop = 0;
 	unsigned int i;
 	int num_transferred = 0;
+	int ret;
 
 	/* Buffer size of AUX CH is 16 bytes */
 	if (WARN_ON(msg->size > 16))
@@ -1139,17 +1139,20 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 
 	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
 
-	/* Is AUX CH command reply received? */
+	ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2,
+				 reg, !(reg & AUX_EN), 25, 500 * 1000);
+	if (ret) {
+		dev_err(dp->dev, "AUX CH enable timeout!\n");
+		return -ETIMEDOUT;
+	}
+
 	/* TODO: Wait for an interrupt instead of looping? */
-	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
-	while (!(reg & RPLY_RECEIV)) {
-		timeout_loop++;
-		if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
-			dev_err(dp->dev, "AUX CH command reply failed!\n");
-			return -ETIMEDOUT;
-		}
-		reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
-		usleep_range(10, 11);
+	/* Is AUX CH command reply received? */
+	ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_INT_STA,
+				 reg, reg & RPLY_RECEIV, 10, 20 * 1000);
+	if (ret) {
+		dev_err(dp->dev, "AUX CH cmd reply timeout!\n");
+		return -ETIMEDOUT;
 	}
 
 	/* Clear interrupt source for AUX CH command reply */
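
readx_poll_timeout() is the kernel's generic poll-until-condition-or-timeout
helper, and the patch uses it to replace two open-coded loops. A rough
userspace sketch of the same idea, assuming a plain variable for the register
and an illustrative AUX_EN bit position:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define AUX_EN_BIT (1u << 0)	/* illustrative position, not the real layout */

static uint32_t fake_aux_ctl = AUX_EN_BIT;

/* Pretend the hardware clears AUX_EN after a few reads. */
static uint32_t read_aux_ctl(void)
{
	static int reads;

	if (++reads > 3)
		fake_aux_ctl &= ~AUX_EN_BIT;
	return fake_aux_ctl;
}

/* Poll until (read_reg() & mask) == 0 or the timeout expires. */
static int poll_timeout(uint32_t (*read_reg)(void), uint32_t mask,
			unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	while (read_reg() & mask) {
		if (waited >= timeout_us)
			return -1;	/* -ETIMEDOUT in the kernel version */
		usleep(sleep_us);
		waited += sleep_us;
	}
	return 0;
}

int main(void)
{
	if (poll_timeout(read_aux_ctl, AUX_EN_BIT, 25, 500 * 1000))
		fprintf(stderr, "AUX CH enable timeout!\n");
	else
		printf("AUX transaction finished\n");
	return 0;
}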

From 7ba8fb5704e958b94b6f0260f20afca58b35a55a Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:39 +0200
Subject: [PATCH 0443/1286] drm/bridge: analogix_dp: Don't use fast link
 training when panel just powered up
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The panel resets its settings when it powers down, so it forgets the
link training configuration that last succeeded. We therefore can't
reuse that configuration for fast link training. Reset fast_train_enable
in analogix_dp_bridge_disable().

Cc: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-4-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 9 +++++----
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 2 +-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 05a0ca4af057..85dbb1ca9886 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -579,14 +579,14 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 		if (retval != 1) {
 			dev_err(dp->dev, "failed to read downspread %d\n",
 				retval);
-			dp->fast_train_support = false;
+			dp->fast_train_enable = false;
 		} else {
-			dp->fast_train_support =
+			dp->fast_train_enable =
 				(spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING) ?
 					true : false;
 		}
 		dev_dbg(dp->dev, "fast link training %s\n",
-			dp->fast_train_support ? "supported" : "unsupported");
+			dp->fast_train_enable ? "supported" : "unsupported");
 
 		/* set enhanced mode if available */
 		analogix_dp_set_enhanced_mode(dp);
@@ -793,7 +793,7 @@ static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
 
 static int analogix_dp_train_link(struct analogix_dp_device *dp)
 {
-	if (dp->fast_train_support)
+	if (dp->fast_train_enable)
 		return analogix_dp_fast_link_train(dp);
 
 	return analogix_dp_full_link_train(dp, dp->video_info.max_lane_count,
@@ -1197,6 +1197,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 		DRM_ERROR("failed to setup the panel ret = %d\n", ret);
 
 	dp->psr_enable = false;
+	dp->fast_train_enable = false;
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 6a96ef7e6934..403ff853464b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -173,7 +173,7 @@ struct analogix_dp_device {
 	int			hpd_gpio;
 	bool                    force_hpd;
 	bool			psr_enable;
-	bool			fast_train_support;
+	bool			fast_train_enable;
 
 	struct mutex		panel_lock;
 	bool			panel_is_modeset;

From 8a335736f94edc9c8c977d931301e6a0326fbc3e Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:40 +0200
Subject: [PATCH 0444/1286] drm/bridge: analogix_dp: Retry bridge enable when
 it failed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When enabling the bridge fails, we have to retry it; otherwise we get
an abnormal display.

Cc: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-5-enric.balletbo@collabora.com
---
 .../drm/bridge/analogix/analogix_dp_core.c    | 67 ++++++++++++++-----
 .../drm/bridge/analogix/analogix_dp_core.h    |  3 +-
 .../gpu/drm/bridge/analogix/analogix_dp_reg.c |  5 +-
 3 files changed, 57 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 85dbb1ca9886..bf805f156272 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -43,8 +43,10 @@ struct bridge_init {
 	struct device_node *node;
 };
 
-static void analogix_dp_init_dp(struct analogix_dp_device *dp)
+static int analogix_dp_init_dp(struct analogix_dp_device *dp)
 {
+	int ret;
+
 	analogix_dp_reset(dp);
 
 	analogix_dp_swreset(dp);
@@ -56,10 +58,13 @@ static void analogix_dp_init_dp(struct analogix_dp_device *dp)
 	analogix_dp_enable_sw_function(dp);
 
 	analogix_dp_config_interrupt(dp);
-	analogix_dp_init_analog_func(dp);
+	ret = analogix_dp_init_analog_func(dp);
+	if (ret)
+		return ret;
 
 	analogix_dp_init_hpd(dp);
 	analogix_dp_init_aux(dp);
+	return 0;
 }
 
 static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
@@ -918,7 +923,7 @@ static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static void analogix_dp_commit(struct analogix_dp_device *dp)
+static int analogix_dp_commit(struct analogix_dp_device *dp)
 {
 	int ret;
 
@@ -928,11 +933,10 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
 			DRM_ERROR("failed to disable the panel\n");
 	}
 
-	ret = readx_poll_timeout(analogix_dp_train_link, dp, ret, !ret, 100,
-				 DP_TIMEOUT_TRAINING_US * 5);
+	ret = analogix_dp_train_link(dp);
 	if (ret) {
 		dev_err(dp->dev, "unable to do link train, ret=%d\n", ret);
-		return;
+		return ret;
 	}
 
 	analogix_dp_enable_scramble(dp, 1);
@@ -953,6 +957,7 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
 	dp->psr_enable = analogix_dp_detect_sink_psr(dp);
 	if (dp->psr_enable)
 		analogix_dp_enable_sink_psr(dp);
+	return 0;
 }
 
 /*
@@ -1149,12 +1154,9 @@ static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
 		DRM_ERROR("failed to setup the panel ret = %d\n", ret);
 }
 
-static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
 {
-	struct analogix_dp_device *dp = bridge->driver_private;
-
-	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
-		return;
+	int ret;
 
 	pm_runtime_get_sync(dp->dev);
 
@@ -1162,11 +1164,46 @@ static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
 		dp->plat_data->power_on(dp->plat_data);
 
 	phy_power_on(dp->phy);
-	analogix_dp_init_dp(dp);
-	enable_irq(dp->irq);
-	analogix_dp_commit(dp);
 
-	dp->dpms_mode = DRM_MODE_DPMS_ON;
+	ret = analogix_dp_init_dp(dp);
+	if (ret)
+		goto out_dp_init;
+
+	ret = analogix_dp_commit(dp);
+	if (ret)
+		goto out_dp_init;
+
+	enable_irq(dp->irq);
+	return 0;
+
+out_dp_init:
+	phy_power_off(dp->phy);
+	if (dp->plat_data->power_off)
+		dp->plat_data->power_off(dp->plat_data);
+	pm_runtime_put_sync(dp->dev);
+
+	return ret;
+}
+
+static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+	int timeout_loop = 0;
+
+	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
+		return;
+
+	while (timeout_loop < MAX_PLL_LOCK_LOOP) {
+		if (analogix_dp_set_bridge(dp) == 0) {
+			dp->dpms_mode = DRM_MODE_DPMS_ON;
+			return;
+		}
+		dev_err(dp->dev, "failed to set bridge, retry: %d\n",
+			timeout_loop);
+		timeout_loop++;
+		usleep_range(10, 11);
+	}
+	dev_err(dp->dev, "too many times retry set bridge, give it up\n");
 }
 
 static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 403ff853464b..769255dc6e99 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -19,6 +19,7 @@
 #define DP_TIMEOUT_LOOP_COUNT 100
 #define MAX_CR_LOOP 5
 #define MAX_EQ_LOOP 5
+#define MAX_PLL_LOCK_LOOP 5
 
 /* Training takes 22ms if AUX channel comm fails. Use this as retry interval */
 #define DP_TIMEOUT_TRAINING_US			22000
@@ -197,7 +198,7 @@ void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable);
 void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
 				       enum analog_power_block block,
 				       bool enable);
-void analogix_dp_init_analog_func(struct analogix_dp_device *dp);
+int analogix_dp_init_analog_func(struct analogix_dp_device *dp);
 void analogix_dp_init_hpd(struct analogix_dp_device *dp);
 void analogix_dp_force_hpd(struct analogix_dp_device *dp);
 enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index e78c861b9e06..b47c5af43560 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -333,7 +333,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
 	}
 }
 
-void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
+int analogix_dp_init_analog_func(struct analogix_dp_device *dp)
 {
 	u32 reg;
 	int timeout_loop = 0;
@@ -355,7 +355,7 @@ void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
 			timeout_loop++;
 			if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
 				dev_err(dp->dev, "failed to get pll lock status\n");
-				return;
+				return -ETIMEDOUT;
 			}
 			usleep_range(10, 20);
 		}
@@ -366,6 +366,7 @@ void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
 	reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
 		| AUX_FUNC_EN_N);
 	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+	return 0;
 }
 
 void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp)
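
The enable path is now wrapped in a bounded retry loop (MAX_PLL_LOCK_LOOP
attempts). A small standalone sketch of that pattern, with the real
analogix_dp_set_bridge() replaced by a stand-in that fails twice before
succeeding:

#include <stdio.h>
#include <unistd.h>

#define MAX_RETRIES 5	/* mirrors MAX_PLL_LOCK_LOOP in the patch */

/* Illustrative stand-in for analogix_dp_set_bridge(). */
static int set_bridge(void)
{
	static int calls;

	return ++calls < 3 ? -1 : 0;	/* fail twice, then succeed */
}

int main(void)
{
	int attempt;

	for (attempt = 0; attempt < MAX_RETRIES; attempt++) {
		if (set_bridge() == 0) {
			printf("bridge enabled on attempt %d\n", attempt + 1);
			return 0;
		}
		fprintf(stderr, "failed to set bridge, retry: %d\n", attempt);
		usleep(10);
	}
	fprintf(stderr, "too many retries, giving up\n");
	return 1;
}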

From 7f6414143a61f9eb369c90ef0550c9ef8e603d57 Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:41 +0200
Subject: [PATCH 0445/1286] drm/bridge: analogix_dp: Wait for HPD signal before
 configuring link
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

According to DP spec v1.3 chapter 3.5.1.2, Link Training, the Link
Policy Maker must first detect that the HPD signal is asserted high by
the Downstream Device before establishing a link with it.

Cc: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-6-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index bf805f156272..3269deec739d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1169,6 +1169,17 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
 	if (ret)
 		goto out_dp_init;
 
+	/*
+	 * According to DP spec v1.3 chap 3.5.1.2 Link Training,
+	 * We should first make sure the HPD signal is asserted high by device
+	 * when we want to establish a link with it.
+	 */
+	ret = analogix_dp_detect_hpd(dp);
+	if (ret) {
+		DRM_ERROR("failed to get hpd single ret = %d\n", ret);
+		goto out_dp_init;
+	}
+
 	ret = analogix_dp_commit(dp);
 	if (ret)
 		goto out_dp_init;

From d79acb593290e1faeec6d9032c08fc0ab38080e0 Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:42 +0200
Subject: [PATCH 0446/1286] drm/bridge: analogix_dp: Set PD_INC_BG first when
 powering up edp phy
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Follow the correct power-up sequence:
dp_pd=ff => dp_pd=7f => wait 10us => dp_pd=00

Cc: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-7-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 10 ++++++++--
 drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h |  3 +++
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index b47c5af43560..bb72f8b0e603 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -321,10 +321,16 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
 		break;
 	case POWER_ALL:
 		if (enable) {
-			reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
-				CH1_PD | CH0_PD;
+			reg = DP_ALL_PD;
 			writel(reg, dp->reg_base + phy_pd_addr);
 		} else {
+			reg = DP_ALL_PD;
+			writel(reg, dp->reg_base + phy_pd_addr);
+			usleep_range(10, 15);
+			reg &= ~DP_INC_BG;
+			writel(reg, dp->reg_base + phy_pd_addr);
+			usleep_range(10, 15);
+
 			writel(0x00, dp->reg_base + phy_pd_addr);
 		}
 		break;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index 40200c652533..9602668669f4 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -342,12 +342,15 @@
 #define DP_PLL_REF_BIT_1_2500V			(0x7 << 0)
 
 /* ANALOGIX_DP_PHY_PD */
+#define DP_INC_BG				(0x1 << 7)
+#define DP_EXP_BG				(0x1 << 6)
 #define DP_PHY_PD				(0x1 << 5)
 #define AUX_PD					(0x1 << 4)
 #define CH3_PD					(0x1 << 3)
 #define CH2_PD					(0x1 << 2)
 #define CH1_PD					(0x1 << 1)
 #define CH0_PD					(0x1 << 0)
+#define DP_ALL_PD				(0xff)
 
 /* ANALOGIX_DP_PHY_TEST */
 #define MACRO_RST				(0x1 << 5)
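
The power-up sequence from the commit message, sketched as standalone C with
the PHY_PD register replaced by a plain variable; the bit values are copied
from the new defines, and the print-out is only for illustration:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define DP_INC_BG	(0x1u << 7)	/* from the new defines */
#define DP_ALL_PD	0xffu

static uint32_t phy_pd;			/* stands in for ANALOGIX_DP_PHY_PD */

static void write_pd(uint32_t val)
{
	phy_pd = val;
	printf("dp_pd=%02x\n", (unsigned int)phy_pd);
}

int main(void)
{
	/* ff => 7f => wait 10us => 00, as described above */
	write_pd(DP_ALL_PD);			/* everything powered down */
	write_pd(DP_ALL_PD & ~DP_INC_BG);	/* release the bandgap first */
	usleep(10);
	write_pd(0x00);				/* now power everything up */
	return 0;
}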

From 63872659fca7997af290ddfaaf73a61bdd7cbedb Mon Sep 17 00:00:00 2001
From: Lin Huang <hl@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:43 +0200
Subject: [PATCH 0447/1286] drm/bridge: analogix_dp: Ensure edp is disabled
 when shutting down the panel
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When the panel is shut down, we should make sure the eDP output is
disabled to avoid undefined behavior.

Cc: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Lin Huang <hl@rock-chips.com>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-8-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 3269deec739d..5957e2338071 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1160,6 +1160,12 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
 
 	pm_runtime_get_sync(dp->dev);
 
+	ret = clk_prepare_enable(dp->clock);
+	if (ret < 0) {
+		DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
+		goto out_dp_clk_pre;
+	}
+
 	if (dp->plat_data->power_on)
 		dp->plat_data->power_on(dp->plat_data);
 
@@ -1191,6 +1197,8 @@ out_dp_init:
 	phy_power_off(dp->phy);
 	if (dp->plat_data->power_off)
 		dp->plat_data->power_off(dp->plat_data);
+	clk_disable_unprepare(dp->clock);
+out_dp_clk_pre:
 	pm_runtime_put_sync(dp->dev);
 
 	return ret;
@@ -1233,11 +1241,14 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 	}
 
 	disable_irq(dp->irq);
+	analogix_dp_set_analog_power_down(dp, POWER_ALL, 1);
 	phy_power_off(dp->phy);
 
 	if (dp->plat_data->power_off)
 		dp->plat_data->power_off(dp->plat_data);
 
+	clk_disable_unprepare(dp->clock);
+
 	pm_runtime_put_sync(dp->dev);
 
 	ret = analogix_dp_prepare_panel(dp, false, true);
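
The new error path follows the usual goto-based unwinding style: undo only
what has already been set up, in reverse order. A minimal userspace sketch
with the clock and power helpers replaced by stand-ins; the power-on step is
made to fail so that the cleanup actually runs:

#include <stdio.h>

static int clk_on(void)		{ puts("clk on");   return 0; }
static void clk_off(void)	{ puts("clk off");  }
static int power_on(void)	{ puts("power on"); return -1; /* forced failure */ }

static int set_bridge(void)
{
	int ret;

	ret = clk_on();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = power_on();
	if (ret)
		goto err_clk;		/* undo the clock we already enabled */

	return 0;

err_clk:
	clk_off();
	return ret;
}

int main(void)
{
	return set_bridge() ? 1 : 0;
}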

From 606c5e64c6bcd26e8831878091a026da3ab63aae Mon Sep 17 00:00:00 2001
From: Lin Huang <hl@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:44 +0200
Subject: [PATCH 0448/1286] drm/bridge: analogix_dp: Extend hpd check time to
 100ms
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The total HPD detection window was about 1ms, which is too short to
catch a short pulse. This patch extends it to roughly 100ms.

Cc: Stéphane Marchesin <marcheu@chromium.org>
Cc: 征增 王 <wzz@rock-chips.com>
Signed-off-by: Lin Huang <hl@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-9-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 5957e2338071..1f4f34149019 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -76,7 +76,7 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
 			return 0;
 
 		timeout_loop++;
-		usleep_range(10, 11);
+		usleep_range(1000, 1100);
 	}
 
 	/*

From 1932250df1f0d6fe81fc785c5bb98f9474ae913e Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:45 +0200
Subject: [PATCH 0449/1286] drm/bridge: analogix_dp: Fix incorrect usage of
 enhanced mode

Enhanced mode is required by the eDP 1.2 specification, and not doing it
early could result in a period of time where we have a link transmitting
idle packets without it. Since there is no reason to disable it, we just
enable it at the beginning of link training and then keep it on all the
time.

Cc: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-10-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 1f4f34149019..1e1743b59c77 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -281,6 +281,8 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
 	retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
 	if (retval < 0)
 		return retval;
+	/* set enhanced mode if available */
+	analogix_dp_set_enhanced_mode(dp);
 
 	/* Set TX pre-emphasis to minimum */
 	for (lane = 0; lane < lane_count; lane++)
@@ -593,8 +595,6 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 		dev_dbg(dp->dev, "fast link training %s\n",
 			dp->fast_train_enable ? "supported" : "unsupported");
 
-		/* set enhanced mode if available */
-		analogix_dp_set_enhanced_mode(dp);
 		dp->link_train.lt_state = FINISHED;
 
 		return 0;
@@ -940,8 +940,6 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
 	}
 
 	analogix_dp_enable_scramble(dp, 1);
-	analogix_dp_enable_rx_to_enhanced_mode(dp, 1);
-	analogix_dp_enable_enhanced_mode(dp, 1);
 
 	analogix_dp_init_video(dp);
 	ret = analogix_dp_config_video(dp);

From ccdc578b69f21037ce13bdf5830d2c2807194375 Mon Sep 17 00:00:00 2001
From: Lin Huang <hl@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:46 +0200
Subject: [PATCH 0450/1286] drm/bridge: analogix_dp: Check dpcd write/read
 status

We need to check the DPCD read/write return values to see whether the
transfers were successful.

Cc: Kristian H. Kristensen <hoegsberg@chromium.org>
Signed-off-by: Lin Huang <hl@rock-chips.com>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-11-enric.balletbo@collabora.com
---
 .../drm/bridge/analogix/analogix_dp_core.c    | 169 +++++++++++++-----
 1 file changed, 127 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 1e1743b59c77..75e61ebf6722 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -160,80 +160,137 @@ int analogix_dp_disable_psr(struct analogix_dp_device *dp)
 }
 EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
 
-static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
+static int analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
 {
 	unsigned char psr_version;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
+	if (ret != 1) {
+		dev_err(dp->dev, "failed to get PSR version, disable it\n");
+		return ret;
+	}
 
-	drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
 	dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
 
-	return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
+	dp->psr_enable = (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
+
+	return 0;
 }
 
-static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
+static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
 {
 	unsigned char psr_en;
+	int ret;
 
 	/* Disable psr function */
-	drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
+	ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
+	if (ret != 1) {
+		dev_err(dp->dev, "failed to get psr config\n");
+		goto end;
+	}
+
 	psr_en &= ~DP_PSR_ENABLE;
-	drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+	if (ret != 1) {
+		dev_err(dp->dev, "failed to disable panel psr\n");
+		goto end;
+	}
 
 	/* Main-Link transmitter remains active during PSR active states */
 	psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
-	drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+	if (ret != 1) {
+		dev_err(dp->dev, "failed to set panel psr\n");
+		goto end;
+	}
 
 	/* Enable psr function */
 	psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
 		 DP_PSR_CRC_VERIFICATION;
-	drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+	if (ret != 1) {
+		dev_err(dp->dev, "failed to set panel psr\n");
+		goto end;
+	}
 
 	analogix_dp_enable_psr_crc(dp);
+
+	return 0;
+end:
+	dev_err(dp->dev, "enable psr fail, force to disable psr\n");
+	dp->psr_enable = false;
+
+	return ret;
 }
 
-static void
+static int
 analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
 				       bool enable)
 {
 	u8 data;
+	int ret;
 
-	drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
+	ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
+	if (ret != 1)
+		return ret;
 
 	if (enable)
-		drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
-				   DP_LANE_COUNT_ENHANCED_FRAME_EN |
-					DPCD_LANE_COUNT_SET(data));
+		ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+					 DP_LANE_COUNT_ENHANCED_FRAME_EN |
+					 DPCD_LANE_COUNT_SET(data));
 	else
-		drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
-				   DPCD_LANE_COUNT_SET(data));
+		ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+					 DPCD_LANE_COUNT_SET(data));
+
+	return ret < 0 ? ret : 0;
 }
 
-static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
+static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp,
+						  u8 *enhanced_mode_support)
 {
 	u8 data;
-	int retval;
+	int ret;
 
-	drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
-	retval = DPCD_ENHANCED_FRAME_CAP(data);
+	ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
+	if (ret != 1) {
+		*enhanced_mode_support = 0;
+		return ret;
+	}
 
-	return retval;
+	*enhanced_mode_support = DPCD_ENHANCED_FRAME_CAP(data);
+
+	return 0;
 }
 
-static void analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
+static int analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
 {
 	u8 data;
+	int ret;
+
+	ret = analogix_dp_is_enhanced_mode_available(dp, &data);
+	if (ret < 0)
+		return ret;
+
+	ret = analogix_dp_enable_rx_to_enhanced_mode(dp, data);
+	if (ret < 0)
+		return ret;
 
-	data = analogix_dp_is_enhanced_mode_available(dp);
-	analogix_dp_enable_rx_to_enhanced_mode(dp, data);
 	analogix_dp_enable_enhanced_mode(dp, data);
+
+	return 0;
 }
 
-static void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
+static int analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
 {
+	int ret;
+
 	analogix_dp_set_training_pattern(dp, DP_NONE);
 
-	drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
-			   DP_TRAINING_PATTERN_DISABLE);
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+				 DP_TRAINING_PATTERN_DISABLE);
+
+	return ret < 0 ? ret : 0;
 }
 
 static void
@@ -282,7 +339,11 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
 	if (retval < 0)
 		return retval;
 	/* set enhanced mode if available */
-	analogix_dp_set_enhanced_mode(dp);
+	retval = analogix_dp_set_enhanced_mode(dp);
+	if (retval < 0) {
+		dev_err(dp->dev, "failed to set enhance mode\n");
+		return retval;
+	}
 
 	/* Set TX pre-emphasis to minimum */
 	for (lane = 0; lane < lane_count; lane++)
@@ -567,10 +628,11 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 
 	if (!analogix_dp_channel_eq_ok(link_status, link_align, lane_count)) {
 		/* traing pattern Set to Normal */
-		analogix_dp_training_pattern_dis(dp);
+		retval = analogix_dp_training_pattern_dis(dp);
+		if (retval < 0)
+			return retval;
 
 		dev_info(dp->dev, "Link Training success!\n");
-
 		analogix_dp_get_link_bandwidth(dp, &reg);
 		dp->link_train.link_rate = reg;
 		dev_dbg(dp->dev, "final bandwidth = %.2x\n",
@@ -867,24 +929,32 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
 	return 0;
 }
 
-static void analogix_dp_enable_scramble(struct analogix_dp_device *dp,
-					bool enable)
+static int analogix_dp_enable_scramble(struct analogix_dp_device *dp,
+				       bool enable)
 {
 	u8 data;
+	int ret;
 
 	if (enable) {
 		analogix_dp_enable_scrambling(dp);
 
-		drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
-		drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+		ret = drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET,
+					&data);
+		if (ret != 1)
+			return ret;
+		ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
 				   (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
 	} else {
 		analogix_dp_disable_scrambling(dp);
 
-		drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
-		drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+		ret = drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET,
+					&data);
+		if (ret != 1)
+			return ret;
+		ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
 				   (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
 	}
+	return ret < 0 ? ret : 0;
 }
 
 static irqreturn_t analogix_dp_hardirq(int irq, void *arg)
@@ -939,23 +1009,36 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
 		return ret;
 	}
 
-	analogix_dp_enable_scramble(dp, 1);
+	ret = analogix_dp_enable_scramble(dp, 1);
+	if (ret < 0) {
+		dev_err(dp->dev, "can not enable scramble\n");
+		return ret;
+	}
 
 	analogix_dp_init_video(dp);
 	ret = analogix_dp_config_video(dp);
-	if (ret)
+	if (ret) {
 		dev_err(dp->dev, "unable to config video\n");
+		return ret;
+	}
 
 	/* Safe to enable the panel now */
 	if (dp->plat_data->panel) {
-		if (drm_panel_enable(dp->plat_data->panel))
+		ret = drm_panel_enable(dp->plat_data->panel);
+		if (ret) {
 			DRM_ERROR("failed to enable the panel\n");
+			return ret;
+		}
 	}
 
-	dp->psr_enable = analogix_dp_detect_sink_psr(dp);
+	ret = analogix_dp_detect_sink_psr(dp);
+	if (ret)
+		return ret;
+
 	if (dp->psr_enable)
-		analogix_dp_enable_sink_psr(dp);
-	return 0;
+		ret = analogix_dp_enable_sink_psr(dp);
+
+	return ret;
 }
 
 /*
@@ -1185,8 +1268,10 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
 	}
 
 	ret = analogix_dp_commit(dp);
-	if (ret)
+	if (ret) {
+		DRM_ERROR("dp commit error, ret = %d\n", ret);
 		goto out_dp_init;
+	}
 
 	enable_irq(dp->irq);
 	return 0;
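
drm_dp_dpcd_readb()/writeb() return the number of bytes transferred (1 on
success) or a negative error code, so callers must check for exactly 1 and
propagate failures, which is what this patch adds throughout. A standalone
sketch of that convention with a made-up dpcd_readb() stand-in:

#include <stdio.h>

static int fail_next_read;

/* Stand-in: returns bytes transferred (1) on success, negative on error. */
static int dpcd_readb(unsigned int addr, unsigned char *val)
{
	(void)addr;
	if (fail_next_read)
		return -5;		/* pretend the AUX transfer failed */
	*val = 0x01;
	return 1;
}

static int detect_sink_psr(void)
{
	unsigned char psr_version;
	int ret;

	ret = dpcd_readb(0x070, &psr_version);	/* DP_PSR_SUPPORT */
	if (ret != 1) {
		fprintf(stderr, "failed to get PSR version, disable it\n");
		return ret;			/* propagate, don't ignore */
	}
	printf("Panel PSR version: %x\n", psr_version);
	return 0;
}

int main(void)
{
	fail_next_read = 1;
	printf("first try:  %d\n", detect_sink_psr());
	fail_next_read = 0;
	printf("second try: %d\n", detect_sink_psr());
	return 0;
}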

From f12da6877ed02ae45922fa1dfacab1f425f20d56 Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:47 +0200
Subject: [PATCH 0451/1286] drm/bridge: analogix_dp: Fix AUX_PD bit for
 Rockchip

Some bits in the "AUX_PD" register differ between Rockchip and Exynos.
This patch fixes the incorrect operations on them.

Cc: Douglas Anderson <dianders@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-12-enric.balletbo@collabora.com
---
 .../gpu/drm/bridge/analogix/analogix_dp_reg.c | 117 ++++++++++--------
 .../gpu/drm/bridge/analogix/analogix_dp_reg.h |   2 +
 2 files changed, 65 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index bb72f8b0e603..dee1ba109b5f 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -248,76 +248,85 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
 {
 	u32 reg;
 	u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
+	u32 mask;
 
 	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
 		phy_pd_addr = ANALOGIX_DP_PD;
 
 	switch (block) {
 	case AUX_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg |= AUX_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		} else {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg &= ~AUX_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		}
+		if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
+			mask = RK_AUX_PD;
+		else
+			mask = AUX_PD;
+
+		reg = readl(dp->reg_base + phy_pd_addr);
+		if (enable)
+			reg |= mask;
+		else
+			reg &= ~mask;
+		writel(reg, dp->reg_base + phy_pd_addr);
 		break;
 	case CH0_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg |= CH0_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		} else {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg &= ~CH0_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		}
+		mask = CH0_PD;
+		reg = readl(dp->reg_base + phy_pd_addr);
+
+		if (enable)
+			reg |= mask;
+		else
+			reg &= ~mask;
+		writel(reg, dp->reg_base + phy_pd_addr);
 		break;
 	case CH1_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg |= CH1_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		} else {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg &= ~CH1_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		}
+		mask = CH1_PD;
+		reg = readl(dp->reg_base + phy_pd_addr);
+
+		if (enable)
+			reg |= mask;
+		else
+			reg &= ~mask;
+		writel(reg, dp->reg_base + phy_pd_addr);
 		break;
 	case CH2_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg |= CH2_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		} else {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg &= ~CH2_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		}
+		mask = CH2_PD;
+		reg = readl(dp->reg_base + phy_pd_addr);
+
+		if (enable)
+			reg |= mask;
+		else
+			reg &= ~mask;
+		writel(reg, dp->reg_base + phy_pd_addr);
 		break;
 	case CH3_BLOCK:
-		if (enable) {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg |= CH3_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		} else {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg &= ~CH3_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		}
+		mask = CH3_PD;
+		reg = readl(dp->reg_base + phy_pd_addr);
+
+		if (enable)
+			reg |= mask;
+		else
+			reg &= ~mask;
+		writel(reg, dp->reg_base + phy_pd_addr);
 		break;
 	case ANALOG_TOTAL:
-		if (enable) {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg |= DP_PHY_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		} else {
-			reg = readl(dp->reg_base + phy_pd_addr);
-			reg &= ~DP_PHY_PD;
-			writel(reg, dp->reg_base + phy_pd_addr);
-		}
+		/*
+		 * There is no bit named DP_PHY_PD, so We used DP_INC_BG
+		 * to power off everything instead of DP_PHY_PD in
+		 * Rockchip
+		 */
+		if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
+			mask = DP_INC_BG;
+		else
+			mask = DP_PHY_PD;
+
+		reg = readl(dp->reg_base + phy_pd_addr);
+		if (enable)
+			reg |= mask;
+		else
+			reg &= ~mask;
+
+		writel(reg, dp->reg_base + phy_pd_addr);
+		if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
+			usleep_range(10, 15);
 		break;
 	case POWER_ALL:
 		if (enable) {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index 9602668669f4..b633a4a5082a 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -345,7 +345,9 @@
 #define DP_INC_BG				(0x1 << 7)
 #define DP_EXP_BG				(0x1 << 6)
 #define DP_PHY_PD				(0x1 << 5)
+#define RK_AUX_PD				(0x1 << 5)
 #define AUX_PD					(0x1 << 4)
+#define RK_PLL_PD				(0x1 << 4)
 #define CH3_PD					(0x1 << 3)
 #define CH2_PD					(0x1 << 2)
 #define CH1_PD					(0x1 << 1)
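
The refactor collapses the duplicated if/else branches into a single
read-modify-write with a platform-specific mask. A small standalone sketch of
that shape, using the bit values from the header but a plain variable instead
of the register:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AUX_PD		(0x1u << 4)	/* Exynos position */
#define RK_AUX_PD	(0x1u << 5)	/* Rockchip position */

static uint32_t phy_pd_reg;

/* Pick the platform-specific mask once, then do one read-modify-write. */
static void set_aux_power_down(bool is_rockchip, bool enable)
{
	uint32_t mask = is_rockchip ? RK_AUX_PD : AUX_PD;
	uint32_t reg = phy_pd_reg;

	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	phy_pd_reg = reg;
}

int main(void)
{
	set_aux_power_down(true, true);
	printf("rockchip, powered down: %02x\n", (unsigned int)phy_pd_reg);
	set_aux_power_down(true, false);
	printf("rockchip, powered up:   %02x\n", (unsigned int)phy_pd_reg);
	return 0;
}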

From d44ba84433a2e42aa14fc5b9cc228050f0783e5c Mon Sep 17 00:00:00 2001
From: Lin Huang <hl@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:48 +0200
Subject: [PATCH 0452/1286] drm/bridge: analogix_dp: Reset aux channel if an
 error occurred
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

AUX errors can have many different causes. We may not know what
happened on the AUX channel when a failure occurs, so reset the AUX
channel whenever an error is detected.

Cc: 征增 王 <wzz@rock-chips.com>
Cc: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Lin Huang <hl@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-13-enric.balletbo@collabora.com
---
 .../gpu/drm/bridge/analogix/analogix_dp_reg.c  | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index dee1ba109b5f..7b7fd227e1f9 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -466,6 +466,10 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
 	reg = RPLY_RECEIV | AUX_ERR;
 	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
 
+	analogix_dp_set_analog_power_down(dp, AUX_BLOCK, true);
+	usleep_range(10, 11);
+	analogix_dp_set_analog_power_down(dp, AUX_BLOCK, false);
+
 	analogix_dp_reset_aux(dp);
 
 	/* Disable AUX transaction H/W retry */
@@ -1159,7 +1163,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 				 reg, !(reg & AUX_EN), 25, 500 * 1000);
 	if (ret) {
 		dev_err(dp->dev, "AUX CH enable timeout!\n");
-		return -ETIMEDOUT;
+		goto aux_error;
 	}
 
 	/* TODO: Wait for an interrupt instead of looping? */
@@ -1168,7 +1172,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 				 reg, reg & RPLY_RECEIV, 10, 20 * 1000);
 	if (ret) {
 		dev_err(dp->dev, "AUX CH cmd reply timeout!\n");
-		return -ETIMEDOUT;
+		goto aux_error;
 	}
 
 	/* Clear interrupt source for AUX CH command reply */
@@ -1178,7 +1182,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
 	if (reg & AUX_ERR) {
 		writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
-		return -EREMOTEIO;
+		goto aux_error;
 	}
 
 	/* Check AUX CH error access status */
@@ -1186,7 +1190,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 	if ((reg & AUX_STATUS_MASK)) {
 		dev_err(dp->dev, "AUX CH error happened: %d\n\n",
 			reg & AUX_STATUS_MASK);
-		return -EREMOTEIO;
+		goto aux_error;
 	}
 
 	if (msg->request & DP_AUX_I2C_READ) {
@@ -1212,4 +1216,10 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 		msg->reply = DP_AUX_NATIVE_REPLY_ACK;
 
 	return num_transferred > 0 ? num_transferred : -EBUSY;
+
+aux_error:
+	/* if aux err happen, reset aux */
+	analogix_dp_init_aux(dp);
+
+	return -EREMOTEIO;
 }
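
All of the failure points in the transfer now funnel into one aux_error label
that re-initialises the channel before returning. A tiny standalone sketch of
that shared recovery-label pattern; the failure points and return value are
illustrative:

#include <stdio.h>

static void init_aux(void)
{
	puts("aux channel re-initialised");
}

/* Two illustrative failure points, both routed to one recovery label. */
static long aux_transfer(int fail_step)
{
	if (fail_step == 1)
		goto aux_error;		/* e.g. AUX_EN never cleared */
	if (fail_step == 2)
		goto aux_error;		/* e.g. no reply received */

	return 0;

aux_error:
	init_aux();			/* reset the channel before giving up */
	return -70;			/* -EREMOTEIO in the kernel code */
}

int main(void)
{
	printf("ok path:   %ld\n", aux_transfer(0));
	printf("fail path: %ld\n", aux_transfer(2));
	return 0;
}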

From 2a7b44c524d17d79f57d3fdb00c1c122ab720a7b Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:49 +0200
Subject: [PATCH 0453/1286] drm/rockchip: Restore psr->state when
 enable/disable psr failed

If we fail to disable PSR, the display hangs until the next PSR cycle
comes along. So restore psr->state when the operation fails.

Cc: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Heiko Stuebner <heiko@sntech.de>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-14-enric.balletbo@collabora.com
---
 .../drm/bridge/analogix/analogix_dp_core.c    |  4 +++-
 .../gpu/drm/rockchip/analogix_dp-rockchip.c   | 10 +++++-----
 drivers/gpu/drm/rockchip/rockchip_drm_psr.c   | 20 ++++++++++++-------
 drivers/gpu/drm/rockchip/rockchip_drm_psr.h   |  2 +-
 4 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 75e61ebf6722..5540e2dfc2ec 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -153,8 +153,10 @@ int analogix_dp_disable_psr(struct analogix_dp_device *dp)
 	psr_vsc.DB1 = 0;
 
 	ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
-	if (ret != 1)
+	if (ret != 1) {
 		dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret);
+		return ret;
+	}
 
 	return analogix_dp_send_psr_spd(dp, &psr_vsc, false);
 }
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 3e8bf79bea58..8c884f9ce713 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -77,13 +77,13 @@ struct rockchip_dp_device {
 	struct analogix_dp_plat_data plat_data;
 };
 
-static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
+static int analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
 {
 	struct rockchip_dp_device *dp = to_dp(encoder);
 	int ret;
 
 	if (!analogix_dp_psr_enabled(dp->adp))
-		return;
+		return 0;
 
 	DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
 
@@ -91,13 +91,13 @@ static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
 					 PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
 	if (ret) {
 		DRM_DEV_ERROR(dp->dev, "line flag interrupt did not arrive\n");
-		return;
+		return -ETIMEDOUT;
 	}
 
 	if (enabled)
-		analogix_dp_enable_psr(dp->adp);
+		return analogix_dp_enable_psr(dp->adp);
 	else
-		analogix_dp_disable_psr(dp->adp);
+		return analogix_dp_disable_psr(dp->adp);
 }
 
 static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index b339ca943139..9376f4396b6b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -36,7 +36,7 @@ struct psr_drv {
 
 	struct delayed_work	flush_work;
 
-	void (*set)(struct drm_encoder *encoder, bool enable);
+	int (*set)(struct drm_encoder *encoder, bool enable);
 };
 
 static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
@@ -93,19 +93,25 @@ static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state)
 		return;
 	}
 
-	psr->state = state;
-
 	/* Actually commit the state change to hardware */
-	switch (psr->state) {
+	switch (state) {
 	case PSR_ENABLE:
-		psr->set(psr->encoder, true);
+		if (psr->set(psr->encoder, true))
+			return;
 		break;
 
 	case PSR_DISABLE:
 	case PSR_FLUSH:
-		psr->set(psr->encoder, false);
+		if (psr->set(psr->encoder, false))
+			return;
 		break;
+
+	default:
+		pr_err("%s: Unknown state %d\n", __func__, state);
+		return;
 	}
+
+	psr->state = state;
 }
 
 static void psr_set_state(struct psr_drv *psr, enum psr_state state)
@@ -229,7 +235,7 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
  * Zero on success, negative errno on failure.
  */
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
-			void (*psr_set)(struct drm_encoder *, bool enable))
+			int (*psr_set)(struct drm_encoder *, bool enable))
 {
 	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
 	struct psr_drv *psr;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
index b1ea0155e57c..06537ee27565 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -22,7 +22,7 @@ int rockchip_drm_psr_activate(struct drm_encoder *encoder);
 int rockchip_drm_psr_deactivate(struct drm_encoder *encoder);
 
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
-			void (*psr_set)(struct drm_encoder *, bool enable));
+			int (*psr_set)(struct drm_encoder *, bool enable));
 void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
 
 #endif /* __ROCKCHIP_DRM_PSR__ */
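
The key change is that psr->state is only written after the hardware callback
succeeds, so a failed transition leaves the recorded state untouched. A small
standalone sketch of committing state only on success, with a stand-in
hw_set() that fails on its first call:

#include <stdio.h>

enum psr_state { PSR_DISABLE, PSR_ENABLE, PSR_FLUSH };

static enum psr_state state = PSR_DISABLE;

/* Illustrative hardware hook; pretend the first transition fails. */
static int hw_set(int enable)
{
	static int calls;

	(void)enable;
	return ++calls == 1 ? -1 : 0;
}

/* Only record the new state after the hardware accepted it. */
static void set_state(enum psr_state new_state)
{
	if (hw_set(new_state == PSR_ENABLE))
		return;			/* keep the old state on failure */
	state = new_state;
}

int main(void)
{
	set_state(PSR_ENABLE);		/* fails: state stays PSR_DISABLE */
	printf("after failed enable: %d\n", state);
	set_state(PSR_ENABLE);		/* succeeds: state is PSR_ENABLE  */
	printf("after retry:         %d\n", state);
	return 0;
}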

From ac0c0b611d5aef4f259625f4be44ed9e2d03b711 Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:50 +0200
Subject: [PATCH 0454/1286] drm/bridge: analogix_dp: Don't use
 ANALOGIX_DP_PLL_CTL to control pll

There is no register named ANALOGIX_DP_PLL_CTL in the Rockchip eDP PHY
register list. We should use bit 4 of ANALOGIX_DP_PD to control the PLL
power instead.

Cc: Douglas Anderson <dianders@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-15-enric.balletbo@collabora.com
---
 .../gpu/drm/bridge/analogix/analogix_dp_reg.c | 20 +++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 7b7fd227e1f9..02ab1aaa9993 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -230,16 +230,20 @@ enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp)
 void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable)
 {
 	u32 reg;
+	u32 mask = DP_PLL_PD;
+	u32 pd_addr = ANALOGIX_DP_PLL_CTL;
 
-	if (enable) {
-		reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
-		reg |= DP_PLL_PD;
-		writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
-	} else {
-		reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
-		reg &= ~DP_PLL_PD;
-		writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
+	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+		pd_addr = ANALOGIX_DP_PD;
+		mask = RK_PLL_PD;
 	}
+
+	reg = readl(dp->reg_base + pd_addr);
+	if (enable)
+		reg |= mask;
+	else
+		reg &= ~mask;
+	writel(reg, dp->reg_base + pd_addr);
 }
 
 void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,

From c4d3b1a21ec2d3a9065b46c13021344ffa7ecead Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:51 +0200
Subject: [PATCH 0455/1286] drm/bridge: analogix_dp: Fix timeout of video
 streamclk config

The STRM_VALID bit in register ANALOGIX_DP_SYS_CTL_3 may be unstable,
so we may hit the "Timeout of video streamclk ok" error log because we
check this unstable bit.
In fact, we can keep going: the stream clock is fine if we wait long
enough, and it has no effect on the display.
Change this error to a warning.

Cc: Douglas Anderson <dianders@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-16-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 5540e2dfc2ec..a72e454a7292 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -921,8 +921,9 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
 			done_count = 0;
 		}
 		if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
-			dev_err(dp->dev, "Timeout of video streamclk ok\n");
-			return -ETIMEDOUT;
+			dev_warn(dp->dev,
+				 "Ignoring timeout of video streamclk ok\n");
+			break;
 		}
 
 		usleep_range(1000, 1001);

From 4805b7ce5031381e8fe9f25b0f5a323653259178 Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:52 +0200
Subject: [PATCH 0456/1286] drm/bridge: analogix_dp: Fix incorrect operations
 with register ANALOGIX_DP_FUNC_EN_1

The layout of register ANALOGIX_DP_FUNC_EN_1 (offset 0x18) differs
between Rockchip and Exynos:

on Exynos edp phy,
BIT 7		MASTER_VID_FUNC_EN_N
BIT 6		reserved
BIT 5		SLAVE_VID_FUNC_EN_N

on Rockchip edp phy,
BIT 7		reserved
BIT 6		RK_VID_CAP_FUNC_EN_N
BIT 5		RK_VID_FIFO_FUNC_EN_N

So we need some Rockchip-specific handling here.

Cc: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-17-enric.balletbo@collabora.com
---
 .../gpu/drm/bridge/analogix/analogix_dp_reg.c | 19 ++++++++++++++-----
 .../gpu/drm/bridge/analogix/analogix_dp_reg.h |  2 ++
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 02ab1aaa9993..4eae206ec31b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -126,9 +126,14 @@ void analogix_dp_reset(struct analogix_dp_device *dp)
 	analogix_dp_stop_video(dp);
 	analogix_dp_enable_video_mute(dp, 0);
 
-	reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
-		AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
-		HDCP_FUNC_EN_N | SW_FUNC_EN_N;
+	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
+		reg = RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N |
+			SW_FUNC_EN_N;
+	else
+		reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
+			AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
+			HDCP_FUNC_EN_N | SW_FUNC_EN_N;
+
 	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
 
 	reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
@@ -971,8 +976,12 @@ void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp)
 	u32 reg;
 
 	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
-	reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
-	reg |= MASTER_VID_FUNC_EN_N;
+	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+		reg &= ~(RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N);
+	} else {
+		reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
+		reg |= MASTER_VID_FUNC_EN_N;
+	}
 	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
 
 	reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index b633a4a5082a..0cf27c731727 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -127,7 +127,9 @@
 
 /* ANALOGIX_DP_FUNC_EN_1 */
 #define MASTER_VID_FUNC_EN_N			(0x1 << 7)
+#define RK_VID_CAP_FUNC_EN_N			(0x1 << 6)
 #define SLAVE_VID_FUNC_EN_N			(0x1 << 5)
+#define RK_VID_FIFO_FUNC_EN_N			(0x1 << 5)
 #define AUD_FIFO_FUNC_EN_N			(0x1 << 4)
 #define AUD_FUNC_EN_N				(0x1 << 3)
 #define HDCP_FUNC_EN_N				(0x1 << 2)

From 6f4638a19685d4c899f8fff28d60566c8557325b Mon Sep 17 00:00:00 2001
From: zain wang <wzz@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:53 +0200
Subject: [PATCH 0457/1286] drm/bridge: analogix_dp: Move fast link training
 detect to set_bridge

Fast link training support is detected too early: if a later step in
set_bridge fails after the fast link flag has already been set to 1, we
retry set_bridge. In that case we power the panel supply down and back
up, and then do fast link training because the flag is still set. At
that point we should really do full link training, not fast link
training. So move the fast link training detection to the end of
set_bridge.

Cc: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: zain wang <wzz@rock-chips.com>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-18-enric.balletbo@collabora.com
---
 .../drm/bridge/analogix/analogix_dp_core.c    | 42 ++++++++++++-------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index a72e454a7292..69b2c16e5776 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -601,7 +601,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 {
 	int lane, lane_count, retval;
 	u32 reg;
-	u8 link_align, link_status[2], adjust_request[2], spread;
+	u8 link_align, link_status[2], adjust_request[2];
 
 	usleep_range(400, 401);
 
@@ -645,20 +645,6 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 		dev_dbg(dp->dev, "final lane count = %.2x\n",
 			dp->link_train.lane_count);
 
-		retval = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD,
-					   &spread);
-		if (retval != 1) {
-			dev_err(dp->dev, "failed to read downspread %d\n",
-				retval);
-			dp->fast_train_enable = false;
-		} else {
-			dp->fast_train_enable =
-				(spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING) ?
-					true : false;
-		}
-		dev_dbg(dp->dev, "fast link training %s\n",
-			dp->fast_train_enable ? "supported" : "unsupported");
-
 		dp->link_train.lt_state = FINISHED;
 
 		return 0;
@@ -996,6 +982,22 @@ static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
+static int analogix_dp_fast_link_train_detection(struct analogix_dp_device *dp)
+{
+	int ret;
+	u8 spread;
+
+	ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD, &spread);
+	if (ret != 1) {
+		dev_err(dp->dev, "failed to read downspread %d\n", ret);
+		return ret;
+	}
+	dp->fast_train_enable = !!(spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+	dev_dbg(dp->dev, "fast link training %s\n",
+		dp->fast_train_enable ? "supported" : "unsupported");
+	return 0;
+}
+
 static int analogix_dp_commit(struct analogix_dp_device *dp)
 {
 	int ret;
@@ -1038,8 +1040,16 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
 	if (ret)
 		return ret;
 
-	if (dp->psr_enable)
+	if (dp->psr_enable) {
 		ret = analogix_dp_enable_sink_psr(dp);
+		if (ret)
+			return ret;
+	}
+
+	/* Check whether panel supports fast training */
+	ret =  analogix_dp_fast_link_train_detection(dp);
+	if (ret)
+		dp->psr_enable = false;
 
 	return ret;
 }

From 2f8d216002f8e3700b59c774bc7f76d777632890 Mon Sep 17 00:00:00 2001
From: Douglas Anderson <dianders@chromium.org>
Date: Mon, 23 Apr 2018 12:49:54 +0200
Subject: [PATCH 0458/1286] drm/bridge: analogix_dp: Reorder
 plat_data->power_off to happen sooner

The current user of the analogix power_off is "analogix_dp-rockchip".
That driver does this:
- deactivate PSR
- turn off a clock

Both of these things (especially deactivating PSR) should be done before
we turn the PHY power off and turn off analog power.  Let's move the
callback up.

Note that without this patch (and with
https://patchwork.kernel.org/patch/9553349/ [seanpaul: this patch was
not applied, but it seems like the race can still occur]), I experienced
an error in reboot testing where one thread was at:

  rockchip_drm_psr_deactivate
  rockchip_dp_powerdown
  analogix_dp_bridge_disable
  drm_bridge_disable

...and the other thread was at:

  analogix_dp_send_psr_spd
  analogix_dp_enable_psr
  analogix_dp_psr_set
  psr_flush_handler

The flush handler thread was finding AUX channel errors and eventually
reported "Failed to apply PSR", where I had a kgdb breakpoint. Presumably
the device would have eventually given up and shut down anyway, but it
seems better to fix the order to be more correct.

Cc: Kristian H. Kristensen <hoegsberg@chromium.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-19-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 69b2c16e5776..a260de4f0bd8 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1337,12 +1337,13 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 	}
 
 	disable_irq(dp->irq);
-	analogix_dp_set_analog_power_down(dp, POWER_ALL, 1);
-	phy_power_off(dp->phy);
 
 	if (dp->plat_data->power_off)
 		dp->plat_data->power_off(dp->plat_data);
 
+	analogix_dp_set_analog_power_down(dp, POWER_ALL, 1);
+	phy_power_off(dp->phy);
+
 	clk_disable_unprepare(dp->clock);
 
 	pm_runtime_put_sync(dp->dev);

From 71cef82434640fb5d219365a568c859944fedb80 Mon Sep 17 00:00:00 2001
From: Douglas Anderson <dianders@chromium.org>
Date: Mon, 23 Apr 2018 12:49:55 +0200
Subject: [PATCH 0459/1286] drm/bridge: analogix_dp: Properly log AUX CH errors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The code in analogix_dp_transfer() that was supposed to print out:
  AUX CH error happened

Was actually dead code. That's because the previous check (whether
the interrupt status indicated any errors) would have hit for all
errors anyway.

Let's combine the two error checks so we can actually see AUX CH
errors.  We'll also downgrade the message to a warning since some of
these types of errors might be expected for some displays.  If this
gets too noisy we can downgrade again to debug.

Cc: 征增 王 <wzz@rock-chips.com>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-20-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 4eae206ec31b..58e8a28e99aa 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1105,6 +1105,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 			     struct drm_dp_aux_msg *msg)
 {
 	u32 reg;
+	u32 status_reg;
 	u8 *buffer = msg->buffer;
 	unsigned int i;
 	int num_transferred = 0;
@@ -1193,16 +1194,12 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
 
 	/* Clear interrupt source for AUX CH access error */
 	reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
-	if (reg & AUX_ERR) {
+	status_reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
+	if ((reg & AUX_ERR) || (status_reg & AUX_STATUS_MASK)) {
 		writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
-		goto aux_error;
-	}
 
-	/* Check AUX CH error access status */
-	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
-	if ((reg & AUX_STATUS_MASK)) {
-		dev_err(dp->dev, "AUX CH error happened: %d\n\n",
-			reg & AUX_STATUS_MASK);
+		dev_warn(dp->dev, "AUX CH error happened: %#x (%d)\n",
+			 status_reg & AUX_STATUS_MASK, !!(reg & AUX_ERR));
 		goto aux_error;
 	}
 

From 7bd0fd9850382252d906bfeb5abfb38aefe28a4f Mon Sep 17 00:00:00 2001
From: Douglas Anderson <dianders@chromium.org>
Date: Mon, 23 Apr 2018 12:49:56 +0200
Subject: [PATCH 0460/1286] drm/bridge: analogix_dp: Properly disable aux chan
 retries on rockchip
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The comments in analogix_dp_init_aux() claim that we're disabling AUX
channel retries, but right below that the Rockchip path sets the retry
count to 3.  If we actually need 3 retries on Rockchip then we could
adjust the comment, but it seems more likely that we want the same
retry behavior across all platforms.

Cc: Stéphane Marchesin <marcheu@chromium.org>
Cc: 征增 王 <wzz@rock-chips.com>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-21-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 58e8a28e99aa..a5f2763d72e4 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -481,15 +481,16 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
 
 	analogix_dp_reset_aux(dp);
 
-	/* Disable AUX transaction H/W retry */
+	/* AUX_BIT_PERIOD_EXPECTED_DELAY doesn't apply to Rockchip IP */
 	if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
-		reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
-		      AUX_HW_RETRY_COUNT_SEL(3) |
-		      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+		reg = 0;
 	else
-		reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) |
-		      AUX_HW_RETRY_COUNT_SEL(0) |
-		      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+		reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3);
+
+	/* Disable AUX transaction H/W retry */
+	reg |= AUX_HW_RETRY_COUNT_SEL(0) |
+	       AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+
 	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL);
 
 	/* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */

From 6bda8112fe9f62294f2f2b338c46a3476fc6e238 Mon Sep 17 00:00:00 2001
From: Mark Yao <mark.yao@rock-chips.com>
Date: Mon, 23 Apr 2018 12:49:57 +0200
Subject: [PATCH 0461/1286] drm/rockchip: pre dither down when output bpc is
 8bit

Some encoders have a CRC verification check; the CRC check fails if the
input and output data are not equal.

That means the encoder input and output must use the same color depth.
The VOP can output 10-bit data to the encoder, but some panels only
support 8-bit depth, which would make the CRC check fail.

So pre-dither the VOP data down to 8 bits if the panel's bpc is 8.

Signed-off-by: Mark Yao <mark.yao@rock-chips.com>
[seanpaul resolved conflict in rockchip_drm_vop.c]
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-22-enric.balletbo@collabora.com
---
 drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 2 ++
 drivers/gpu/drm/rockchip/rockchip_drm_drv.h     | 1 +
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c     | 6 ++++++
 drivers/gpu/drm/rockchip/rockchip_drm_vop.h     | 1 +
 drivers/gpu/drm/rockchip/rockchip_vop_reg.c     | 1 +
 5 files changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 8c884f9ce713..b3f46ed24cdc 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -218,6 +218,7 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 				      struct drm_connector_state *conn_state)
 {
 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+	struct drm_display_info *di = &conn_state->connector->display_info;
 
 	/*
 	 * The hardware IC designed that VOP must output the RGB10 video
@@ -229,6 +230,7 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
 	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
 	s->output_type = DRM_MODE_CONNECTOR_eDP;
+	s->output_bpc = di->bpc;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 9c064a40458b..3a6ebfc26036 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -36,6 +36,7 @@ struct rockchip_crtc_state {
 	struct drm_crtc_state base;
 	int output_type;
 	int output_mode;
+	int output_bpc;
 };
 #define to_rockchip_crtc_state(s) \
 		container_of(s, struct rockchip_crtc_state, base)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 510cdf076bb1..026df454a5d5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -925,6 +925,12 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
 	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
 	    !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
 		s->output_mode = ROCKCHIP_OUT_MODE_P888;
+
+	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8)
+		VOP_REG_SET(vop, common, pre_dither_down, 1);
+	else
+		VOP_REG_SET(vop, common, pre_dither_down, 0);
+
 	VOP_REG_SET(vop, common, out_mode, s->output_mode);
 
 	VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 56bbd2e2a8ef..084acdd0019a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -67,6 +67,7 @@ struct vop_common {
 	struct vop_reg cfg_done;
 	struct vop_reg dsp_blank;
 	struct vop_reg data_blank;
+	struct vop_reg pre_dither_down;
 	struct vop_reg dither_down;
 	struct vop_reg dither_up;
 	struct vop_reg gate_en;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 2e4eea3459fe..08023d3ecb76 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -264,6 +264,7 @@ static const struct vop_common rk3288_common = {
 	.standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
 	.gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
 	.mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
+	.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
 	.dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
 	.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
 	.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),

From 7bb3bb4d56d8f3e0b29b8e4a70f2ab7a8e04a935 Mon Sep 17 00:00:00 2001
From: Douglas Anderson <dianders@chromium.org>
Date: Mon, 23 Apr 2018 12:49:58 +0200
Subject: [PATCH 0462/1286] drm/bridge: analogix_dp: Split the
 platform-specific poweron in two parts

Some of the platform-specific stuff in rockchip_dp_poweron() needs to
happen before the generic code.  Some needs to happen after.  Let's
split the callback in two.

Specifically, we can't start doing PSR work until _after_ the whole
controller is up, so don't allow PSR to be enabled until the end.
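
For illustration only (not part of this change), a platform driver
adopting the split callback would wire it up roughly as follows; the
foo_* names are placeholders:

  static int foo_dp_poweron_start(struct analogix_dp_plat_data *plat_data)
  {
          /* clocks, resets: anything needed before the controller init */
          return 0;
  }

  static int foo_dp_poweron_end(struct analogix_dp_plat_data *plat_data)
  {
          /* work that needs the whole controller up, e.g. allowing PSR */
          return 0;
  }

  ...
  dp->plat_data.power_on_start = foo_dp_poweron_start;
  dp->plat_data.power_on_end = foo_dp_poweron_end;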

Cc: Kristian H. Kristensen <hoegsberg@chromium.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
[seanpaul added exynos change]
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-23-enric.balletbo@collabora.com
---
 drivers/gpu/drm/bridge/analogix/analogix_dp_core.c |  7 +++++--
 drivers/gpu/drm/exynos/exynos_dp.c                 |  2 +-
 drivers/gpu/drm/rockchip/analogix_dp-rockchip.c    | 12 ++++++++++--
 include/drm/bridge/analogix_dp.h                   |  3 ++-
 4 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index a260de4f0bd8..2bcbfadb6ac5 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1260,8 +1260,8 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
 		goto out_dp_clk_pre;
 	}
 
-	if (dp->plat_data->power_on)
-		dp->plat_data->power_on(dp->plat_data);
+	if (dp->plat_data->power_on_start)
+		dp->plat_data->power_on_start(dp->plat_data);
 
 	phy_power_on(dp->phy);
 
@@ -1286,6 +1286,9 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
 		goto out_dp_init;
 	}
 
+	if (dp->plat_data->power_on_end)
+		dp->plat_data->power_on_end(dp->plat_data);
+
 	enable_irq(dp->irq);
 	return 0;
 
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 964831dab102..86330f396784 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -162,7 +162,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 	dp->drm_dev = drm_dev;
 
 	dp->plat_data.dev_type = EXYNOS_DP;
-	dp->plat_data.power_on = exynos_dp_poweron;
+	dp->plat_data.power_on_start = exynos_dp_poweron;
 	dp->plat_data.power_off = exynos_dp_poweroff;
 	dp->plat_data.attach = exynos_dp_bridge_attach;
 	dp->plat_data.get_modes = exynos_dp_get_modes;
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index b3f46ed24cdc..23317a2269e1 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -109,7 +109,7 @@ static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
 	return 0;
 }
 
-static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
+static int rockchip_dp_poweron_start(struct analogix_dp_plat_data *plat_data)
 {
 	struct rockchip_dp_device *dp = to_dp(plat_data);
 	int ret;
@@ -127,6 +127,13 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
 		return ret;
 	}
 
+	return ret;
+}
+
+static int rockchip_dp_poweron_end(struct analogix_dp_plat_data *plat_data)
+{
+	struct rockchip_dp_device *dp = to_dp(plat_data);
+
 	return rockchip_drm_psr_activate(&dp->encoder);
 }
 
@@ -330,7 +337,8 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
 	dp->plat_data.encoder = &dp->encoder;
 
 	dp->plat_data.dev_type = dp->data->chip_type;
-	dp->plat_data.power_on = rockchip_dp_poweron;
+	dp->plat_data.power_on_start = rockchip_dp_poweron_start;
+	dp->plat_data.power_on_end = rockchip_dp_poweron_end;
 	dp->plat_data.power_off = rockchip_dp_powerdown;
 	dp->plat_data.get_modes = rockchip_dp_get_modes;
 
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index e9a1116d2f8e..475b706b49de 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -33,7 +33,8 @@ struct analogix_dp_plat_data {
 	struct drm_connector *connector;
 	bool skip_connector;
 
-	int (*power_on)(struct analogix_dp_plat_data *);
+	int (*power_on_start)(struct analogix_dp_plat_data *);
+	int (*power_on_end)(struct analogix_dp_plat_data *);
 	int (*power_off)(struct analogix_dp_plat_data *);
 	int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
 		      struct drm_connector *);

From a4169609def769c66f88140678970b2be6f64ac7 Mon Sep 17 00:00:00 2001
From: Tomasz Figa <tfiga@chromium.org>
Date: Mon, 23 Apr 2018 12:49:59 +0200
Subject: [PATCH 0463/1286] drm/rockchip: analogix_dp: Do not call Analogix
 code before bind

Driver callbacks, such as system suspend or resume, can be called at
any time; in particular, they can be called before the component bind
callback. Let's use the dp->adp pointer as a safeguard and skip calling
the Analogix entry points if it is an ERR_PTR().

Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-24-enric.balletbo@collabora.com
---
 drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 23317a2269e1..6d45d62466b3 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -368,6 +368,8 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
 	analogix_dp_unbind(dp->adp);
 	rockchip_drm_psr_unregister(&dp->encoder);
 	dp->encoder.funcs->destroy(&dp->encoder);
+
+	dp->adp = ERR_PTR(-ENODEV);
 }
 
 static const struct component_ops rockchip_dp_component_ops = {
@@ -391,6 +393,7 @@ static int rockchip_dp_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	dp->dev = dev;
+	dp->adp = ERR_PTR(-ENODEV);
 	dp->plat_data.panel = panel;
 
 	ret = rockchip_dp_of_probe(dp);
@@ -414,6 +417,9 @@ static int rockchip_dp_suspend(struct device *dev)
 {
 	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
 
+	if (IS_ERR(dp->adp))
+		return 0;
+
 	return analogix_dp_suspend(dp->adp);
 }
 
@@ -421,6 +427,9 @@ static int rockchip_dp_resume(struct device *dev)
 {
 	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
 
+	if (IS_ERR(dp->adp))
+		return 0;
+
 	return analogix_dp_resume(dp->adp);
 }
 #endif

From 39b138ea861a3494d2962467d6e275ae9f80a364 Mon Sep 17 00:00:00 2001
From: Tomasz Figa <tfiga@chromium.org>
Date: Mon, 23 Apr 2018 12:50:00 +0200
Subject: [PATCH 0464/1286] drm/rockchip: psr: Avoid redundant calls to .set()
 callback

The first time we call rockchip_drm_do_flush() after
rockchip_drm_psr_register(), we go from PSR_DISABLE to PSR_FLUSH. The
difference between PSR_DISABLE and PSR_FLUSH is whether or not we have a
delayed work pending - PSR is off in either state.  However,
psr_set_state() only catches the transition from PSR_FLUSH to
PSR_DISABLE (which never happens), while going from PSR_DISABLE to
PSR_FLUSH triggers a call to psr->set() to disable PSR while it is
already disabled. This runs the eDP PHY power-on sequence without the
PHY having been shut down first, which seems to occasionally leave the
encoder unable to enable PSR later. Let's simplify the state machine and
treat PSR_DISABLE and PSR_FLUSH as the same state.

Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Kristian H. Kristensen <hoegsberg@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-25-enric.balletbo@collabora.com
---
 drivers/gpu/drm/rockchip/rockchip_drm_psr.c | 78 ++++++---------------
 1 file changed, 23 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 9376f4396b6b..1a6157ffecec 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -20,19 +20,13 @@
 
 #define PSR_FLUSH_TIMEOUT_MS	100
 
-enum psr_state {
-	PSR_FLUSH,
-	PSR_ENABLE,
-	PSR_DISABLE,
-};
-
 struct psr_drv {
 	struct list_head	list;
 	struct drm_encoder	*encoder;
 
 	struct mutex		lock;
 	bool			active;
-	enum psr_state		state;
+	bool			enabled;
 
 	struct delayed_work	flush_work;
 
@@ -73,52 +67,22 @@ out:
 	return psr;
 }
 
-static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state)
+static int psr_set_state_locked(struct psr_drv *psr, bool enable)
 {
-	/*
-	 * Allowed finite state machine:
-	 *
-	 *   PSR_ENABLE  < = = = = = >  PSR_FLUSH
-	 *       | ^                        |
-	 *       | |                        |
-	 *       v |                        |
-	 *   PSR_DISABLE < - - - - - - - - -
-	 */
-	if (state == psr->state || !psr->active)
-		return;
+	int ret;
 
-	/* Already disabled in flush, change the state, but not the hardware */
-	if (state == PSR_DISABLE && psr->state == PSR_FLUSH) {
-		psr->state = state;
-		return;
-	}
+	if (!psr->active)
+		return -EINVAL;
 
-	/* Actually commit the state change to hardware */
-	switch (state) {
-	case PSR_ENABLE:
-		if (psr->set(psr->encoder, true))
-			return;
-		break;
+	if (enable == psr->enabled)
+		return 0;
 
-	case PSR_DISABLE:
-	case PSR_FLUSH:
-		if (psr->set(psr->encoder, false))
-			return;
-		break;
+	ret = psr->set(psr->encoder, enable);
+	if (ret)
+		return ret;
 
-	default:
-		pr_err("%s: Unknown state %d\n", __func__, state);
-		return;
-	}
-
-	psr->state = state;
-}
-
-static void psr_set_state(struct psr_drv *psr, enum psr_state state)
-{
-	mutex_lock(&psr->lock);
-	psr_set_state_locked(psr, state);
-	mutex_unlock(&psr->lock);
+	psr->enabled = enable;
+	return 0;
 }
 
 static void psr_flush_handler(struct work_struct *work)
@@ -126,10 +90,8 @@ static void psr_flush_handler(struct work_struct *work)
 	struct psr_drv *psr = container_of(to_delayed_work(work),
 					   struct psr_drv, flush_work);
 
-	/* If the state has changed since we initiated the flush, do nothing */
 	mutex_lock(&psr->lock);
-	if (psr->state == PSR_FLUSH)
-		psr_set_state_locked(psr, PSR_ENABLE);
+	psr_set_state_locked(psr, true);
 	mutex_unlock(&psr->lock);
 }
 
@@ -171,6 +133,7 @@ int rockchip_drm_psr_deactivate(struct drm_encoder *encoder)
 
 	mutex_lock(&psr->lock);
 	psr->active = false;
+	psr->enabled = false;
 	mutex_unlock(&psr->lock);
 	cancel_delayed_work_sync(&psr->flush_work);
 
@@ -180,8 +143,13 @@ EXPORT_SYMBOL(rockchip_drm_psr_deactivate);
 
 static void rockchip_drm_do_flush(struct psr_drv *psr)
 {
-	psr_set_state(psr, PSR_FLUSH);
-	mod_delayed_work(system_wq, &psr->flush_work, PSR_FLUSH_TIMEOUT_MS);
+	cancel_delayed_work_sync(&psr->flush_work);
+
+	mutex_lock(&psr->lock);
+	if (!psr_set_state_locked(psr, false))
+		mod_delayed_work(system_wq, &psr->flush_work,
+				 PSR_FLUSH_TIMEOUT_MS);
+	mutex_unlock(&psr->lock);
 }
 
 /**
@@ -250,8 +218,8 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
 	INIT_DELAYED_WORK(&psr->flush_work, psr_flush_handler);
 	mutex_init(&psr->lock);
 
-	psr->active = true;
-	psr->state = PSR_DISABLE;
+	psr->active = false;
+	psr->enabled = false;
 	psr->encoder = encoder;
 	psr->set = psr_set;
 

From 6e6cf3e2f2651c24c121aaba63f591166a9957dc Mon Sep 17 00:00:00 2001
From: Tomasz Figa <tfiga@chromium.org>
Date: Mon, 23 Apr 2018 12:50:01 +0200
Subject: [PATCH 0465/1286] drm/rockchip: psr: Sanitize semantics of
 allow/inhibit API

Currently both rockchip_drm_psr_activate() and _deactivate() only set the
boolean "active" flag without actually making sure that hardware state
complies with it.

Since we are going to extend the usage of this API to properly lock PSR
for the duration of atomic commits, we change the semantics in the
following way:
 - a counter is used to track the number of inhibit requests,
 - PSR is actually disabled in hardware on the first inhibit request,
 - PSR enable work is scheduled on the last allow request.

The above allows using the API to deterministically synchronize PSR
state changes with other DRM events, such as atomic commits and cursor
updates. As a nice side effect, the naming is sorted out and we have
"inhibit" for stopping the software logic and "enable" for hardware
state.
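
For illustration only (not part of this change), a caller that must keep
PSR off across a critical section pairs the helpers like this, assuming
the encoder was registered with rockchip_drm_psr_register():

  ret = rockchip_drm_psr_inhibit_get(encoder);
  if (ret)
          return ret;

  /* program the hardware; PSR is guaranteed to stay off here */

  /*
   * Dropping the last inhibit reference schedules the flush work,
   * which re-enables PSR after PSR_FLUSH_TIMEOUT_MS.
   */
  ret = rockchip_drm_psr_inhibit_put(encoder);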

Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-26-enric.balletbo@collabora.com
---
 .../gpu/drm/rockchip/analogix_dp-rockchip.c   |  4 +-
 drivers/gpu/drm/rockchip/rockchip_drm_psr.c   | 55 ++++++++++++++-----
 drivers/gpu/drm/rockchip/rockchip_drm_psr.h   |  4 +-
 3 files changed, 46 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 6d45d62466b3..080f05352195 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -134,7 +134,7 @@ static int rockchip_dp_poweron_end(struct analogix_dp_plat_data *plat_data)
 {
 	struct rockchip_dp_device *dp = to_dp(plat_data);
 
-	return rockchip_drm_psr_activate(&dp->encoder);
+	return rockchip_drm_psr_inhibit_put(&dp->encoder);
 }
 
 static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
@@ -142,7 +142,7 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
 	struct rockchip_dp_device *dp = to_dp(plat_data);
 	int ret;
 
-	ret = rockchip_drm_psr_deactivate(&dp->encoder);
+	ret = rockchip_drm_psr_inhibit_get(&dp->encoder);
 	if (ret != 0)
 		return ret;
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 1a6157ffecec..74f6a6a887dd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -25,7 +25,7 @@ struct psr_drv {
 	struct drm_encoder	*encoder;
 
 	struct mutex		lock;
-	bool			active;
+	int			inhibit_count;
 	bool			enabled;
 
 	struct delayed_work	flush_work;
@@ -71,7 +71,7 @@ static int psr_set_state_locked(struct psr_drv *psr, bool enable)
 {
 	int ret;
 
-	if (!psr->active)
+	if (psr->inhibit_count > 0)
 		return -EINVAL;
 
 	if (enable == psr->enabled)
@@ -96,13 +96,18 @@ static void psr_flush_handler(struct work_struct *work)
 }
 
 /**
- * rockchip_drm_psr_activate - activate PSR on the given pipe
+ * rockchip_drm_psr_inhibit_put - release PSR inhibit on given encoder
  * @encoder: encoder to obtain the PSR encoder
  *
+ * Decrements PSR inhibit count on given encoder. Should be called only
+ * for a PSR inhibit count increment done before. If PSR inhibit counter
+ * reaches zero, PSR flush work is scheduled to make the hardware enter
+ * PSR mode in PSR_FLUSH_TIMEOUT_MS.
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int rockchip_drm_psr_activate(struct drm_encoder *encoder)
+int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder)
 {
 	struct psr_drv *psr = find_psr_by_encoder(encoder);
 
@@ -110,21 +115,30 @@ int rockchip_drm_psr_activate(struct drm_encoder *encoder)
 		return PTR_ERR(psr);
 
 	mutex_lock(&psr->lock);
-	psr->active = true;
+	--psr->inhibit_count;
+	WARN_ON(psr->inhibit_count < 0);
+	if (!psr->inhibit_count)
+		mod_delayed_work(system_wq, &psr->flush_work,
+				 PSR_FLUSH_TIMEOUT_MS);
 	mutex_unlock(&psr->lock);
 
 	return 0;
 }
-EXPORT_SYMBOL(rockchip_drm_psr_activate);
+EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put);
 
 /**
- * rockchip_drm_psr_deactivate - deactivate PSR on the given pipe
+ * rockchip_drm_psr_inhibit_get - acquire PSR inhibit on given encoder
  * @encoder: encoder to obtain the PSR encoder
  *
+ * Increments PSR inhibit count on given encoder. This function guarantees
+ * that after it returns PSR is turned off on given encoder and no PSR-related
+ * hardware state change occurs at least until a matching call to
+ * rockchip_drm_psr_inhibit_put() is done.
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int rockchip_drm_psr_deactivate(struct drm_encoder *encoder)
+int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder)
 {
 	struct psr_drv *psr = find_psr_by_encoder(encoder);
 
@@ -132,14 +146,14 @@ int rockchip_drm_psr_deactivate(struct drm_encoder *encoder)
 		return PTR_ERR(psr);
 
 	mutex_lock(&psr->lock);
-	psr->active = false;
-	psr->enabled = false;
+	psr_set_state_locked(psr, false);
+	++psr->inhibit_count;
 	mutex_unlock(&psr->lock);
 	cancel_delayed_work_sync(&psr->flush_work);
 
 	return 0;
 }
-EXPORT_SYMBOL(rockchip_drm_psr_deactivate);
+EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get);
 
 static void rockchip_drm_do_flush(struct psr_drv *psr)
 {
@@ -199,6 +213,11 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
  * @encoder: encoder that obtain the PSR function
  * @psr_set: call back to set PSR state
  *
+ * The function returns with PSR inhibit counter initialized with one
+ * and the caller (typically encoder driver) needs to call
+ * rockchip_drm_psr_inhibit_put() when it becomes ready to accept PSR
+ * enable request.
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
@@ -218,7 +237,7 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
 	INIT_DELAYED_WORK(&psr->flush_work, psr_flush_handler);
 	mutex_init(&psr->lock);
 
-	psr->active = false;
+	psr->inhibit_count = 1;
 	psr->enabled = false;
 	psr->encoder = encoder;
 	psr->set = psr_set;
@@ -236,6 +255,11 @@ EXPORT_SYMBOL(rockchip_drm_psr_register);
  * @encoder: encoder that obtain the PSR function
  * @psr_set: call back to set PSR state
  *
+ * It is expected that the PSR inhibit counter is 1 when this function is
+ * called, which corresponds to a state when related encoder has been
+ * disconnected from any CRTCs and its driver called
+ * rockchip_drm_psr_inhibit_get() to stop the PSR logic.
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
@@ -247,7 +271,12 @@ void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
 	mutex_lock(&drm_drv->psr_list_lock);
 	list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
 		if (psr->encoder == encoder) {
-			cancel_delayed_work_sync(&psr->flush_work);
+			/*
+			 * Any other value would mean that the encoder
+			 * is still in use.
+			 */
+			WARN_ON(psr->inhibit_count != 1);
+
 			list_del(&psr->list);
 			kfree(psr);
 		}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
index 06537ee27565..40e026c14168 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -18,8 +18,8 @@
 void rockchip_drm_psr_flush_all(struct drm_device *dev);
 int rockchip_drm_psr_flush(struct drm_crtc *crtc);
 
-int rockchip_drm_psr_activate(struct drm_encoder *encoder);
-int rockchip_drm_psr_deactivate(struct drm_encoder *encoder);
+int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder);
+int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder);
 
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
 			int (*psr_set)(struct drm_encoder *, bool enable));

From d2d4f51d5ae4151c08c3d380426625a48e79b5b3 Mon Sep 17 00:00:00 2001
From: Tomasz Figa <tfiga@chromium.org>
Date: Mon, 23 Apr 2018 12:50:02 +0200
Subject: [PATCH 0466/1286] drm/rockchip: Disallow PSR for the whole atomic
 commit

Currently, the PSR flush is triggered from the CRTC's .atomic_begin()
callback, which is executed after modeset disables and enables and
before plane updates are committed. Since PSR flush and re-enable can be
triggered asynchronously by external sources (input events, delayed
work), it can race with the hardware programming done in the
aforementioned stages.

This patch blocks PSR completely before the hardware programming part
begins and unblocks it after it ends. This relies on the
reference-counted PSR disable introduced in the previous patch.

Cc: Kristian H. Kristensen <hoegsberg@chromium.org>
Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-27-enric.balletbo@collabora.com
---
 drivers/gpu/drm/rockchip/rockchip_drm_fb.c  | 61 ++++++++++++++++++++-
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c |  7 ---
 2 files changed, 60 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index e266539e04e5..d4f4118b482d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -167,8 +167,67 @@ err_gem_object_unreference:
 	return ERR_PTR(ret);
 }
 
+static void
+rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_encoder *encoder;
+	u32 encoder_mask = 0;
+	int i;
+
+	for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+		encoder_mask |= crtc_state->encoder_mask;
+		encoder_mask |= crtc->state->encoder_mask;
+	}
+
+	drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
+		rockchip_drm_psr_inhibit_get(encoder);
+}
+
+static void
+rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_encoder *encoder;
+	u32 encoder_mask = 0;
+	int i;
+
+	for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+		encoder_mask |= crtc_state->encoder_mask;
+		encoder_mask |= crtc->state->encoder_mask;
+	}
+
+	drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
+		rockchip_drm_psr_inhibit_put(encoder);
+}
+
+static void
+rockchip_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+
+	rockchip_drm_psr_inhibit_get_state(old_state);
+
+	drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+	drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+	drm_atomic_helper_commit_planes(dev, old_state,
+					DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+	rockchip_drm_psr_inhibit_put_state(old_state);
+
+	drm_atomic_helper_commit_hw_done(old_state);
+
+	drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+	drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+
 static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
-	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+	.atomic_commit_tail = rockchip_atomic_helper_commit_tail_rpm,
 };
 
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 026df454a5d5..fe3faa7c38d9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1029,16 +1029,9 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
 	}
 }
 
-static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
-{
-	rockchip_drm_psr_flush(crtc);
-}
-
 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
 	.mode_fixup = vop_crtc_mode_fixup,
 	.atomic_flush = vop_crtc_atomic_flush,
-	.atomic_begin = vop_crtc_atomic_begin,
 	.atomic_enable = vop_crtc_atomic_enable,
 	.atomic_disable = vop_crtc_atomic_disable,
 };

From 98bd0331d13f68c4719499c47f023d583121f847 Mon Sep 17 00:00:00 2001
From: Tomasz Figa <tfiga@chromium.org>
Date: Mon, 23 Apr 2018 12:50:03 +0200
Subject: [PATCH 0467/1286] drm/rockchip: psr: Remove flush by CRTC

It is not used anymore after the last changes, and it was not even
correct to begin with, as it assumed a 1:1 relation between a CRTC and
an encoder, while in fact a CRTC can be attached to multiple encoders.

Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Thierry Escande <thierry.escande@collabora.com>
Signed-off-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423105003.9004-28-enric.balletbo@collabora.com
---
 drivers/gpu/drm/rockchip/rockchip_drm_psr.c | 35 ---------------------
 drivers/gpu/drm/rockchip/rockchip_drm_psr.h |  1 -
 2 files changed, 36 deletions(-)

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 74f6a6a887dd..79d00d861a31 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -33,23 +33,6 @@ struct psr_drv {
 	int (*set)(struct drm_encoder *encoder, bool enable);
 };
 
-static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
-{
-	struct rockchip_drm_private *drm_drv = crtc->dev->dev_private;
-	struct psr_drv *psr;
-
-	mutex_lock(&drm_drv->psr_list_lock);
-	list_for_each_entry(psr, &drm_drv->psr_list, list) {
-		if (psr->encoder->crtc == crtc)
-			goto out;
-	}
-	psr = ERR_PTR(-ENODEV);
-
-out:
-	mutex_unlock(&drm_drv->psr_list_lock);
-	return psr;
-}
-
 static struct psr_drv *find_psr_by_encoder(struct drm_encoder *encoder)
 {
 	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
@@ -166,24 +149,6 @@ static void rockchip_drm_do_flush(struct psr_drv *psr)
 	mutex_unlock(&psr->lock);
 }
 
-/**
- * rockchip_drm_psr_flush - flush a single pipe
- * @crtc: CRTC of the pipe to flush
- *
- * Returns:
- * 0 on success, -errno on fail
- */
-int rockchip_drm_psr_flush(struct drm_crtc *crtc)
-{
-	struct psr_drv *psr = find_psr_by_crtc(crtc);
-	if (IS_ERR(psr))
-		return PTR_ERR(psr);
-
-	rockchip_drm_do_flush(psr);
-	return 0;
-}
-EXPORT_SYMBOL(rockchip_drm_psr_flush);
-
 /**
  * rockchip_drm_psr_flush_all - force to flush all registered PSR encoders
  * @dev: drm device
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
index 40e026c14168..860c62494496 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -16,7 +16,6 @@
 #define __ROCKCHIP_DRM_PSR___
 
 void rockchip_drm_psr_flush_all(struct drm_device *dev);
-int rockchip_drm_psr_flush(struct drm_crtc *crtc);
 
 int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder);
 int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder);

From f0316f93897c4c4e67278b175bfbfd3a95ba650a Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@armlinux.org.uk>
Date: Sat, 5 Dec 2015 18:41:28 +0000
Subject: [PATCH 0468/1286] drm/i2c: tda9950: add CEC driver

Add a CEC driver for the TDA9950, which is a stand-alone I2C CEC device,
but is also integrated into HDMI transceivers such as the TDA9989 and
TDA19989.

The TDA9950 contains a command processor which handles retransmissions
and the low level bus protocol.  The driver just has to read and write
the messages, and handle error conditions.
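
For illustration only (not part of this patch), a parent HDMI
transmitter driver describes itself to the TDA9950 through the new
tda9950_glue platform data; the foo_* names below are placeholders:

  static struct tda9950_glue foo_glue = {
          .parent = &foo_hdmi_dev->dev,   /* CEC devices hang off the HDMI Tx */
          .irq_flags = IRQF_TRIGGER_LOW,
          .data = foo_priv,
          .open = foo_cec_open,           /* power up the CEC block */
          .release = foo_cec_release,     /* power it back down */
  };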

Reviewed-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 drivers/gpu/drm/i2c/Kconfig           |   5 +
 drivers/gpu/drm/i2c/Makefile          |   1 +
 drivers/gpu/drm/i2c/tda9950.c         | 509 ++++++++++++++++++++++++++
 include/linux/platform_data/tda9950.h |  16 +
 4 files changed, 531 insertions(+)
 create mode 100644 drivers/gpu/drm/i2c/tda9950.c
 create mode 100644 include/linux/platform_data/tda9950.h

diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index a6c92beb410a..3a232f5ff0a1 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -26,4 +26,9 @@ config DRM_I2C_NXP_TDA998X
 	help
 	  Support for NXP Semiconductors TDA998X HDMI encoders.
 
+config DRM_I2C_NXP_TDA9950
+	tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC"
+	select CEC_NOTIFIER
+	select CEC_CORE
+
 endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index b20100c18ffb..a962f6f08568 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
 
 tda998x-y := tda998x_drv.o
 obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA9950) += tda9950.o
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
new file mode 100644
index 000000000000..3f7396caad48
--- /dev/null
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -0,0 +1,509 @@
+/*
+ *  TDA9950 Consumer Electronics Control driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The NXP TDA9950 implements the HDMI Consumer Electronics Control
+ * interface.  The host interface is similar to a mailbox: the data
+ * registers starting at REG_CDR0 are written to send a command to the
+ * internal CPU, and replies are read from these registers.
+ *
+ * As the data registers represent a mailbox, they must be accessed
+ * as a single I2C transaction.  See the TDA9950 data sheet for details.
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_data/tda9950.h>
+#include <linux/slab.h>
+#include <drm/drm_edid.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+enum {
+	REG_CSR = 0x00,
+	CSR_BUSY = BIT(7),
+	CSR_INT  = BIT(6),
+	CSR_ERR  = BIT(5),
+
+	REG_CER = 0x01,
+
+	REG_CVR = 0x02,
+
+	REG_CCR = 0x03,
+	CCR_RESET = BIT(7),
+	CCR_ON    = BIT(6),
+
+	REG_ACKH = 0x04,
+	REG_ACKL = 0x05,
+
+	REG_CCONR = 0x06,
+	CCONR_ENABLE_ERROR = BIT(4),
+	CCONR_RETRY_MASK = 7,
+
+	REG_CDR0 = 0x07,
+
+	CDR1_REQ = 0x00,
+	CDR1_CNF = 0x01,
+	CDR1_IND = 0x81,
+	CDR1_ERR = 0x82,
+	CDR1_IER = 0x83,
+
+	CDR2_CNF_SUCCESS    = 0x00,
+	CDR2_CNF_OFF_STATE  = 0x80,
+	CDR2_CNF_BAD_REQ    = 0x81,
+	CDR2_CNF_CEC_ACCESS = 0x82,
+	CDR2_CNF_ARB_ERROR  = 0x83,
+	CDR2_CNF_BAD_TIMING = 0x84,
+	CDR2_CNF_NACK_ADDR  = 0x85,
+	CDR2_CNF_NACK_DATA  = 0x86,
+};
+
+struct tda9950_priv {
+	struct i2c_client *client;
+	struct device *hdmi;
+	struct cec_adapter *adap;
+	struct tda9950_glue *glue;
+	u16 addresses;
+	struct cec_msg rx_msg;
+	struct cec_notifier *notify;
+	bool open;
+};
+
+static int tda9950_write_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
+{
+	struct i2c_msg msg;
+	u8 buf[cnt + 1];
+	int ret;
+
+	buf[0] = addr;
+	memcpy(buf + 1, p, cnt);
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = cnt + 1;
+	msg.buf = buf;
+
+	dev_dbg(&client->dev, "wr 0x%02x: %*ph\n", addr, cnt, p);
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0)
+		dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+	return ret < 0 ? ret : 0;
+}
+
+static void tda9950_write(struct i2c_client *client, u8 addr, u8 val)
+{
+	tda9950_write_range(client, addr, &val, 1);
+}
+
+static int tda9950_read_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
+{
+	struct i2c_msg msg[2];
+	int ret;
+
+	msg[0].addr = client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &addr;
+	msg[1].addr = client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = cnt;
+	msg[1].buf = p;
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret < 0)
+		dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
+
+	dev_dbg(&client->dev, "rd 0x%02x: %*ph\n", addr, cnt, p);
+
+	return ret;
+}
+
+static u8 tda9950_read(struct i2c_client *client, u8 addr)
+{
+	int ret;
+	u8 val;
+
+	ret = tda9950_read_range(client, addr, &val, 1);
+	if (ret < 0)
+		val = 0;
+
+	return val;
+}
+
+static irqreturn_t tda9950_irq(int irq, void *data)
+{
+	struct tda9950_priv *priv = data;
+	unsigned int tx_status;
+	u8 csr, cconr, buf[19];
+	u8 arb_lost_cnt, nack_cnt, err_cnt;
+
+	if (!priv->open)
+		return IRQ_NONE;
+
+	csr = tda9950_read(priv->client, REG_CSR);
+	if (!(csr & CSR_INT))
+		return IRQ_NONE;
+
+	cconr = tda9950_read(priv->client, REG_CCONR) & CCONR_RETRY_MASK;
+
+	tda9950_read_range(priv->client, REG_CDR0, buf, sizeof(buf));
+
+	/*
+	 * This should never happen: the data sheet says that there will
+	 * always be a valid message if the interrupt line is asserted.
+	 */
+	if (buf[0] == 0) {
+		dev_warn(&priv->client->dev, "interrupt pending, but no message?\n");
+		return IRQ_NONE;
+	}
+
+	switch (buf[1]) {
+	case CDR1_CNF: /* transmit result */
+		arb_lost_cnt = nack_cnt = err_cnt = 0;
+		switch (buf[2]) {
+		case CDR2_CNF_SUCCESS:
+			tx_status = CEC_TX_STATUS_OK;
+			break;
+
+		case CDR2_CNF_ARB_ERROR:
+			tx_status = CEC_TX_STATUS_ARB_LOST;
+			arb_lost_cnt = cconr;
+			break;
+
+		case CDR2_CNF_NACK_ADDR:
+			tx_status = CEC_TX_STATUS_NACK;
+			nack_cnt = cconr;
+			break;
+
+		default: /* some other error, refer to TDA9950 docs */
+			dev_err(&priv->client->dev, "CNF reply error 0x%02x\n",
+				buf[2]);
+			tx_status = CEC_TX_STATUS_ERROR;
+			err_cnt = cconr;
+			break;
+		}
+		/* TDA9950 executes all retries for us */
+		tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+		cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
+				  nack_cnt, 0, err_cnt);
+		break;
+
+	case CDR1_IND:
+		priv->rx_msg.len = buf[0] - 2;
+		if (priv->rx_msg.len > CEC_MAX_MSG_SIZE)
+			priv->rx_msg.len = CEC_MAX_MSG_SIZE;
+
+		memcpy(priv->rx_msg.msg, buf + 2, priv->rx_msg.len);
+		cec_received_msg(priv->adap, &priv->rx_msg);
+		break;
+
+	default: /* unknown */
+		dev_err(&priv->client->dev, "unknown service id 0x%02x\n",
+			buf[1]);
+		break;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int tda9950_cec_transmit(struct cec_adapter *adap, u8 attempts,
+				u32 signal_free_time, struct cec_msg *msg)
+{
+	struct tda9950_priv *priv = adap->priv;
+	u8 buf[CEC_MAX_MSG_SIZE + 2];
+
+	buf[0] = 2 + msg->len;
+	buf[1] = CDR1_REQ;
+	memcpy(buf + 2, msg->msg, msg->len);
+
+	if (attempts > 5)
+		attempts = 5;
+
+	tda9950_write(priv->client, REG_CCONR, attempts);
+
+	return tda9950_write_range(priv->client, REG_CDR0, buf, 2 + msg->len);
+}
+
+static int tda9950_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+	struct tda9950_priv *priv = adap->priv;
+	u16 addresses;
+	u8 buf[2];
+
+	if (addr == CEC_LOG_ADDR_INVALID)
+		addresses = priv->addresses = 0;
+	else
+		addresses = priv->addresses |= BIT(addr);
+
+	/* TDA9950 doesn't want address 15 set */
+	addresses &= 0x7fff;
+	buf[0] = addresses >> 8;
+	buf[1] = addresses;
+
+	return tda9950_write_range(priv->client, REG_ACKH, buf, 2);
+}
+
+/*
+ * When operating as part of the TDA998x, we need additional handling
+ * to initialise and shut down the TDA9950 part of the device.  These
+ * two hooks are provided to allow the TDA998x code to perform those
+ * activities.
+ */
+static int tda9950_glue_open(struct tda9950_priv *priv)
+{
+	int ret = 0;
+
+	if (priv->glue && priv->glue->open)
+		ret = priv->glue->open(priv->glue->data);
+
+	priv->open = true;
+
+	return ret;
+}
+
+static void tda9950_glue_release(struct tda9950_priv *priv)
+{
+	priv->open = false;
+
+	if (priv->glue && priv->glue->release)
+		priv->glue->release(priv->glue->data);
+}
+
+static int tda9950_open(struct tda9950_priv *priv)
+{
+	struct i2c_client *client = priv->client;
+	int ret;
+
+	ret = tda9950_glue_open(priv);
+	if (ret)
+		return ret;
+
+	/* Reset the TDA9950, and wait 250ms for it to recover */
+	tda9950_write(client, REG_CCR, CCR_RESET);
+	msleep(250);
+
+	tda9950_cec_adap_log_addr(priv->adap, CEC_LOG_ADDR_INVALID);
+
+	/* Start the command processor */
+	tda9950_write(client, REG_CCR, CCR_ON);
+
+	return 0;
+}
+
+static void tda9950_release(struct tda9950_priv *priv)
+{
+	struct i2c_client *client = priv->client;
+	int timeout = 50;
+	u8 csr;
+
+	/* Stop the command processor */
+	tda9950_write(client, REG_CCR, 0);
+
+	/* Wait up to .5s for it to signal non-busy */
+	do {
+		csr = tda9950_read(client, REG_CSR);
+		if (!(csr & CSR_BUSY) || !--timeout)
+			break;
+		msleep(10);
+	} while (1);
+
+	/* Warn the user that their IRQ may die if it's shared. */
+	if (csr & CSR_BUSY)
+		dev_warn(&client->dev, "command processor failed to stop, irq%d may die (csr=0x%02x)\n",
+			 client->irq, csr);
+
+	tda9950_glue_release(priv);
+}
+
+static int tda9950_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+	struct tda9950_priv *priv = adap->priv;
+
+	if (!enable) {
+		tda9950_release(priv);
+		return 0;
+	} else {
+		return tda9950_open(priv);
+	}
+}
+
+static const struct cec_adap_ops tda9950_cec_ops = {
+	.adap_enable = tda9950_cec_adap_enable,
+	.adap_log_addr = tda9950_cec_adap_log_addr,
+	.adap_transmit = tda9950_cec_transmit,
+};
+
+/*
+ * When operating as part of the TDA998x, we need to claim additional
+ * resources.  These two hooks permit the management of those resources.
+ */
+static void tda9950_devm_glue_exit(void *data)
+{
+	struct tda9950_glue *glue = data;
+
+	if (glue && glue->exit)
+		glue->exit(glue->data);
+}
+
+static int tda9950_devm_glue_init(struct device *dev, struct tda9950_glue *glue)
+{
+	int ret;
+
+	if (glue && glue->init) {
+		ret = glue->init(glue->data);
+		if (ret)
+			return ret;
+	}
+
+	ret = devm_add_action(dev, tda9950_devm_glue_exit, glue);
+	if (ret)
+		tda9950_devm_glue_exit(glue);
+
+	return ret;
+}
+
+static void tda9950_cec_del(void *data)
+{
+	struct tda9950_priv *priv = data;
+
+	cec_delete_adapter(priv->adap);
+}
+
+static int tda9950_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct tda9950_glue *glue = client->dev.platform_data;
+	struct device *dev = &client->dev;
+	struct tda9950_priv *priv;
+	unsigned long irqflags;
+	int ret;
+	u8 cvr;
+
+	/*
+	 * We must have I2C functionality: our multi-byte accesses
+	 * must be performed as a single contiguous transaction.
+	 */
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev,
+			"adapter does not support I2C functionality\n");
+		return -ENXIO;
+	}
+
+	/* We must have an interrupt to be functional. */
+	if (client->irq <= 0) {
+		dev_err(&client->dev, "driver requires an interrupt\n");
+		return -ENXIO;
+	}
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->client = client;
+	priv->glue = glue;
+
+	i2c_set_clientdata(client, priv);
+
+	/*
+	 * If we're part of a TDA998x, we want the class devices to be
+	 * associated with the HDMI Tx so we have a tight relationship
+	 * between the HDMI interface and the CEC interface.
+	 */
+	priv->hdmi = dev;
+	if (glue && glue->parent)
+		priv->hdmi = glue->parent;
+
+	priv->adap = cec_allocate_adapter(&tda9950_cec_ops, priv, "tda9950",
+					  CEC_CAP_DEFAULTS,
+					  CEC_MAX_LOG_ADDRS);
+	if (IS_ERR(priv->adap))
+		return PTR_ERR(priv->adap);
+
+	ret = devm_add_action(dev, tda9950_cec_del, priv);
+	if (ret) {
+		cec_delete_adapter(priv->adap);
+		return ret;
+	}
+
+	ret = tda9950_devm_glue_init(dev, glue);
+	if (ret)
+		return ret;
+
+	ret = tda9950_glue_open(priv);
+	if (ret)
+		return ret;
+
+	cvr = tda9950_read(client, REG_CVR);
+
+	dev_info(&client->dev,
+		 "TDA9950 CEC interface, hardware version %u.%u\n",
+		 cvr >> 4, cvr & 15);
+
+	tda9950_glue_release(priv);
+
+	irqflags = IRQF_TRIGGER_FALLING;
+	if (glue)
+		irqflags = glue->irq_flags;
+
+	ret = devm_request_threaded_irq(dev, client->irq, NULL, tda9950_irq,
+					irqflags | IRQF_SHARED | IRQF_ONESHOT,
+					dev_name(&client->dev), priv);
+	if (ret < 0)
+		return ret;
+
+	priv->notify = cec_notifier_get(priv->hdmi);
+	if (!priv->notify)
+		return -ENOMEM;
+
+	ret = cec_register_adapter(priv->adap, priv->hdmi);
+	if (ret < 0) {
+		cec_notifier_put(priv->notify);
+		return ret;
+	}
+
+	/*
+	 * CEC documentation says we must not call cec_delete_adapter
+	 * after a successful call to cec_register_adapter().
+	 */
+	devm_remove_action(dev, tda9950_cec_del, priv);
+
+	cec_register_cec_notifier(priv->adap, priv->notify);
+
+	return 0;
+}
+
+static int tda9950_remove(struct i2c_client *client)
+{
+	struct tda9950_priv *priv = i2c_get_clientdata(client);
+
+	cec_unregister_adapter(priv->adap);
+	cec_notifier_put(priv->notify);
+
+	return 0;
+}
+
+static struct i2c_device_id tda9950_ids[] = {
+	{ "tda9950", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, tda9950_ids);
+
+static struct i2c_driver tda9950_driver = {
+	.probe = tda9950_probe,
+	.remove = tda9950_remove,
+	.driver = {
+		.name = "tda9950",
+	},
+	.id_table = tda9950_ids,
+};
+
+module_i2c_driver(tda9950_driver);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
+MODULE_DESCRIPTION("TDA9950/TDA998x Consumer Electronics Control Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/platform_data/tda9950.h b/include/linux/platform_data/tda9950.h
new file mode 100644
index 000000000000..c65efd461102
--- /dev/null
+++ b/include/linux/platform_data/tda9950.h
@@ -0,0 +1,16 @@
+#ifndef LINUX_PLATFORM_DATA_TDA9950_H
+#define LINUX_PLATFORM_DATA_TDA9950_H
+
+struct device;
+
+struct tda9950_glue {
+	struct device *parent;
+	unsigned long irq_flags;
+	void *data;
+	int (*init)(void *);
+	void (*exit)(void *);
+	int (*open)(void *);
+	void (*release)(void *);
+};
+
+#endif

From 7e8675f000bc7e20f4efb72cf624f4109301002b Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@armlinux.org.uk>
Date: Wed, 5 Oct 2016 12:47:50 +0100
Subject: [PATCH 0469/1286] drm/i2c: tda998x: add CEC support

The TDA998x is an HDMI transmitter with a TDA9950 CEC engine integrated
onto the same die.  Add support for the TDA9950 CEC engine to the
TDA998x driver.
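
In outline, the hookup goes through the tda9950_glue platform data added
by the previous patch: the TDA998x fills in the glue callbacks and then
instantiates the CEC device on the same I2C adapter.  Condensed from the
diff below:

    priv->cec_glue.parent  = &client->dev;
    priv->cec_glue.data    = priv;
    priv->cec_glue.init    = tda998x_cec_hook_init;
    priv->cec_glue.exit    = tda998x_cec_hook_exit;
    priv->cec_glue.open    = tda998x_cec_hook_open;
    priv->cec_glue.release = tda998x_cec_hook_release;

    /* the TDA9950 CEC engine sits at its own (fixed) I2C address */
    memset(&cec_info, 0, sizeof(cec_info));
    strlcpy(cec_info.type, "tda9950", sizeof(cec_info.type));
    cec_info.addr = priv->cec_addr;
    cec_info.platform_data = &priv->cec_glue;
    cec_info.irq = client->irq;

    priv->cec = i2c_new_device(client->adapter, &cec_info);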

Reviewed-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 drivers/gpu/drm/i2c/Kconfig       |   1 +
 drivers/gpu/drm/i2c/tda998x_drv.c | 195 ++++++++++++++++++++++++++++--
 2 files changed, 187 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 3a232f5ff0a1..65d3acb61c03 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -22,6 +22,7 @@ config DRM_I2C_SIL164
 config DRM_I2C_NXP_TDA998X
 	tristate "NXP Semiconductors TDA998X HDMI encoder"
 	default m if DRM_TILCDC
+	select CEC_CORE if CEC_NOTIFIER
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	help
 	  Support for NXP Semiconductors TDA998X HDMI encoders.
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 16e0439cad44..eb9916bd84a4 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -16,8 +16,10 @@
  */
 
 #include <linux/component.h>
+#include <linux/gpio/consumer.h>
 #include <linux/hdmi.h>
 #include <linux/module.h>
+#include <linux/platform_data/tda9950.h>
 #include <linux/irq.h>
 #include <sound/asoundef.h>
 #include <sound/hdmi-codec.h>
@@ -29,6 +31,8 @@
 #include <drm/drm_of.h>
 #include <drm/i2c/tda998x.h>
 
+#include <media/cec-notifier.h>
+
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
 struct tda998x_audio_port {
@@ -55,6 +59,7 @@ struct tda998x_priv {
 	struct platform_device *audio_pdev;
 	struct mutex audio_mutex;
 
+	struct mutex edid_mutex;
 	wait_queue_head_t wq_edid;
 	volatile int wq_edid_wait;
 
@@ -67,6 +72,9 @@ struct tda998x_priv {
 	struct drm_connector connector;
 
 	struct tda998x_audio_port audio_port[2];
+	struct tda9950_glue cec_glue;
+	struct gpio_desc *calib;
+	struct cec_notifier *cec_notify;
 };
 
 #define conn_to_tda998x_priv(x) \
@@ -345,6 +353,12 @@ struct tda998x_priv {
 #define REG_CEC_INTSTATUS	  0xee		      /* read */
 # define CEC_INTSTATUS_CEC	  (1 << 0)
 # define CEC_INTSTATUS_HDMI	  (1 << 1)
+#define REG_CEC_CAL_XOSC_CTRL1    0xf2
+# define CEC_CAL_XOSC_CTRL1_ENA_CAL	BIT(0)
+#define REG_CEC_DES_FREQ2         0xf5
+# define CEC_DES_FREQ2_DIS_AUTOCAL BIT(7)
+#define REG_CEC_CLK               0xf6
+# define CEC_CLK_FRO              0x11
 #define REG_CEC_FRO_IM_CLK_CTRL   0xfb                /* read/write */
 # define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
 # define CEC_FRO_IM_CLK_CTRL_ENA_OTP   (1 << 6)
@@ -359,6 +373,7 @@ struct tda998x_priv {
 # define CEC_RXSHPDLEV_HPD        (1 << 1)
 
 #define REG_CEC_ENAMODS           0xff                /* read/write */
+# define CEC_ENAMODS_EN_CEC_CLK   (1 << 7)
 # define CEC_ENAMODS_DIS_FRO      (1 << 6)
 # define CEC_ENAMODS_DIS_CCLK     (1 << 5)
 # define CEC_ENAMODS_EN_RXSENS    (1 << 2)
@@ -417,6 +432,114 @@ cec_read(struct tda998x_priv *priv, u8 addr)
 	return val;
 }
 
+static void cec_enamods(struct tda998x_priv *priv, u8 mods, bool enable)
+{
+	int val = cec_read(priv, REG_CEC_ENAMODS);
+
+	if (val < 0)
+		return;
+
+	if (enable)
+		val |= mods;
+	else
+		val &= ~mods;
+
+	cec_write(priv, REG_CEC_ENAMODS, val);
+}
+
+static void tda998x_cec_set_calibration(struct tda998x_priv *priv, bool enable)
+{
+	if (enable) {
+		u8 val;
+
+		cec_write(priv, 0xf3, 0xc0);
+		cec_write(priv, 0xf4, 0xd4);
+
+		/* Enable automatic calibration mode */
+		val = cec_read(priv, REG_CEC_DES_FREQ2);
+		val &= ~CEC_DES_FREQ2_DIS_AUTOCAL;
+		cec_write(priv, REG_CEC_DES_FREQ2, val);
+
+		/* Enable free running oscillator */
+		cec_write(priv, REG_CEC_CLK, CEC_CLK_FRO);
+		cec_enamods(priv, CEC_ENAMODS_DIS_FRO, false);
+
+		cec_write(priv, REG_CEC_CAL_XOSC_CTRL1,
+			  CEC_CAL_XOSC_CTRL1_ENA_CAL);
+	} else {
+		cec_write(priv, REG_CEC_CAL_XOSC_CTRL1, 0);
+	}
+}
+
+/*
+ * Calibration for the internal oscillator: we need to set calibration mode,
+ * and then pulse the IRQ line low for a 10ms ± 1% period.
+ */
+static void tda998x_cec_calibration(struct tda998x_priv *priv)
+{
+	struct gpio_desc *calib = priv->calib;
+
+	mutex_lock(&priv->edid_mutex);
+	if (priv->hdmi->irq > 0)
+		disable_irq(priv->hdmi->irq);
+	gpiod_direction_output(calib, 1);
+	tda998x_cec_set_calibration(priv, true);
+
+	local_irq_disable();
+	gpiod_set_value(calib, 0);
+	mdelay(10);
+	gpiod_set_value(calib, 1);
+	local_irq_enable();
+
+	tda998x_cec_set_calibration(priv, false);
+	gpiod_direction_input(calib);
+	if (priv->hdmi->irq > 0)
+		enable_irq(priv->hdmi->irq);
+	mutex_unlock(&priv->edid_mutex);
+}
+
+static int tda998x_cec_hook_init(void *data)
+{
+	struct tda998x_priv *priv = data;
+	struct gpio_desc *calib;
+
+	calib = gpiod_get(&priv->hdmi->dev, "nxp,calib", GPIOD_ASIS);
+	if (IS_ERR(calib)) {
+		dev_warn(&priv->hdmi->dev, "failed to get calibration gpio: %ld\n",
+			 PTR_ERR(calib));
+		return PTR_ERR(calib);
+	}
+
+	priv->calib = calib;
+
+	return 0;
+}
+
+static void tda998x_cec_hook_exit(void *data)
+{
+	struct tda998x_priv *priv = data;
+
+	gpiod_put(priv->calib);
+	priv->calib = NULL;
+}
+
+static int tda998x_cec_hook_open(void *data)
+{
+	struct tda998x_priv *priv = data;
+
+	cec_enamods(priv, CEC_ENAMODS_EN_CEC_CLK | CEC_ENAMODS_EN_CEC, true);
+	tda998x_cec_calibration(priv);
+
+	return 0;
+}
+
+static void tda998x_cec_hook_release(void *data)
+{
+	struct tda998x_priv *priv = data;
+
+	cec_enamods(priv, CEC_ENAMODS_EN_CEC_CLK | CEC_ENAMODS_EN_CEC, false);
+}
+
 static int
 set_page(struct tda998x_priv *priv, u16 reg)
 {
@@ -657,10 +780,13 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
 			sta, cec, lvl, flag0, flag1, flag2);
 
 		if (cec & CEC_RXSHPDINT_HPD) {
-			if (lvl & CEC_RXSHPDLEV_HPD)
+			if (lvl & CEC_RXSHPDLEV_HPD) {
 				tda998x_edid_delay_start(priv);
-			else
+			} else {
 				schedule_work(&priv->detect_work);
+				cec_notifier_set_phys_addr(priv->cec_notify,
+						   CEC_PHYS_ADDR_INVALID);
+			}
 
 			handled = true;
 		}
@@ -981,6 +1107,8 @@ static int tda998x_connector_fill_modes(struct drm_connector *connector,
 	if (connector->edid_blob_ptr) {
 		struct edid *edid = (void *)connector->edid_blob_ptr->data;
 
+		cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
+
 		priv->sink_has_audio = drm_detect_monitor_audio(edid);
 	} else {
 		priv->sink_has_audio = false;
@@ -1024,6 +1152,8 @@ static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
 	offset = (blk & 1) ? 128 : 0;
 	segptr = blk / 2;
 
+	mutex_lock(&priv->edid_mutex);
+
 	reg_write(priv, REG_DDC_ADDR, 0xa0);
 	reg_write(priv, REG_DDC_OFFS, offset);
 	reg_write(priv, REG_DDC_SEGM_ADDR, 0x60);
@@ -1043,14 +1173,15 @@ static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
 					msecs_to_jiffies(100));
 		if (i < 0) {
 			dev_err(&priv->hdmi->dev, "read edid wait err %d\n", i);
-			return i;
+			ret = i;
+			goto failed;
 		}
 	} else {
 		for (i = 100; i > 0; i--) {
 			msleep(1);
 			ret = reg_read(priv, REG_INT_FLAGS_2);
 			if (ret < 0)
-				return ret;
+				goto failed;
 			if (ret & INT_FLAGS_2_EDID_BLK_RD)
 				break;
 		}
@@ -1058,17 +1189,22 @@ static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
 
 	if (i == 0) {
 		dev_err(&priv->hdmi->dev, "read edid timeout\n");
-		return -ETIMEDOUT;
+		ret = -ETIMEDOUT;
+		goto failed;
 	}
 
 	ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length);
 	if (ret != length) {
 		dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n",
 			blk, ret);
-		return ret;
+		goto failed;
 	}
 
-	return 0;
+	ret = 0;
+
+ failed:
+	mutex_unlock(&priv->edid_mutex);
+	return ret;
 }
 
 static int tda998x_connector_get_modes(struct drm_connector *connector)
@@ -1423,6 +1559,9 @@ static void tda998x_destroy(struct tda998x_priv *priv)
 	cancel_work_sync(&priv->detect_work);
 
 	i2c_unregister_device(priv->cec);
+
+	if (priv->cec_notify)
+		cec_notifier_put(priv->cec_notify);
 }
 
 /* I2C driver functions */
@@ -1472,11 +1611,13 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
 static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 {
 	struct device_node *np = client->dev.of_node;
+	struct i2c_board_info cec_info;
 	u32 video;
 	int rev_lo, rev_hi, ret;
 
 	mutex_init(&priv->mutex);	/* protect the page access */
 	mutex_init(&priv->audio_mutex); /* protect access from audio thread */
+	mutex_init(&priv->edid_mutex);
 	init_waitqueue_head(&priv->edid_delay_waitq);
 	timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
 	INIT_WORK(&priv->detect_work, tda998x_detect_work);
@@ -1564,6 +1705,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
 		irq_flags =
 			irqd_get_trigger_type(irq_get_irq_data(client->irq));
+
+		priv->cec_glue.irq_flags = irq_flags;
+
 		irq_flags |= IRQF_SHARED | IRQF_ONESHOT;
 		ret = request_threaded_irq(client->irq, NULL,
 					   tda998x_irq_thread, irq_flags,
@@ -1579,7 +1723,34 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 		cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
 	}
 
-	priv->cec = i2c_new_dummy(client->adapter, priv->cec_addr);
+	priv->cec_notify = cec_notifier_get(&client->dev);
+	if (!priv->cec_notify) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	priv->cec_glue.parent = &client->dev;
+	priv->cec_glue.data = priv;
+	priv->cec_glue.init = tda998x_cec_hook_init;
+	priv->cec_glue.exit = tda998x_cec_hook_exit;
+	priv->cec_glue.open = tda998x_cec_hook_open;
+	priv->cec_glue.release = tda998x_cec_hook_release;
+
+	/*
+	 * Some TDA998x are actually two I2C devices merged onto one piece
+	 * of silicon: TDA9989 and TDA19989 combine the HDMI transmitter
+	 * with a slightly modified TDA9950 CEC device.  The CEC device
+	 * is at the TDA9950 address, with the address pins strapped across
+	 * to the TDA998x address pins.  Hence, it always has the same
+	 * offset.
+	 */
+	memset(&cec_info, 0, sizeof(cec_info));
+	strlcpy(cec_info.type, "tda9950", sizeof(cec_info.type));
+	cec_info.addr = priv->cec_addr;
+	cec_info.platform_data = &priv->cec_glue;
+	cec_info.irq = client->irq;
+
+	priv->cec = i2c_new_device(client->adapter, &cec_info);
 	if (!priv->cec) {
 		ret = -ENODEV;
 		goto fail;
@@ -1609,10 +1780,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	return 0;
 
 fail:
+	/* if encoder_init fails, the encoder slave is never registered,
+	 * so cleanup here:
+	 */
+	if (priv->cec)
+		i2c_unregister_device(priv->cec);
+	if (priv->cec_notify)
+		cec_notifier_put(priv->cec_notify);
 	if (client->irq)
 		free_irq(client->irq, priv);
 err_irq:
-	i2c_unregister_device(priv->cec);
 	return ret;
 }
 

From ba52762fb1430b2a2ea8127c1a292c15f13b8dac Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@armlinux.org.uk>
Date: Wed, 29 Nov 2017 10:27:12 +0000
Subject: [PATCH 0470/1286] dt-bindings: tda998x: add the calibration gpio

Add the optional calibration gpio for integrated TDA9950 CEC support.
This GPIO corresponds with the interrupt from the TDA998x, as the
calibration requires driving the interrupt pin low.

Reviewed-by: Rob Herring <robh@kernel.org>
Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
---
 Documentation/devicetree/bindings/display/bridge/tda998x.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Documentation/devicetree/bindings/display/bridge/tda998x.txt b/Documentation/devicetree/bindings/display/bridge/tda998x.txt
index 24cc2466185a..1a4eaca40d94 100644
--- a/Documentation/devicetree/bindings/display/bridge/tda998x.txt
+++ b/Documentation/devicetree/bindings/display/bridge/tda998x.txt
@@ -27,6 +27,9 @@ Optional properties:
 	in question is used. The implementation allows one or two DAIs. If two
 	DAIs are defined, they must be of different type.
 
+  - nxp,calib-gpios: calibration GPIO, which must correspond with the
+	gpio used for the TDA998x interrupt pin.
+
 [1] Documentation/sound/alsa/soc/DAI.txt
 [2] include/dt-bindings/display/tda998x.h
 

From 068b01d843a838325aeda5b73df6e6799aa48cf7 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Wed, 18 Apr 2018 14:36:41 +0200
Subject: [PATCH 0471/1286] drm: panel-orientation-quirks: Add quirk for Lenovo
 Ideapad Miix 310

Some production batches of the Lenovo Ideapad Miix 310 laptop use
a portrait LCD panel; add a quirk for this.

Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418123642.11088-1-hdegoede@redhat.com
---
 drivers/gpu/drm/drm_panel_orientation_quirks.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 902cc1a71e45..9274237b7f57 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -60,6 +60,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
+static const struct drm_dmi_panel_orientation_data lenovo_ideapad_miix_310 = {
+	.width = 800,
+	.height = 1280,
+	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
 static const struct drm_dmi_panel_orientation_data vios_lth17 = {
 	.width = 800,
 	.height = 1280,
@@ -102,6 +108,17 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
 		},
 		.driver_data = (void *)&itworks_tw891,
+	}, {	/*
+		 * Lenovo Ideapad Miix 310 laptop, only some production batches
+		 * have a portrait screen; the resolution check makes the quirk
+		 * apply only to those batches.
+		 */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80SG"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+		},
+		.driver_data = (void *)&lenovo_ideapad_miix_310,
 	}, {	/* VIOS LTH17 */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),

From f55826f0437ed6832e399cd417f9288756ad13a2 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede@redhat.com>
Date: Wed, 18 Apr 2018 14:36:42 +0200
Subject: [PATCH 0472/1286] drm: panel-orientation-quirks: Add quirk for Lenovo
 Ideapad Miix 320

The Lenovo Ideapad Miix 320 laptop uses a portrait LCD panel; add a
quirk for this.

While at it, instead of duplicating the same drm_dmi_panel_orientation_data
for 3 laptops, add a generic lcd800x1280_rightside_up orientation_data and
use that for all 3 (including the new Miix 320 entry).

Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418123642.11088-2-hdegoede@redhat.com
---
 .../gpu/drm/drm_panel_orientation_quirks.c    | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 9274237b7f57..caebddda8bce 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -60,13 +60,7 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
-static const struct drm_dmi_panel_orientation_data lenovo_ideapad_miix_310 = {
-	.width = 800,
-	.height = 1280,
-	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
-static const struct drm_dmi_panel_orientation_data vios_lth17 = {
+static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
 	.width = 800,
 	.height = 1280,
 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
@@ -118,13 +112,20 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80SG"),
 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
 		},
-		.driver_data = (void *)&lenovo_ideapad_miix_310,
+		.driver_data = (void *)&lcd800x1280_rightside_up,
+	}, {	/* Lenovo Ideapad Miix 320 */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+		},
+		.driver_data = (void *)&lcd800x1280_rightside_up,
 	}, {	/* VIOS LTH17 */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
 		},
-		.driver_data = (void *)&vios_lth17,
+		.driver_data = (void *)&lcd800x1280_rightside_up,
 	},
 	{}
 };

From 3fbe86be6c24c7405117026813b49f10acc7d322 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 5 Apr 2018 17:44:41 +0200
Subject: [PATCH 0473/1286] drm/vmwgfx: Remove no-op prepare/cleanup_fb
 callbacks

Fewer hits to go through when I git grep over all drivers. These
callbacks are optional.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
Cc: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405154449.23038-2-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 35 -----------------------------
 1 file changed, 35 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 3824595fece1..4a5907e3f560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -281,39 +281,6 @@ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
  * Legacy Display Plane Functions
  */
 
-/**
- * vmw_ldu_primary_plane_cleanup_fb - Noop
- *
- * @plane:  display plane
- * @old_state: Contains the FB to clean up
- *
- * Unpins the display surface
- *
- * Returns 0 on success
- */
-static void
-vmw_ldu_primary_plane_cleanup_fb(struct drm_plane *plane,
-				 struct drm_plane_state *old_state)
-{
-}
-
-
-/**
- * vmw_ldu_primary_plane_prepare_fb - Noop
- *
- * @plane:  display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-static int
-vmw_ldu_primary_plane_prepare_fb(struct drm_plane *plane,
-				 struct drm_plane_state *new_state)
-{
-	return 0;
-}
-
-
 static void
 vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
 				    struct drm_plane_state *old_state)
@@ -373,8 +340,6 @@ static const struct
 drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = {
 	.atomic_check = vmw_du_primary_plane_atomic_check,
 	.atomic_update = vmw_ldu_primary_plane_atomic_update,
-	.prepare_fb = vmw_ldu_primary_plane_prepare_fb,
-	.cleanup_fb = vmw_ldu_primary_plane_cleanup_fb,
 };
 
 static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {

From ccc3b2b3482c2c05d05fd2cfbf0c28d644b4b0c2 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 5 Apr 2018 17:44:42 +0200
Subject: [PATCH 0474/1286] drm: Move simple_display_pipe prepare_fb helper
 into gem fb helpers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There's nothing tinydrm-specific to this, and there are a few more
copies of the same in various other drivers.
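
For a driver that always has its buffers pinned, the conversion then
boils down to pointing the hook at the shared helper.  A sketch (the
foo_* names are placeholders; the tinydrm conversions below are the
real thing):

    static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
            .enable     = foo_pipe_enable,
            .disable    = foo_pipe_disable,
            .update     = foo_pipe_update,
            /* shared helper replaces the per-driver copy */
            .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
    };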

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: David Airlie <airlied@linux.ie>
Cc: David Lechner <david@lechnology.com>
Cc: "Noralf Trønnes" <noralf@tronnes.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Shawn Guo <shawnguo@kernel.org>
Cc: Neil Armstrong <narmstrong@baylibre.com>
Cc: Daniel Stone <daniels@collabora.com>
Cc: Haneen Mohammed <hamohammed.sa@gmail.com>
Cc: Ben Widawsky <ben@bwidawsk.net>
Cc: "Ville Syrjälä" <ville.syrjala@linux.intel.com>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Acked-by: David Lechner <david@lechnology.com>
Reviewed-by: Noralf Trønnes <noralf@tronnes.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405154449.23038-3-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/drm_gem_framebuffer_helper.c | 19 +++++++++++++++++++
 drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c  | 17 -----------------
 drivers/gpu/drm/tinydrm/ili9225.c            |  2 +-
 drivers/gpu/drm/tinydrm/mi0283qt.c           |  3 ++-
 drivers/gpu/drm/tinydrm/repaper.c            |  2 +-
 drivers/gpu/drm/tinydrm/st7586.c             |  2 +-
 drivers/gpu/drm/tinydrm/st7735r.c            |  2 +-
 include/drm/drm_gem_framebuffer_helper.h     |  3 +++
 include/drm/drm_simple_kms_helper.h          |  3 +++
 include/drm/tinydrm/tinydrm.h                |  2 --
 10 files changed, 31 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 4d682a6e8bcb..acfbc0641a06 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -22,6 +22,7 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modeset_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 /**
  * DOC: overview
@@ -265,6 +266,24 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
 }
 EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
 
+/**
+ * drm_gem_fb_simple_display_pipe_prepare_fb - prepare_fb helper for
+ *     &drm_simple_display_pipe
+ * @pipe: Simple display pipe
+ * @plane_state: Plane state
+ *
+ * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has a
+ * &dma_buf attached, extracts the exclusive fence and attaches it to plane
+ * state for the atomic helper to wait on. Drivers can use this as their
+ * &drm_simple_display_pipe_funcs.prepare_fb callback.
+ */
+int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+					      struct drm_plane_state *plane_state)
+{
+	return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb);
+
 /**
  * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
  *                           emulation
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index e68b528ae64d..7e8e24d0b7a7 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -138,23 +138,6 @@ void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
 }
 EXPORT_SYMBOL(tinydrm_display_pipe_update);
 
-/**
- * tinydrm_display_pipe_prepare_fb - Display pipe prepare_fb helper
- * @pipe: Simple display pipe
- * @plane_state: Plane state
- *
- * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has an
- * dma-buf attached, extracts the exclusive fence and attaches it to plane
- * state for the atomic helper to wait on. Drivers can use this as their
- * &drm_simple_display_pipe_funcs->prepare_fb callback.
- */
-int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
-				    struct drm_plane_state *plane_state)
-{
-	return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-EXPORT_SYMBOL(tinydrm_display_pipe_prepare_fb);
-
 static int tinydrm_rotate_mode(struct drm_display_mode *mode,
 			       unsigned int rotation)
 {
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 0874e877b111..841c69aba059 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -354,7 +354,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
 	.enable		= ili9225_pipe_enable,
 	.disable	= ili9225_pipe_disable,
 	.update		= tinydrm_display_pipe_update,
-	.prepare_fb	= tinydrm_display_pipe_prepare_fb,
+	.prepare_fb	= drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const struct drm_display_mode ili9225_mode = {
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 4e6d2ee94e55..d5ef65179c16 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -19,6 +19,7 @@
 
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_modeset_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/tinydrm/mipi-dbi.h>
 #include <drm/tinydrm/tinydrm-helpers.h>
 #include <video/mipi_display.h>
@@ -134,7 +135,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
 	.enable = mi0283qt_enable,
 	.disable = mipi_dbi_pipe_disable,
 	.update = tinydrm_display_pipe_update,
-	.prepare_fb = tinydrm_display_pipe_prepare_fb,
+	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const struct drm_display_mode mi0283qt_mode = {
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index bb6f80a81899..1ee6855212a0 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -841,7 +841,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
 	.enable = repaper_pipe_enable,
 	.disable = repaper_pipe_disable,
 	.update = tinydrm_display_pipe_update,
-	.prepare_fb = tinydrm_display_pipe_prepare_fb,
+	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const uint32_t repaper_formats[] = {
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 22644b88199a..5c29e3803ecb 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -290,7 +290,7 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
 	.enable		= st7586_pipe_enable,
 	.disable	= st7586_pipe_disable,
 	.update		= tinydrm_display_pipe_update,
-	.prepare_fb	= tinydrm_display_pipe_prepare_fb,
+	.prepare_fb	= drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const struct drm_display_mode st7586_mode = {
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 189a07894d36..6c7b15c9da4f 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -106,7 +106,7 @@ static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
 	.enable		= jd_t18003_t01_pipe_enable,
 	.disable	= mipi_dbi_pipe_disable,
 	.update		= tinydrm_display_pipe_update,
-	.prepare_fb	= tinydrm_display_pipe_prepare_fb,
+	.prepare_fb	= drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static const struct drm_display_mode jd_t18003_t01_mode = {
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 5ca7cdc3f527..a38de7eb55b4 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -10,6 +10,7 @@ struct drm_gem_object;
 struct drm_mode_fb_cmd2;
 struct drm_plane;
 struct drm_plane_state;
+struct drm_simple_display_pipe;
 
 struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
 					  unsigned int plane);
@@ -27,6 +28,8 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
 
 int drm_gem_fb_prepare_fb(struct drm_plane *plane,
 			  struct drm_plane_state *state);
+int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+					      struct drm_plane_state *plane_state);
 
 struct drm_framebuffer *
 drm_gem_fbdev_fb_create(struct drm_device *dev,
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index b02793742317..451960438a29 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -116,6 +116,9 @@ struct drm_simple_display_pipe_funcs {
 	 * Optional, called by &drm_plane_helper_funcs.prepare_fb.  Please read
 	 * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
 	 * more details.
+	 *
+	 * Drivers which always have their buffers pinned should use
+	 * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
 	 */
 	int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
 			  struct drm_plane_state *plane_state);
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 6e2b960e25eb..56e4a916b5e8 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -95,8 +95,6 @@ void tinydrm_shutdown(struct tinydrm_device *tdev);
 
 void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
 				 struct drm_plane_state *old_state);
-int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
-				    struct drm_plane_state *plane_state);
 int
 tinydrm_display_pipe_init(struct tinydrm_device *tdev,
 			  const struct drm_simple_display_pipe_funcs *funcs,

From 78172ad85b600b05100b7e0b67a60a3c3bfe6b5b Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 5 Apr 2018 17:44:43 +0200
Subject: [PATCH 0475/1286] drm/tve200: Use simple_display_pipe prepare_fb
 helper

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405154449.23038-4-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/tve200/tve200_display.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 108f3b2b5d25..e8723a2412a6 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -293,18 +293,12 @@ static void tve200_display_disable_vblank(struct drm_simple_display_pipe *pipe)
 	writel(0, priv->regs + TVE200_INT_EN);
 }
 
-static int tve200_display_prepare_fb(struct drm_simple_display_pipe *pipe,
-				    struct drm_plane_state *plane_state)
-{
-	return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-
 static const struct drm_simple_display_pipe_funcs tve200_display_funcs = {
 	.check = tve200_display_check,
 	.enable = tve200_display_enable,
 	.disable = tve200_display_disable,
 	.update = tve200_display_update,
-	.prepare_fb = tve200_display_prepare_fb,
+	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 	.enable_vblank = tve200_display_enable_vblank,
 	.disable_vblank = tve200_display_disable_vblank,
 };

From f2b5a62165120a22a3aa1bc0173e45d388bc9b87 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 5 Apr 2018 17:44:44 +0200
Subject: [PATCH 0476/1286] drm/pl111: Use simple_display_pipe prepare_fb
 helper

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405154449.23038-5-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/pl111/pl111_display.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 1fee578e05b0..19b0d006a54a 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -377,19 +377,13 @@ static void pl111_display_disable_vblank(struct drm_simple_display_pipe *pipe)
 	writel(0, priv->regs + priv->ienb);
 }
 
-static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
-				    struct drm_plane_state *plane_state)
-{
-	return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-
 static struct drm_simple_display_pipe_funcs pl111_display_funcs = {
 	.mode_valid = pl111_mode_valid,
 	.check = pl111_display_check,
 	.enable = pl111_display_enable,
 	.disable = pl111_display_disable,
 	.update = pl111_display_update,
-	.prepare_fb = pl111_display_prepare_fb,
+	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
 static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,

From 244cb3dd22e429e183d06fe0db55ea3ebf1fef19 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 5 Apr 2018 17:44:45 +0200
Subject: [PATCH 0477/1286] drm/mxsfb: Use simple_display_pipe prepare_fb
 helper

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Marek Vasut <marex@denx.de>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405154449.23038-6-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/mxsfb/mxsfb_drv.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index b9c7507813db..ffe5137ccaf8 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -126,12 +126,6 @@ static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
 	mxsfb_plane_atomic_update(mxsfb, plane_state);
 }
 
-static int mxsfb_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
-				 struct drm_plane_state *plane_state)
-{
-	return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-
 static int mxsfb_pipe_enable_vblank(struct drm_simple_display_pipe *pipe)
 {
 	struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
@@ -160,7 +154,7 @@ static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
 	.enable		= mxsfb_pipe_enable,
 	.disable	= mxsfb_pipe_disable,
 	.update		= mxsfb_pipe_update,
-	.prepare_fb	= mxsfb_pipe_prepare_fb,
+	.prepare_fb	= drm_gem_fb_simple_display_pipe_prepare_fb,
 	.enable_vblank	= mxsfb_pipe_enable_vblank,
 	.disable_vblank	= mxsfb_pipe_disable_vblank,
 };

From 30d23f220c75cf58584b19929bd8460b4edc9771 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 5 Apr 2018 17:44:46 +0200
Subject: [PATCH 0478/1286] drm/atomic: better doc for implicit vs explicit
 fencing

Note that a pile of drivers don't seem to take implicit fencing into
account, or at least don't call drm_atomic_set_fence_for_plane().
Cc'ing relevant people, or at least some. Some drivers also look like
they don't disable implicit fencing (e.g. amdgpu) because the explicit
fences and implicit fences are handled by entirely independent code
paths.

I also wonder whether we shouldn't just make the recommended helpers
the default ones, since a lot of drivers don't bother to handle the
implicit fences at all, it seems. The helpers won't blow up even for
non-GEM drivers or GEM drivers which don't fill out the gem bo
pointers in struct drm_framebuffer.
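
For reference, the recommended pattern for a driver whose buffers are
always pinned is just a thin wrapper around the helper.  A minimal
sketch (foo_plane_prepare_fb is a placeholder name):

    static int foo_plane_prepare_fb(struct drm_plane *plane,
                                    struct drm_plane_state *state)
    {
            /*
             * Extracts the exclusive fence from an attached dma-buf and
             * stores it via drm_atomic_set_fence_for_plane(), so an
             * explicit fence supplied by userspace is never overwritten.
             */
            return drm_gem_fb_prepare_fb(plane, state);
    }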

v2: Comments from Eric.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Harry Wentland <harry.wentland@amd.com>
Cc: Sinclair Yeh <syeh@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405154449.23038-7-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/drm_atomic.c             |  8 ++++++++
 include/drm/drm_modeset_helper_vtables.h |  5 ++++-
 include/drm/drm_plane.h                  | 11 +++++++++--
 3 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3d9ae057a6cd..9bdd67781917 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1496,6 +1496,14 @@ EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
  * Otherwise, if &drm_plane_state.fence is not set this function we just set it
  * with the received implicit fence. In both cases this function consumes a
  * reference for @fence.
+ *
+ * This way explicit fencing can be used to overrule implicit fencing, which is
+ * important to make explicit fencing use-cases work: One example is using one
+ * buffer for 2 screens with different refresh rates. Implicit fencing will
+ * clamp rendering to the refresh rate of the slower screen, whereas explicit
+ * fence allows 2 independent render and display loops on a single buffer. If a
+ * driver obeys both implicit and explicit fences for plane updates, then
+ * it will break all the benefits of explicit fencing.
  */
 void
 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 3e76ca805b0f..35e2a3a79fc5 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1004,11 +1004,14 @@ struct drm_plane_helper_funcs {
 	 * This function must not block for outstanding rendering, since it is
 	 * called in the context of the atomic IOCTL even for async commits to
 	 * be able to return any errors to userspace. Instead the recommended
-	 * way is to fill out the fence member of the passed-in
+	 * way is to fill out the &drm_plane_state.fence of the passed-in
 	 * &drm_plane_state. If the driver doesn't support native fences then
 	 * equivalent functionality should be implemented through private
 	 * members in the plane structure.
 	 *
+	 * Drivers which always have their buffers pinned should use
+	 * drm_gem_fb_prepare_fb() for this hook.
+	 *
 	 * The helpers will call @cleanup_fb with matching arguments for every
 	 * successful call to this hook.
 	 *
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 9563bd25f19b..26fa50c2a50e 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -80,8 +80,15 @@ struct drm_plane_state {
 	/**
 	 * @fence:
 	 *
-	 * Optional fence to wait for before scanning out @fb. Do not write this
-	 * directly, use drm_atomic_set_fence_for_plane()
+	 * Optional fence to wait for before scanning out @fb. The core atomic
+	 * code will set this when userspace is using explicit fencing. Do not
+	 * write this directly for a driver's implicit fence, use
+	 * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
+	 * preserved.
+	 *
+	 * Drivers should store any implicit fence in this from their
+	 * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb()
+	 * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
 	 */
 	struct dma_fence *fence;
 

From dd388ee1ecbb8c1a9376f02ac0be573af9db5703 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Mon, 9 Apr 2018 10:51:34 +0200
Subject: [PATCH 0479/1286] drm/xen-front: use simple display pipe prepare_fb
 helper

I missed this one because I was on an older tree.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Cc: xen-devel@lists.xen.org
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180409085134.27321-1-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/xen/xen_drm_front_kms.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 0bd6681fa4f3..a3479eb72d79 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -226,12 +226,6 @@ static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
 	return false;
 }
 
-static int display_prepare_fb(struct drm_simple_display_pipe *pipe,
-			      struct drm_plane_state *plane_state)
-{
-	return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-
 static void display_update(struct drm_simple_display_pipe *pipe,
 			   struct drm_plane_state *old_plane_state)
 {
@@ -294,7 +288,7 @@ static const struct drm_simple_display_pipe_funcs display_funcs = {
 	.mode_valid = display_mode_valid,
 	.enable = display_enable,
 	.disable = display_disable,
-	.prepare_fb = display_prepare_fb,
+	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 	.update = display_update,
 };
 

From 3085982c6b45d7d22f76e3aa018affbc143a7370 Mon Sep 17 00:00:00 2001
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Mon, 23 Apr 2018 14:37:53 +0300
Subject: [PATCH 0480/1286] drm/i915: Use ktime on wait_for
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We use jiffies to determine when a wait expires. However,
Imre found out that jiffies can and will do >1
increments in certain situations [1]. When this happens
in a wait_for loop, we erroneously return a timeout
much earlier than the real wallclock would say.

We can't afford our waits to time out prematurely.
Discard jiffies and change to ktime to detect timeouts.
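
In its simplest form the replacement pattern looks like the sketch
below; the real change is to the __wait_for() macro in the diff, which
additionally handles the sleep back-off.  wait_for_cond() here is only
an illustration:

    static int wait_for_cond(bool (*cond)(void *), void *data,
                             unsigned long timeout_us)
    {
            const ktime_t end = ktime_add_ns(ktime_get_raw(),
                                             1000ll * timeout_us);

            for (;;) {
                    /* always sample the condition before giving up */
                    if (cond(data))
                            return 0;
                    /* raw monotonic time: a burst of jiffies increments
                     * can no longer expire the wait early */
                    if (ktime_after(ktime_get_raw(), end))
                            return -ETIMEDOUT;
                    usleep_range(10, 20);
            }
    }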

v2: added bugzilla entry (Imre), added stable (Chris)

Reported-by: Imre Deak <imre.deak@intel.com>
References: https://lkml.org/lkml/2018/4/18/798 [1]
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105771
Cc: Imre Deak <imre.deak@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423113754.28424-1-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_drv.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 44ed248f1fe9..33ff2638c92b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -49,12 +49,12 @@
  * check the condition before the timeout.
  */
 #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
-	unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1;	\
+	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
 	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
 	int ret__;							\
 	might_sleep();							\
 	for (;;) {							\
-		bool expired__ = time_after(jiffies, timeout__);	\
+		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
 		OP;							\
 		if (COND) {						\
 			ret__ = 0;					\

From 1c3c1dc66a9664a9b7048c3869fa7863be9123a0 Mon Sep 17 00:00:00 2001
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Mon, 23 Apr 2018 14:37:54 +0300
Subject: [PATCH 0481/1286] drm/i915: Add compiler barrier to wait_for

We need to be careful not to let the compiler evaluate
the expiration and the operation on its own terms.

Document and enforce that COND will be evaluated
before checking timeout expiration.

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423113754.28424-2-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/intel_drv.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 33ff2638c92b..58868b93d2a0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -56,6 +56,8 @@
 	for (;;) {							\
 		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
 		OP;							\
+		/* Guarantee COND check prior to timeout */		\
+		barrier();						\
 		if (COND) {						\
 			ret__ = 0;					\
 			break;						\
@@ -96,6 +98,8 @@
 		u64 now = local_clock(); \
 		if (!(ATOMIC)) \
 			preempt_enable(); \
+		/* Guarantee COND check prior to timeout */ \
+		barrier(); \
 		if (COND) { \
 			ret = 0; \
 			break; \

From 247870ac8ea72916cd26f89e9bc211b97141ecd5 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 24 Apr 2018 02:08:39 +0100
Subject: [PATCH 0482/1286] drm/i915: Build request info on stack before printk

printk unhelpfully inserts a '\n' between consecutive calls, and since
our drm_printf wrapper may be emitting into a seq_file instead,
KERN_CONT is not an option. To work with any drm_printf destination, we
need to build up the output into a temporary buffer on the stack and then
feed the complete line in a single call to printk.
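
The general shape of the fix is the usual snprintf accumulation
pattern.  A condensed sketch (the field names are simplified; see the
print_request() change below for the real code):

    char buf[80];
    int x = 0;

    /* append each optional field into the stack buffer ... */
    x += snprintf(buf + x, sizeof(buf) - x, " prio=%d", prio);

    /* ... then emit the complete line with a single drm_printf() call */
    drm_printf(m, "%s%x [%llx:%x]%s\n",
               prefix, seqno, context, fence_seqno, buf);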

Fixes: b7268c5eed0a ("drm/i915: Pack params to engine->schedule() into a struct")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424010839.22860-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index be608f7111f5..66cddd059666 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1113,14 +1113,17 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
 	return which;
 }
 
-static void print_sched_attr(struct drm_printer *m,
-			     const struct drm_i915_private *i915,
-			     const struct i915_sched_attr *attr)
+static int print_sched_attr(struct drm_i915_private *i915,
+			    const struct i915_sched_attr *attr,
+			    char *buf, int x, int len)
 {
 	if (attr->priority == I915_PRIORITY_INVALID)
-		return;
+		return x;
 
-	drm_printf(m, "prio=%d", attr->priority);
+	x += snprintf(buf + x, len - x,
+		      " prio=%d", attr->priority);
+
+	return x;
 }
 
 static void print_request(struct drm_printer *m,
@@ -1128,14 +1131,17 @@ static void print_request(struct drm_printer *m,
 			  const char *prefix)
 {
 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
+	char buf[80];
+	int x = 0;
 
-	drm_printf(m, "%s%x%s [%llx:%x] ",
+	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
+
+	drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
 		   prefix,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
-		   rq->fence.context, rq->fence.seqno);
-	print_sched_attr(m, rq->i915, &rq->sched.attr);
-	drm_printf(m, " @ %dms: %s\n",
+		   rq->fence.context, rq->fence.seqno,
+		   buf,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
 }

From 56021f48dbea69a00b96a53d6450b0950f9c811f Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 24 Apr 2018 09:16:00 +0100
Subject: [PATCH 0483/1286] drm/i915: Don't dump umpteen thousand requests

If we have more than a few, possibly several thousand requests in the
queue, don't show the central portion, just the first few and the last
being executed and/or queued. The first few should be enough to help
identify a problem in execution, and most often comparing the first/last
in the queue is enough to identify problems in the scheduling.

We may need some fine tuning to set MAX_REQUESTS_TO_SHOW for common
debug scenarios, but for the moment if we can avoid spending more
than a few seconds dumping the GPU state that will avoid a nasty
livelock (where hangcheck spends so long dumping the state, it fires
again and starts to dump the state again in parallel, ad infinitum).

v2: Remember to print the last request, not the stale rq iterator, after the loop.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424081600.27544-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 43 +++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 66cddd059666..2398ea71e747 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1307,11 +1307,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		       struct drm_printer *m,
 		       const char *header, ...)
 {
+	const int MAX_REQUESTS_TO_SHOW = 8;
 	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
 	const struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
-	struct i915_request *rq;
+	struct i915_request *rq, *last;
 	struct rb_node *rb;
+	int count;
 
 	if (header) {
 		va_list ap;
@@ -1378,16 +1380,47 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	}
 
 	spin_lock_irq(&engine->timeline->lock);
-	list_for_each_entry(rq, &engine->timeline->requests, link)
-		print_request(m, rq, "\t\tE ");
+
+	last = NULL;
+	count = 0;
+	list_for_each_entry(rq, &engine->timeline->requests, link) {
+		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
+			print_request(m, rq, "\t\tE ");
+		else
+			last = rq;
+	}
+	if (last) {
+		if (count > MAX_REQUESTS_TO_SHOW) {
+			drm_printf(m,
+				   "\t\t...skipping %d executing requests...\n",
+				   count - MAX_REQUESTS_TO_SHOW);
+		}
+		print_request(m, last, "\t\tE ");
+	}
+
+	last = NULL;
+	count = 0;
 	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
 	for (rb = execlists->first; rb; rb = rb_next(rb)) {
 		struct i915_priolist *p =
 			rb_entry(rb, typeof(*p), node);
 
-		list_for_each_entry(rq, &p->requests, sched.link)
-			print_request(m, rq, "\t\tQ ");
+		list_for_each_entry(rq, &p->requests, sched.link) {
+			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
+				print_request(m, rq, "\t\tQ ");
+			else
+				last = rq;
+		}
 	}
+	if (last) {
+		if (count > MAX_REQUESTS_TO_SHOW) {
+			drm_printf(m,
+				   "\t\t...skipping %d queued requests...\n",
+				   count - MAX_REQUESTS_TO_SHOW);
+		}
+		print_request(m, last, "\t\tQ ");
+	}
+
 	spin_unlock_irq(&engine->timeline->lock);
 
 	spin_lock_irq(&b->rb_lock);

From aaab22bcd157a0d081d3a747475fb26f4c549157 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 24 Apr 2018 12:52:36 +0100
Subject: [PATCH 0484/1286] drm/i915: Skip printing global offsets for
 per-engine scratch pages

Knowing the offset of the per-engine scratch/HWS page during boot is not
very informative, so remove the DRM_DEBUG.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424115236.2022-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 2398ea71e747..58be7fac5b8c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -541,8 +541,6 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
 		goto err_unref;
 
 	engine->scratch = vma;
-	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-			 engine->name, i915_ggtt_offset(vma));
 	return 0;
 
 err_unref:
@@ -636,9 +634,6 @@ static int init_status_page(struct intel_engine_cs *engine)
 	engine->status_page.vma = vma;
 	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
 	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
-
-	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
-			 engine->name, i915_ggtt_offset(vma));
 	return 0;
 
 err_unpin:

From df9e6521749ab33cde306e8a4350b0ac7889220a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 18 Apr 2018 16:41:58 -0700
Subject: [PATCH 0485/1286] drm/i915/fbdev: Enable late fbdev initial
 configuration
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If the initial fbdev configuration (intel_fbdev_initial_config()) runs
and there is still no sink connected, it will cause
drm_fb_helper_initial_config() to return 0 as if no error happened (but
internally the return is -EAGAIN).  Because no framebuffer was
allocated, when a sink is connected intel_fbdev_output_poll_changed()
will not execute drm_fb_helper_hotplug_event(), which would trigger
another try at the initial fbdev configuration.

So allow drm_fb_helper_hotplug_event() to be executed when there
is no framebuffer allocated and fbdev has not been set up yet.

This issue also happens when an MST DP sink has been connected since
boot, as the MST topology is discovered in parallel: if
intel_fbdev_initial_config() is executed before the first MST sink is
discovered, it will cause this same issue.

This is a follow-up patch of
https://patchwork.freedesktop.org/patch/196089/

Changes from v1:
- not creating a dumb framebuffer anymore, instead just allowing
  drm_fb_helper_hotplug_event() to execute when fbdev is not set up yet.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104158
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104425
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: stable@vger.kernel.org # v4.15+
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Tested-by: Paul Menzel <pmenzel@molgen.mpg.de>
Tested-by: frederik <frederik.schwan@linux.com> # 4.15.17
Tested-by: Ian Pilcher <arequipeno@gmail.com>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418234158.9388-1-jose.souza@intel.com
---
 drivers/gpu/drm/i915/intel_fbdev.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 65a3313723c9..c1c31b429366 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -807,7 +807,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 		return;
 
 	intel_fbdev_sync(ifbdev);
-	if (ifbdev->vma)
+	if (ifbdev->vma || ifbdev->helper.deferred_setup)
 		drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 

From a3997159133d56e444f0c0f56ab1ae59863912a8 Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
Date: Tue, 24 Apr 2018 08:15:45 -0500
Subject: [PATCH 0486/1286] drm/i915/selftests: Fix uninitialized variable

There is a potential execution path in which the variable err is
returned without having been properly initialized.

Fix this by initializing variable err to 0.

Addresses-Coverity-ID: 1468362 ("Uninitialized scalar variable")
Fixes: f4ecfbfc32ed ("drm/i915: Check whitelist registers across resets")
Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131545.GA4053@embeddedor.com
---
 drivers/gpu/drm/i915/selftests/intel_workarounds.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 5455b2626627..17444a3abbb9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -239,7 +239,7 @@ static int live_reset_whitelist(void *arg)
 	struct intel_engine_cs *engine = i915->engine[RCS];
 	struct i915_gpu_error *error = &i915->gpu_error;
 	struct whitelist w;
-	int err;
+	int err = 0;
 
 	/* If we reset the gpu, we should not lose the RING_NONPRIV */
 

From 36a501a199336fbf87299ebbe598d0af00922949 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 24 Apr 2018 15:29:45 +0100
Subject: [PATCH 0487/1286] drm/i915/breadcrumbs: Keep the fake irq armed
 across reset

Instead of synchronously cancelling the timer and re-enabling it inside
the reset callbacks, keep the timer enabled and let it die on its next
wakeup if no longer required. This allows
intel_engine_reset_breadcrumbs() to be used from an atomic
(timer/softirq) context, such as is required for resetting an engine.

It also allows us to react better to the user poking around debugfs for
testing missed irqs.

v2: Tighten the order of del_timer_sync as the fake_irq timer
may trigger the hangcheck timer, and so we should cancel it first and
then cancel the hangcheck (Mika)
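
For illustration only (not the i915 code), the general shape of a timer
callback that disarms itself on its next expiry instead of being
cancelled synchronously is sketched below; 'struct example' and its
fields are made up for the example.

  struct example {
          struct timer_list timer;
          bool keep_running;
  };

  static void example_timer_fn(struct timer_list *t)
  {
          struct example *e = from_timer(e, t, timer);

          if (!READ_ONCE(e->keep_running))
                  return;                 /* simply let the timer expire */

          mod_timer(&e->timer, jiffies + 1);      /* otherwise re-arm */
  }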

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424142945.6787-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_breadcrumbs.c | 29 ++++++++++++++++--------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 671a6d61e29d..5ce4f51232f5 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -130,11 +130,12 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
 
 static void intel_breadcrumbs_fake_irq(struct timer_list *t)
 {
-	struct intel_engine_cs *engine = from_timer(engine, t,
-						    breadcrumbs.fake_irq);
+	struct intel_engine_cs *engine =
+		from_timer(engine, t, breadcrumbs.fake_irq);
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
-	/* The timer persists in case we cannot enable interrupts,
+	/*
+	 * The timer persists in case we cannot enable interrupts,
 	 * or if we have previously seen seqno/interrupt incoherency
 	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
 	 * Here the worker will wake up every jiffie in order to kick the
@@ -148,6 +149,12 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
 	if (!b->irq_armed)
 		return;
 
+	/* If the user has disabled the fake-irq, restore the hangchecking */
+	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
+		mod_timer(&b->hangcheck, wait_timeout());
+		return;
+	}
+
 	mod_timer(&b->fake_irq, jiffies + 1);
 }
 
@@ -831,8 +838,8 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
+	del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
 	del_timer_sync(&b->hangcheck);
-	del_timer_sync(&b->fake_irq);
 	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
 }
 
@@ -840,15 +847,22 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
-	cancel_fake_irq(engine);
 	spin_lock_irq(&b->irq_lock);
 
+	/*
+	 * Leave the fake_irq timer enabled (if it is running), but clear the
+	 * bit so that it turns itself off on its next wake up and goes back
+	 * to the long hangcheck interval if still required.
+	 */
+	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+
 	if (b->irq_enabled)
 		irq_enable(engine);
 	else
 		irq_disable(engine);
 
-	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
+	/*
+	 * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
 	 * GPU is active and may have already executed the MI_USER_INTERRUPT
 	 * before the CPU is ready to receive. However, the engine is currently
 	 * idle (we haven't started it yet), there is no possibility for a
@@ -857,9 +871,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	 */
 	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
 
-	if (b->irq_armed)
-		enable_fake_irq(b);
-
 	spin_unlock_irq(&b->irq_lock);
 }
 

From 6e35fed963b4de28930e126a32600543f1662a3d Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Tue, 24 Apr 2018 16:22:42 +0200
Subject: [PATCH 0488/1286] drm: Don't EXPORT drm_add/reset_display_info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Only used within drm.ko, no need to tempt drivers.

Cc: Keith Packard <keithp@keithp.com>
Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424142242.12093-1-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/drm_crtc_internal.h | 2 ++
 drivers/gpu/drm/drm_edid.c          | 2 --
 include/drm/drm_edid.h              | 2 --
 3 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 3c2b82865ad2..5d307b23a4e6 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -220,3 +220,5 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 
 /* drm_edid.c */
 void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
+void drm_reset_display_info(struct drm_connector *connector);
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 134069f36482..61dd9a2fbe5b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4454,7 +4454,6 @@ drm_reset_display_info(struct drm_connector *connector)
 
 	info->non_desktop = 0;
 }
-EXPORT_SYMBOL_GPL(drm_reset_display_info);
 
 u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
 {
@@ -4538,7 +4537,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
 		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
 	return quirks;
 }
-EXPORT_SYMBOL_GPL(drm_add_display_info);
 
 static int validate_displayid(u8 *displayid, int length, int idx)
 {
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 8d89a9c3748d..b25d12ef120a 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -465,8 +465,6 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
 				     struct i2c_adapter *adapter);
 struct edid *drm_edid_duplicate(const struct edid *edid);
-void drm_reset_display_info(struct drm_connector *connector);
-u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
 int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match);

From 14d4e522f0cbef2a10c3e5c243786a84d91cfb7b Mon Sep 17 00:00:00 2001
From: Lyude Paul <lyude@redhat.com>
Date: Wed, 11 Apr 2018 19:42:40 -0400
Subject: [PATCH 0489/1286] drm/atomic: Print debug message on atomic check
 failure
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Does what it says on the label; debugging atomic check failures is a
little confusing otherwise.

Cc: Manasi Navare <manasi.d.navare@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180411234302.2896-2-lyude@redhat.com
---
 drivers/gpu/drm/drm_atomic.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7d25c42f22db..0da8c5e134b2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1702,11 +1702,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 		}
 	}
 
-	if (config->funcs->atomic_check)
+	if (config->funcs->atomic_check) {
 		ret = config->funcs->atomic_check(state->dev, state);
 
-	if (ret)
-		return ret;
+		if (ret) {
+			DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
+					 state, ret);
+			return ret;
+		}
+	}
 
 	if (!state->allow_modeset) {
 		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {

From ffdd073681589d76055a4c8cc8adb187b1f599bf Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Tue, 13 Feb 2018 17:48:54 +0000
Subject: [PATCH 0490/1286] dt-bindings: adv7511: Extend bindings to allow
 specifying slave map addresses

The ADV7511 has four 256-byte maps that can be accessed via the main I2C
ports. Each map has its own I2C address and acts as a standard slave
device on the I2C bus.

Extend the device tree node bindings to be able to override the default
addresses so that address conflicts with other devices on the same bus
may be resolved at the board description level.

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Rob Herring <robh@kernel.org>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Link: https://patchwork.freedesktop.org/patch/msgid/1518544137-2742-3-git-send-email-kbingham@kernel.org
---
 .../bindings/display/bridge/adi,adv7511.txt    | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 0047b1394c70..2c887536258c 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -14,7 +14,13 @@ Required properties:
 		"adi,adv7513"
 		"adi,adv7533"
 
-- reg: I2C slave address
+- reg: I2C slave addresses
+  The ADV7511 internal registers are split into four pages exposed through
+  different I2C addresses, creating four register maps. Each map has it own
+  I2C address and acts as a standard slave device on the I2C bus. The main
+  address is mandatory, others are optional and revert to defaults if not
+  specified.
+
 
 The ADV7511 supports a large number of input data formats that differ by their
 color depth, color format, clock mode, bit justification and random
@@ -70,6 +76,9 @@ Optional properties:
   rather than generate its own timings for HDMI output.
 - clocks: from common clock binding: reference to the CEC clock.
 - clock-names: from common clock binding: must be "cec".
+- reg-names : Names of maps with programmable addresses.
+	It can contain any map needing a non-default address.
+	Possible maps names are : "main", "edid", "cec", "packet"
 
 Required nodes:
 
@@ -88,7 +97,12 @@ Example
 
 	adv7511w: hdmi@39 {
 		compatible = "adi,adv7511w";
-		reg = <39>;
+		/*
+		 * The EDID page will be accessible on address 0x66 on the I2C
+		 * bus. All other maps continue to use their default addresses.
+		 */
+		reg = <0x39>, <0x66>;
+		reg-names = "main", "edid";
 		interrupt-parent = <&gpio3>;
 		interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
 		clocks = <&cec_clock>;

From 680532c50bca0f591ea90f4e820c5c1ce48adbfd Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Tue, 13 Feb 2018 17:48:57 +0000
Subject: [PATCH 0491/1286] drm: adv7511: Add support for
 i2c_new_secondary_device

The ADV7511 has four 256-byte maps that can be accessed via the main I2C
ports. Each map has its own I2C address and acts as a standard slave
device on the I2C bus.

Allow a device tree node to override the default addresses so that
address conflicts with other devices on the same bus may be resolved at
the board description level.
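
As a sketch of the idea (error handling elided, 'main_client' assumed to
be the already-probed main I2C client), i2c_new_secondary_device() looks
up an optional reg/reg-names entry in the client's DT node and falls
back to the given default address when none is specified:

  struct i2c_client *edid;

  /* Acquire the "edid" map client, using 0x3f unless the DT node
   * overrides it via reg/reg-names. The helper returns NULL on failure.
   */
  edid = i2c_new_secondary_device(main_client, "edid", 0x3f);
  if (!edid)
          return -EINVAL;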

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Link: https://patchwork.freedesktop.org/patch/msgid/1518544137-2742-6-git-send-email-kbingham@kernel.org
---
 drivers/gpu/drm/bridge/adv7511/adv7511.h     |  6 +++
 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 42 +++++++++++++-------
 2 files changed, 33 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index d034b2cb5eee..73d8ccb97742 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -93,6 +93,11 @@
 #define ADV7511_REG_CHIP_ID_HIGH		0xf5
 #define ADV7511_REG_CHIP_ID_LOW			0xf6
 
+/* Hardware defined default addresses for I2C register maps */
+#define ADV7511_CEC_I2C_ADDR_DEFAULT		0x3c
+#define ADV7511_EDID_I2C_ADDR_DEFAULT		0x3f
+#define ADV7511_PACKET_I2C_ADDR_DEFAULT		0x38
+
 #define ADV7511_CSC_ENABLE			BIT(7)
 #define ADV7511_CSC_UPDATE_MODE			BIT(5)
 
@@ -321,6 +326,7 @@ enum adv7511_type {
 struct adv7511 {
 	struct i2c_client *i2c_main;
 	struct i2c_client *i2c_edid;
+	struct i2c_client *i2c_packet;
 	struct i2c_client *i2c_cec;
 
 	struct regmap *regmap;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index efa29db5fc2b..5f749cdd2ca2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -586,7 +586,7 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
 	/* Reading the EDID only works if the device is powered */
 	if (!adv7511->powered) {
 		unsigned int edid_i2c_addr =
-					(adv7511->i2c_main->addr << 1) + 4;
+					(adv7511->i2c_edid->addr << 1);
 
 		__adv7511_power_on(adv7511);
 
@@ -969,10 +969,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
 {
 	int ret;
 
-	adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
-				     adv->i2c_main->addr - 1);
+	adv->i2c_cec = i2c_new_secondary_device(adv->i2c_main, "cec",
+						ADV7511_CEC_I2C_ADDR_DEFAULT);
 	if (!adv->i2c_cec)
-		return -ENOMEM;
+		return -EINVAL;
 	i2c_set_clientdata(adv->i2c_cec, adv);
 
 	adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
@@ -1082,8 +1082,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 	struct adv7511_link_config link_config;
 	struct adv7511 *adv7511;
 	struct device *dev = &i2c->dev;
-	unsigned int main_i2c_addr = i2c->addr << 1;
-	unsigned int edid_i2c_addr = main_i2c_addr + 4;
 	unsigned int val;
 	int ret;
 
@@ -1153,23 +1151,34 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 	if (ret)
 		goto uninit_regulators;
 
-	regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
-	regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
-		     main_i2c_addr - 0xa);
-	regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
-		     main_i2c_addr - 2);
-
 	adv7511_packet_disable(adv7511, 0xffff);
 
-	adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+	adv7511->i2c_edid = i2c_new_secondary_device(i2c, "edid",
+					ADV7511_EDID_I2C_ADDR_DEFAULT);
 	if (!adv7511->i2c_edid) {
-		ret = -ENOMEM;
+		ret = -EINVAL;
 		goto uninit_regulators;
 	}
 
+	regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+		     adv7511->i2c_edid->addr << 1);
+
+	adv7511->i2c_packet = i2c_new_secondary_device(i2c, "packet",
+					ADV7511_PACKET_I2C_ADDR_DEFAULT);
+	if (!adv7511->i2c_packet) {
+		ret = -EINVAL;
+		goto err_i2c_unregister_edid;
+	}
+
+	regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+		     adv7511->i2c_packet->addr << 1);
+
 	ret = adv7511_init_cec_regmap(adv7511);
 	if (ret)
-		goto err_i2c_unregister_edid;
+		goto err_i2c_unregister_packet;
+
+	regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
+		     adv7511->i2c_cec->addr << 1);
 
 	INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
 
@@ -1207,6 +1216,8 @@ err_unregister_cec:
 	i2c_unregister_device(adv7511->i2c_cec);
 	if (adv7511->cec_clk)
 		clk_disable_unprepare(adv7511->cec_clk);
+err_i2c_unregister_packet:
+	i2c_unregister_device(adv7511->i2c_packet);
 err_i2c_unregister_edid:
 	i2c_unregister_device(adv7511->i2c_edid);
 uninit_regulators:
@@ -1233,6 +1244,7 @@ static int adv7511_remove(struct i2c_client *i2c)
 
 	cec_unregister_adapter(adv7511->cec_adap);
 
+	i2c_unregister_device(adv7511->i2c_packet);
 	i2c_unregister_device(adv7511->i2c_edid);
 
 	return 0;

From 1e10911539a61a048524bbb54ccfb6329b30b6d4 Mon Sep 17 00:00:00 2001
From: "Gomonovych, Vasyl" <gomonovych@gmail.com>
Date: Tue, 21 Nov 2017 23:31:33 +0100
Subject: [PATCH 0492/1286] drm/mediatek: Use ERR_CAST instead of
 ERR_PTR(PTR_ERR())

Use ERR_CAST inlined function instead of ERR_PTR(PTR_ERR(...)).

drivers/gpu/drm/mediatek/mtk_drm_gem.c:223:9-16: WARNING: ERR_CAST can be used with mtk_gem
Generated by: scripts/coccinelle/api/err_cast.cocci

Signed-off-by: Vasyl Gomonovych <gomonovych@gmail.com>
Acked-by: Philipp Zabel <p.zabel@pengutronix.de>
Signed-off-by: CK Hu <ck.hu@mediatek.com>
---
 drivers/gpu/drm/mediatek/mtk_drm_gem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index f595ac816b55..259b7b0de1d2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -220,7 +220,7 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
 	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
 
 	if (IS_ERR(mtk_gem))
-		return ERR_PTR(PTR_ERR(mtk_gem));
+		return ERR_CAST(mtk_gem);
 
 	expected = sg_dma_address(sg->sgl);
 	for_each_sg(sg->sgl, s, sg->nents, i) {

From 602b14a0c4eb01cc64ca7c851135c0ba0bd7e980 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:14:40 +0200
Subject: [PATCH 0493/1286] drm/ast: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.
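
For reference, the callback signature expected by struct
drm_connector_helper_funcs at the time of this series is along these
lines:

  /* Prototype the driver implementations are expected to match. */
  enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
                                     struct drm_display_mode *mode);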

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131443.1810-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/ast/ast_mode.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 831b73392d82..036dff8a1f33 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -799,7 +799,7 @@ static int ast_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
-static int ast_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
 			  struct drm_display_mode *mode)
 {
 	struct ast_private *ast = connector->dev->dev_private;

From 0e19b023414e9f8f75e9b5aafda585ca490a70bb Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:14:47 +0200
Subject: [PATCH 0494/1286] drm/bridge: adv7511: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131450.1910-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 5f749cdd2ca2..2614cea538e2 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -654,7 +654,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
 	return status;
 }
 
-static int adv7511_mode_valid(struct adv7511 *adv7511,
+static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
 			      struct drm_display_mode *mode)
 {
 	if (mode->clock > 165000)

From e14d509d25aff0d85c0adce52b891935234feb3f Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:02 +0200
Subject: [PATCH 0495/1286] drm/hisilicon: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131504.2159-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index f4eba87c96f3..d2f4749ebf8d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -27,7 +27,7 @@ static int hibmc_connector_get_modes(struct drm_connector *connector)
 	return drm_add_modes_noedid(connector, 800, 600);
 }
 
-static int hibmc_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
 				      struct drm_display_mode *mode)
 {
 	return MODE_OK;

From c69e52dea3ecbbc9c2365b8fc633ae62198c4801 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:06 +0200
Subject: [PATCH 0496/1286] drm/mgag200: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131508.2210-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/mgag200/mgag200_mode.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fb50a9ddaae8..8918539a19aa 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1586,7 +1586,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
 
 #define MODE_BANDWIDTH	MODE_BAD
 
-static int mga_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;

From b9d9168a2e5de35d2f29dccda225a0c4f3668bfe Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:17 +0200
Subject: [PATCH 0497/1286] drm/udl: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131520.2409-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/udl/udl_connector.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index c3dc1fd20cb4..09dc585aa46f 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -105,7 +105,7 @@ static int udl_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
-static int udl_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 			  struct drm_display_mode *mode)
 {
 	struct udl_device *udl = connector->dev->dev_private;

From c24c88c4df2eb89f71af87ee00e75499a369538d Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:14:45 +0200
Subject: [PATCH 0498/1286] drm/bochs: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131445.1861-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/bochs/bochs_kms.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index a24a18fbd65a..233980a78591 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -188,7 +188,7 @@ static int bochs_connector_get_modes(struct drm_connector *connector)
 	return count;
 }
 
-static int bochs_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
 				      struct drm_display_mode *mode)
 {
 	struct bochs_device *bochs =

From 114b3ac8702c3c01aac3b0a6b30069ae2c4d53d5 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:14:52 +0200
Subject: [PATCH 0499/1286] drm/bridge: tc358767: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131453.1961-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/bridge/tc358767.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 08ab7d6aea65..0fd9cf27542c 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1102,7 +1102,7 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
 	return true;
 }
 
-static int tc_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
 				   struct drm_display_mode *mode)
 {
 	/* DPI interface clock limitation: upto 154 MHz */

From 67772782f66392d9e4c92004055710ac01510906 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:14:54 +0200
Subject: [PATCH 0500/1286] drm/gma500: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131455.2011-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/gma500/cdv_intel_crt.c    | 2 +-
 drivers/gpu/drm/gma500/cdv_intel_dp.c     | 2 +-
 drivers/gpu/drm/gma500/cdv_intel_hdmi.c   | 2 +-
 drivers/gpu/drm/gma500/cdv_intel_lvds.c   | 2 +-
 drivers/gpu/drm/gma500/mdfld_dsi_output.c | 2 +-
 drivers/gpu/drm/gma500/oaktrail_hdmi.c    | 2 +-
 drivers/gpu/drm/gma500/psb_intel_sdvo.c   | 2 +-
 7 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index b837e7a92196..cb5a14b7ec7f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -64,7 +64,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
 	REG_WRITE(reg, temp);
 }
 
-static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status cdv_intel_crt_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 {
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index a4bb89b7878f..5ea785f07ba8 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -505,7 +505,7 @@ static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
 	msleep(intel_dp->backlight_off_delay);
 }
 
-static int
+static enum drm_mode_status
 cdv_intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 563f193fcfac..f0878998526a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -223,7 +223,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
 	if (mode->clock > 165000)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index e64960db3224..de9531caaca0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -244,7 +244,7 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
 {
 }
 
-static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *connector,
 			      struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index acb3848ef1c9..fe020926ea4f 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -346,7 +346,7 @@ static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
-static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
 						struct drm_display_mode *mode)
 {
 	struct mdfld_dsi_connector *dsi_connector =
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 8b2eb32ee988..78566a80ad25 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -509,7 +509,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
 	HDMI_WRITE(HDMI_VIDEO_REG, temp);
 }
 
-static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status oaktrail_hdmi_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 {
 	if (mode->clock > 165000)
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 84507912be84..8dc2b19f913b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1157,7 +1157,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 	return;
 }
 
-static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status psb_intel_sdvo_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
 	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);

From 2ea009095c6e7396915a1d0dd480c41f02985f79 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:14:57 +0200
Subject: [PATCH 0501/1286] drm/gma500: fix psb_intel_lvds_mode_valid()'s
 return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method, psb_intel_lvds_mode_valid(), uses an 'int' for it.

Fix this by using 'enum drm_mode_status' for psb_intel_lvds_mode_valid().

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131458.2060-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/gma500/psb_intel_drv.h  | 2 +-
 drivers/gpu/drm/gma500/psb_intel_lvds.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index e8e4ea14b12b..e05e5399af2d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev,
 extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
 				      const struct drm_display_mode *mode,
 				      struct drm_display_mode *adjusted_mode);
-extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
 				     struct drm_display_mode *mode);
 extern int psb_intel_lvds_set_property(struct drm_connector *connector,
 					struct drm_property *property,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index be3eefec5152..8baf6325c6e4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
 	}
 }
 
-int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
 	struct drm_psb_private *dev_priv = connector->dev->dev_private;

From e0d92e1668a81b2f010eb11649df9a90faee8294 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:15 +0200
Subject: [PATCH 0502/1286] drm/qxl: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131515.2360-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/qxl/qxl_display.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index ecb35ed0eac8..820cbca3bf6e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -1037,7 +1037,7 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int qxl_conn_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
 			       struct drm_display_mode *mode)
 {
 	struct drm_device *ddev = connector->dev;

From f555828ed98cb6da94facf83ec75f7581f5ac2e3 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:21 +0200
Subject: [PATCH 0503/1286] drm/i2c: tda998x: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131522.2460-1-luc.vanoostenryck@gmail.com
---
 drivers/gpu/drm/i2c/tda998x_drv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 9e67a7b4e3a4..421c8a72369e 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1106,7 +1106,7 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
 	return n;
 }
 
-static int tda998x_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
 					struct drm_display_mode *mode)
 {
 	/* TDA19988 dotclock can go up to 165MHz */

From 16d25ea09404a5b4a732a913557de8860fe940c8 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:24 +0200
Subject: [PATCH 0504/1286] drm/virtio: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131524.2510-1-luc.vanoostenryck@gmail.com
Link: https://patchwork.freedesktop.org/patch/msgid/20180424131515.2360-1-luc.vanoostenryck@gmail.com
Cc: David Airlie <airlied@linux.ie>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: virtualization@lists.linux-foundation.org
---
 drivers/gpu/drm/virtio/virtgpu_display.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 8cc8c34d67f5..a5edd86603d9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -208,7 +208,7 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
 	return count;
 }
 
-static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
 				      struct drm_display_mode *mode)
 {
 	struct virtio_gpu_output *output =

From 1f177a131b2c106dd15ab51dda5c50f80b70cc72 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 25 Apr 2018 13:37:18 +0100
Subject: [PATCH 0505/1286] drm/i915: Use memset64() to align the ring with
 MI_NOOP

When filling the ring to align the emit pointer to the next cacheline,
use memset64() rather than open-coding it. As we know that we always
have an even number of dwords, we can replace the dword loop with the
qword equivalent.

v2: s/0/MI_NOOP<<32 | MI_NOOP/
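
For clarity, the memset64() call introduced below is roughly equivalent
to the following open-coded qword loop (illustration only):

  u64 *p = cs;
  u64 v = (u64)MI_NOOP << 32 | MI_NOOP;   /* two MI_NOOP dwords per qword */
  size_t n;

  for (n = 0; n < num_dwords / 2; n++)
          p[n] = v;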

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180425123718.16366-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c68ac605b8a9..c06c22c953b3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1717,22 +1717,24 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct i915_request *rq)
 {
-	int num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
-	u32 *cs;
+	int num_dwords;
+	void *cs;
 
+	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
 	if (num_dwords == 0)
 		return 0;
 
-	num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords;
+	num_dwords = CACHELINE_DWORDS - num_dwords;
+	GEM_BUG_ON(num_dwords & 1);
+
 	cs = intel_ring_begin(rq, num_dwords);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	while (num_dwords--)
-		*cs++ = MI_NOOP;
-
+	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
 	intel_ring_advance(rq, cs);
 
+	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
 	return 0;
 }
 

From f6f109155f0deca7fae7ab503d1f12a806c2c095 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 25 Apr 2018 15:23:34 +0100
Subject: [PATCH 0506/1286] drm/i915: Remove obsolete min/max freq setters from
 debugfs

A more complete, and more importantly stable, interface for controlling
the RPS frequency range is available in sysfs, obsoleting the unstable
debugfs.

Its presence seems to trick people into using it, forgetting that it is
not ABI.

References: https://bugs.freedesktop.org/show_bug.cgi?id=106237
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180425142334.27113-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c | 115 ----------------------------
 1 file changed, 115 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2f05f5262bba..1c88805d3354 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4204,119 +4204,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
 			i915_drop_caches_get, i915_drop_caches_set,
 			"0x%08llx\n");
 
-static int
-i915_max_freq_get(void *data, u64 *val)
-{
-	struct drm_i915_private *dev_priv = data;
-
-	if (INTEL_GEN(dev_priv) < 6)
-		return -ENODEV;
-
-	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
-	return 0;
-}
-
-static int
-i915_max_freq_set(void *data, u64 val)
-{
-	struct drm_i915_private *dev_priv = data;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 hw_max, hw_min;
-	int ret;
-
-	if (INTEL_GEN(dev_priv) < 6)
-		return -ENODEV;
-
-	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
-
-	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
-	if (ret)
-		return ret;
-
-	/*
-	 * Turbo will still be enabled, but won't go above the set value.
-	 */
-	val = intel_freq_opcode(dev_priv, val);
-
-	hw_max = rps->max_freq;
-	hw_min = rps->min_freq;
-
-	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
-		mutex_unlock(&dev_priv->pcu_lock);
-		return -EINVAL;
-	}
-
-	rps->max_freq_softlimit = val;
-
-	if (intel_set_rps(dev_priv, val))
-		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
-
-	mutex_unlock(&dev_priv->pcu_lock);
-
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
-			i915_max_freq_get, i915_max_freq_set,
-			"%llu\n");
-
-static int
-i915_min_freq_get(void *data, u64 *val)
-{
-	struct drm_i915_private *dev_priv = data;
-
-	if (INTEL_GEN(dev_priv) < 6)
-		return -ENODEV;
-
-	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
-	return 0;
-}
-
-static int
-i915_min_freq_set(void *data, u64 val)
-{
-	struct drm_i915_private *dev_priv = data;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 hw_max, hw_min;
-	int ret;
-
-	if (INTEL_GEN(dev_priv) < 6)
-		return -ENODEV;
-
-	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
-
-	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
-	if (ret)
-		return ret;
-
-	/*
-	 * Turbo will still be enabled, but won't go below the set value.
-	 */
-	val = intel_freq_opcode(dev_priv, val);
-
-	hw_max = rps->max_freq;
-	hw_min = rps->min_freq;
-
-	if (val < hw_min ||
-	    val > hw_max || val > rps->max_freq_softlimit) {
-		mutex_unlock(&dev_priv->pcu_lock);
-		return -EINVAL;
-	}
-
-	rps->min_freq_softlimit = val;
-
-	if (intel_set_rps(dev_priv, val))
-		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
-
-	mutex_unlock(&dev_priv->pcu_lock);
-
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
-			i915_min_freq_get, i915_min_freq_set,
-			"%llu\n");
-
 static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
@@ -4878,8 +4765,6 @@ static const struct i915_debugfs_files {
 	const struct file_operations *fops;
 } i915_debugfs_files[] = {
 	{"i915_wedged", &i915_wedged_fops},
-	{"i915_max_freq", &i915_max_freq_fops},
-	{"i915_min_freq", &i915_min_freq_fops},
 	{"i915_cache_sharing", &i915_cache_sharing_fops},
 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},

From ff047a87cfacf0a530960171a0779f5b19a3b1b8 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 24 Apr 2018 14:39:55 -0700
Subject: [PATCH 0507/1286] drm/i915/icl: Correctly clear lost ctx-switch
 interrupts across reset for Gen11

Interrupt handling in Gen11 is quite different from previous platforms.

v2: Rebased (Michel)
v3: Rebased with wiggle
v4: Rebased, remove TODO warning correctly (Daniele)
v5: Rebased, made gen11_gtiir const while at it (Michel)
v6: Rebased
v7: Adapt to the style currently in upstream

Suggested-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1524605995-22324-1-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_irq.c  |  6 ++--
 drivers/gpu/drm/i915/intel_drv.h |  3 ++
 drivers/gpu/drm/i915/intel_lrc.c | 60 ++++++++++++++++++++++----------
 3 files changed, 48 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 96547e091e23..f9bc3aaa90d0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -247,9 +247,9 @@ static u32
 gen11_gt_engine_identity(struct drm_i915_private * const i915,
 			 const unsigned int bank, const unsigned int bit);
 
-static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
-				const unsigned int bank,
-				const unsigned int bit)
+bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+			 const unsigned int bank,
+			 const unsigned int bit)
 {
 	void __iomem * const regs = i915->regs;
 	u32 dw;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 58868b93d2a0..9bba0354ccd3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1333,6 +1333,9 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
 void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
 
 /* i915_irq.c */
+bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+			 const unsigned int bank,
+			 const unsigned int bit);
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 029901a8fa38..87eb3a688424 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -789,22 +789,9 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 
 static void clear_gtiir(struct intel_engine_cs *engine)
 {
-	static const u8 gtiir[] = {
-		[RCS]  = 0,
-		[BCS]  = 0,
-		[VCS]  = 1,
-		[VCS2] = 1,
-		[VECS] = 3,
-	};
 	struct drm_i915_private *dev_priv = engine->i915;
 	int i;
 
-	/* TODO: correctly reset irqs for gen11 */
-	if (WARN_ON_ONCE(INTEL_GEN(engine->i915) >= 11))
-		return;
-
-	GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
-
 	/*
 	 * Clear any pending interrupt state.
 	 *
@@ -812,13 +799,50 @@ static void clear_gtiir(struct intel_engine_cs *engine)
 	 * double buffered, and so if we only reset it once there may
 	 * still be an interrupt pending.
 	 */
-	for (i = 0; i < 2; i++) {
-		I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+	if (INTEL_GEN(dev_priv) >= 11) {
+		static const struct {
+			u8 bank;
+			u8 bit;
+		} gen11_gtiir[] = {
+			[RCS] = {0, GEN11_RCS0},
+			[BCS] = {0, GEN11_BCS},
+			[_VCS(0)] = {1, GEN11_VCS(0)},
+			[_VCS(1)] = {1, GEN11_VCS(1)},
+			[_VCS(2)] = {1, GEN11_VCS(2)},
+			[_VCS(3)] = {1, GEN11_VCS(3)},
+			[_VECS(0)] = {1, GEN11_VECS(0)},
+			[_VECS(1)] = {1, GEN11_VECS(1)},
+		};
+		unsigned long irqflags;
+
+		GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		for (i = 0; i < 2; i++) {
+			gen11_reset_one_iir(dev_priv,
+					    gen11_gtiir[engine->id].bank,
+					    gen11_gtiir[engine->id].bit);
+		}
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	} else {
+		static const u8 gtiir[] = {
+			[RCS]  = 0,
+			[BCS]  = 0,
+			[VCS]  = 1,
+			[VCS2] = 1,
+			[VECS] = 3,
+		};
+
+		GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+		for (i = 0; i < 2; i++) {
+			I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+				   engine->irq_keep_mask);
+			POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
+		}
+		GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
 			   engine->irq_keep_mask);
-		POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
 	}
-	GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
-		   engine->irq_keep_mask);
 }
 
 static void reset_irq(struct intel_engine_cs *engine)

From 664991010ff34ce2a51cea95dfdfb711be8c780f Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Wed, 25 Apr 2018 13:17:42 +0200
Subject: [PATCH 0508/1286] drm/todo: Fallout from v3d review

Bunch of ideas from Eric and me on what we could do to make gem gpu
rendering drivers a notch simpler to type.

v2: Fix typo (Eric).

Cc: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180425111742.5872-1-daniel.vetter@ffwll.ch
---
 Documentation/gpu/todo.rst | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index f4d0b3476d9c..a7c150d6b63f 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -212,6 +212,24 @@ probably use drm_fb_helper_fbdev_teardown().
 
 Contact: Maintainer of the driver you plan to convert
 
+Clean up mmap forwarding
+------------------------
+
+A lot of drivers forward gem mmap calls to dma-buf mmap for imported buffers.
+And also a lot of them forward dma-buf mmap to the gem mmap implementations.
+Would be great to refactor this all into a set of small common helpers.
+
+Contact: Daniel Vetter
+
+Put a reservation_object into drm_gem_object
+--------------------------------------------
+
+This would remove the need for the ->gem_prime_res_obj callback. It would also
+allow us to implement generic helpers for waiting for a bo, allowing for quite a
+bit of refactoring in the various wait ioctl implementations.
+
+Contact: Daniel Vetter
+
 idr_init_base()
 ---------------
 

From 741c3aeb82c78e173aa7155aaffb971e5c73ab3c Mon Sep 17 00:00:00 2001
From: Philippe CORNU <philippe.cornu@st.com>
Date: Thu, 25 Jan 2018 16:55:04 +0100
Subject: [PATCH 0509/1286] drm/bridge/synopsys: dsi: use adjusted_mode in
 mode_set
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The "adjusted_mode" clock value (ie the real pixel clock) is more
accurate than "mode" clock value (ie the panel/bridge requested
clock value). It offers a better preciseness for timing
computations and allows to reduce the extra dsi bandwidth in
burst mode (from ~20% to ~10-12%, hw platform dependent).
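
For context, the bridge mode_set hook receives both modes; its signature
at the time of this series is expected to be along these lines, and the
change below simply bases all timing computations on the second
argument:

  void (*mode_set)(struct drm_bridge *bridge,
                   struct drm_display_mode *mode,
                   struct drm_display_mode *adjusted_mode);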

Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Tested-by: Brian Norris <briannorris@chromium.org>
Reviewed-by: Yannick Fertré <yannick.fertre@st.com>
Tested-by: Yannick Fertré <yannick.fertre@st.com>
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180125155504.8611-1-philippe.cornu@st.com
---
 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 0c7ecf798874..fd7999642cf8 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -771,20 +771,20 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
 
 	clk_prepare_enable(dsi->pclk);
 
-	ret = phy_ops->get_lane_mbps(priv_data, mode, dsi->mode_flags,
+	ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags,
 				     dsi->lanes, dsi->format, &dsi->lane_mbps);
 	if (ret)
 		DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
 
 	pm_runtime_get_sync(dsi->dev);
 	dw_mipi_dsi_init(dsi);
-	dw_mipi_dsi_dpi_config(dsi, mode);
+	dw_mipi_dsi_dpi_config(dsi, adjusted_mode);
 	dw_mipi_dsi_packet_handler_config(dsi);
 	dw_mipi_dsi_video_mode_config(dsi);
-	dw_mipi_dsi_video_packet_config(dsi, mode);
+	dw_mipi_dsi_video_packet_config(dsi, adjusted_mode);
 	dw_mipi_dsi_command_mode_config(dsi);
-	dw_mipi_dsi_line_timer_config(dsi, mode);
-	dw_mipi_dsi_vertical_timing_config(dsi, mode);
+	dw_mipi_dsi_line_timer_config(dsi, adjusted_mode);
+	dw_mipi_dsi_vertical_timing_config(dsi, adjusted_mode);
 
 	dw_mipi_dsi_dphy_init(dsi);
 	dw_mipi_dsi_dphy_timing_config(dsi);
@@ -798,7 +798,7 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
 
 	dw_mipi_dsi_dphy_enable(dsi);
 
-	dw_mipi_dsi_wait_for_two_frames(mode);
+	dw_mipi_dsi_wait_for_two_frames(adjusted_mode);
 
 	/* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
 	dw_mipi_dsi_set_mode(dsi, 0);

From 741258cdd297e94b5f7167408b958b55795abaf8 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Thu, 26 Apr 2018 08:47:16 +0100
Subject: [PATCH 0510/1286] drm/i915: Use seqlock in engine stats

We can convert engine stats from a spinlock to a seqlock to ensure that
interrupt processing is never delayed, even a tiny bit, by parallel
readers.

There is a smidgen more cost on the write lock side, and an extremely
unlikely chance that readers will have to retry a few times in the face
of heavy interrupt load. But that should be extremely unlikely given how
lightweight the read-side section is compared to the interrupt
processing side, and also compared to the rest of the code paths that
can lead into it. Furthermore, the writer is the one doing the real,
latency-sensitive work, while readers are only informative.
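
For readers unfamiliar with seqlocks, the core pattern from
<linux/seqlock.h> that replaces the spinlock is, schematically
(update_stats()/read_stats() are hypothetical placeholders):

  /* Writer: exclusive, bumps the sequence count around the update. */
  write_seqlock_irqsave(&engine->stats.lock, flags);
  update_stats(engine);
  write_sequnlock_irqrestore(&engine->stats.lock, flags);

  /* Reader: lockless, retries if a writer ran concurrently. */
  do {
          seq = read_seqbegin(&engine->stats.lock);
          total = read_stats(engine);
  } while (read_seqretry(&engine->stats.lock, seq));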

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426074716.7352-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/intel_engine_cs.c  | 19 ++++++++++---------
 drivers/gpu/drm/i915/intel_ringbuffer.h | 11 ++++++-----
 2 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 58be7fac5b8c..ac009f10c948 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -306,7 +306,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
-	spin_lock_init(&engine->stats.lock);
+	seqlock_init(&engine->stats.lock);
 
 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
 
@@ -1481,7 +1481,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 		return -ENODEV;
 
 	tasklet_disable(&execlists->tasklet);
-	spin_lock_irqsave(&engine->stats.lock, flags);
+	write_seqlock_irqsave(&engine->stats.lock, flags);
 
 	if (unlikely(engine->stats.enabled == ~0)) {
 		err = -EBUSY;
@@ -1505,7 +1505,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	}
 
 unlock:
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
 	tasklet_enable(&execlists->tasklet);
 
 	return err;
@@ -1534,12 +1534,13 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
  */
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
 {
+	unsigned int seq;
 	ktime_t total;
-	unsigned long flags;
 
-	spin_lock_irqsave(&engine->stats.lock, flags);
-	total = __intel_engine_get_busy_time(engine);
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	do {
+		seq = read_seqbegin(&engine->stats.lock);
+		total = __intel_engine_get_busy_time(engine);
+	} while (read_seqretry(&engine->stats.lock, seq));
 
 	return total;
 }
@@ -1557,13 +1558,13 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
 	if (!intel_engine_supports_stats(engine))
 		return;
 
-	spin_lock_irqsave(&engine->stats.lock, flags);
+	write_seqlock_irqsave(&engine->stats.lock, flags);
 	WARN_ON_ONCE(engine->stats.enabled == 0);
 	if (--engine->stats.enabled == 0) {
 		engine->stats.total = __intel_engine_get_busy_time(engine);
 		engine->stats.active = 0;
 	}
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c5e27905b0e1..24af3f1088ba 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,6 +3,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 #include <linux/hashtable.h>
+#include <linux/seqlock.h>
 
 #include "i915_gem_batch_pool.h"
 #include "i915_gem_timeline.h"
@@ -595,7 +596,7 @@ struct intel_engine_cs {
 		/**
 		 * @lock: Lock protecting the below fields.
 		 */
-		spinlock_t lock;
+		seqlock_t lock;
 		/**
 		 * @enabled: Reference count indicating number of listeners.
 		 */
@@ -1064,7 +1065,7 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine)
 	if (READ_ONCE(engine->stats.enabled) == 0)
 		return;
 
-	spin_lock_irqsave(&engine->stats.lock, flags);
+	write_seqlock_irqsave(&engine->stats.lock, flags);
 
 	if (engine->stats.enabled > 0) {
 		if (engine->stats.active++ == 0)
@@ -1072,7 +1073,7 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine)
 		GEM_BUG_ON(engine->stats.active == 0);
 	}
 
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
 static inline void intel_engine_context_out(struct intel_engine_cs *engine)
@@ -1082,7 +1083,7 @@ static inline void intel_engine_context_out(struct intel_engine_cs *engine)
 	if (READ_ONCE(engine->stats.enabled) == 0)
 		return;
 
-	spin_lock_irqsave(&engine->stats.lock, flags);
+	write_seqlock_irqsave(&engine->stats.lock, flags);
 
 	if (engine->stats.enabled > 0) {
 		ktime_t last;
@@ -1109,7 +1110,7 @@ static inline void intel_engine_context_out(struct intel_engine_cs *engine)
 		}
 	}
 
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
 int intel_enable_engine_stats(struct intel_engine_cs *engine);

From 75a07f399cd43bc7fb41a13723fbe04e61c5c470 Mon Sep 17 00:00:00 2001
From: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Date: Wed, 17 Jan 2018 22:18:41 +0200
Subject: [PATCH 0511/1286] drm: rcar-du: Zero-out sg_tables when duplicating
 plane state

The state structure for VSP-backed planes, rcar_du_vsp_plane_state,
contains sg tables that track framebuffer mapping performed in the
.prepare_fb() operation to unmap them in .cleanup_fb(). The tables are
incorrectly copied when duplicating state, which can result in a double
free: rcar_du_vsp_plane_cleanup_fb() would free the same sg_table both
for the original plane state and for the copy.

Zero out the sg_tables when duplicating plane state, so that only one
state owns the framebuffer mapping, fixing the reported double free.
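
A minimal sketch of the failure mode, assuming an embedded sg_table in
the driver-private state (the names below are placeholders, not the
rcar-du types):

#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_state {
	struct sg_table sgt;	/* sgt.sgl allocated by sg_alloc_table() */
};

/*
 * kmemdup()-style duplication copies the embedded sg_table by value, so
 * the original and the copy both point at the same scatterlist and both
 * end up calling sg_free_table() on it during cleanup: a double free.
 */
static struct example_state *example_dup_by_copy(const struct example_state *old)
{
	return kmemdup(old, sizeof(*old), GFP_KERNEL);
}

/*
 * Zero-allocating the copy instead leaves its sg_table empty, so only
 * one of the two states owns (and later frees) the mapping.
 */
static struct example_state *example_dup_zeroed(const struct example_state *old)
{
	return kzalloc(sizeof(*old), GFP_KERNEL);
}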

Reported-by: Volodymyr Babchuk <vlad.babchuk@gmail.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 2c260c33840b..4a01a99a4674 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -299,18 +299,17 @@ static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = {
 static struct drm_plane_state *
 rcar_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
 {
-	struct rcar_du_vsp_plane_state *state;
 	struct rcar_du_vsp_plane_state *copy;
 
 	if (WARN_ON(!plane->state))
 		return NULL;
 
-	state = to_rcar_vsp_plane_state(plane->state);
-	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+	copy = kzalloc(sizeof(*copy), GFP_KERNEL);
 	if (copy == NULL)
 		return NULL;
 
 	__drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
+	copy->alpha = to_rcar_vsp_plane_state(plane->state)->alpha;
 
 	return &copy->state;
 }

From 7f961d799fe4e0e515225c10b19177280d72c25e Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 26 Apr 2018 11:32:19 +0100
Subject: [PATCH 0512/1286] drm/i915: Compile out engine debug for release

The majority of the engine state dumping is too voluminous to be useful
outside of a controlled setup, though a few do accompany severe errors.
Keep the debug dumps next to the errors, but hide the others behind a CI
compile flag. This becomes more useful when adding more dumps to
latency-sensitive paths.
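
A minimal sketch of the resulting pattern at a call site, within the
i915 driver where these symbols are available (the function below is
illustrative, not driver code; GEM_SHOW_DEBUG(), drm_debug_printer()
and intel_engine_dump() are the real helpers used by this patch):

static void example_maybe_dump(struct intel_engine_cs *engine)
{
	/*
	 * With CONFIG_DRM_I915_DEBUG_GEM=n, GEM_SHOW_DEBUG() expands to a
	 * constant 0 and the compiler discards this whole block as dead
	 * code; with the CI config it still only fires when drm.debug
	 * includes DRM_UT_DRIVER.
	 */
	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p, "%s\n", engine->name);
	}
}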

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426103219.22181-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c          | 2 +-
 drivers/gpu/drm/i915/i915_gem.h          | 6 ++++++
 drivers/gpu/drm/i915/intel_breadcrumbs.c | 2 +-
 drivers/gpu/drm/i915/intel_hangcheck.c   | 2 +-
 4 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 795ca83aed7a..6b0c67a4f214 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3312,7 +3312,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 
 	GEM_TRACE("start\n");
 
-	if (drm_debug & DRM_UT_DRIVER) {
+	if (GEM_SHOW_DEBUG()) {
 		struct drm_printer p = drm_debug_printer(__func__);
 
 		for_each_engine(engine, i915, id)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index deaf78d2ae8b..525920404ede 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -30,6 +30,9 @@
 struct drm_i915_private;
 
 #ifdef CONFIG_DRM_I915_DEBUG_GEM
+
+#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
+
 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) {	\
 		pr_err("%s:%d GEM_BUG_ON(%s)\n", \
 		       __func__, __LINE__, __stringify(condition)); \
@@ -45,6 +48,9 @@ struct drm_i915_private;
 #define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
 
 #else
+
+#define GEM_SHOW_DEBUG() (0)
+
 #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
 
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 5ce4f51232f5..18e643df523e 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -82,7 +82,7 @@ static unsigned long wait_timeout(void)
 
 static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
 {
-	if (drm_debug & DRM_UT_DRIVER) {
+	if (GEM_SHOW_DEBUG()) {
 		struct drm_printer p = drm_debug_printer(__func__);
 
 		intel_engine_dump(engine, &p,
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index fd0ffb8328d0..309e38b00e95 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -356,7 +356,7 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
 		break;
 
 	case ENGINE_DEAD:
-		if (drm_debug & DRM_UT_DRIVER) {
+		if (GEM_SHOW_DEBUG()) {
 			struct drm_printer p = drm_debug_printer("hangcheck");
 			intel_engine_dump(engine, &p, "%s\n", engine->name);
 		}

From f60fa4087a8abac258b0c8facddf2ee46c4b6b36 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 26 Apr 2018 14:35:21 +0300
Subject: [PATCH 0513/1286] drm/i915: prefer INTEL_GEN() over INTEL_INFO()->gen

Prefer INTEL_GEN() over INTEL_INFO()->gen except in special
circumstances.

v2: don't change device info dump (Chris)

Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426113521.28417-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_device_info.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index a32ba72c514e..0fd13df424cf 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -848,7 +848,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 		gen9_sseu_info_init(dev_priv);
 	else if (INTEL_GEN(dev_priv) == 10)
 		gen10_sseu_info_init(dev_priv);
-	else if (INTEL_INFO(dev_priv)->gen >= 11)
+	else if (INTEL_GEN(dev_priv) >= 11)
 		gen11_sseu_info_init(dev_priv);
 
 	/* Initialize command stream timestamp frequency */

From 87251120553c82eda4ea109a8a52efe6c8ee0cc4 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 26 Apr 2018 11:25:24 +0300
Subject: [PATCH 0514/1286] drm/i915/dp: remove stale comment about bw
 constants

We haven't used the DP bw constants here for a while. No functional
changes.

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1dc7763cdc70c7f64c0a01f76f218d9ac0717227.1524730974.git.jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_dp.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 62f82c4298ac..5f4b30faf6a2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1701,7 +1701,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	int lane_count, clock;
 	int min_lane_count = 1;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
-	/* Conveniently, the link BW constants become indices with a shift...*/
 	int min_clock = 0;
 	int max_clock;
 	int bpp, mode_rate;

From dd519418f5130ce9ca08256b3383bf0e529dbf81 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 26 Apr 2018 11:25:25 +0300
Subject: [PATCH 0515/1286] drm/i915/dp: move link_bw and rate_select debugging
 where used

We call intel_dp_compute_rate() in intel_dp_compute_config() only to be
able to debug log the link_bw and rate_select parameters; we don't use
the parameters here for anything else. We call intel_dp_compute_rate()
again during link training where we actually need and use the
parameters.

Move the debug logging of link_bw and rate_select to
intel_dp_link_training_clock_recovery(), and clean up the extra
intel_dp_compute_rate() call and extra clutter from the already
overcrowded intel_dp_compute_config().
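
For context, a minimal sketch of how the two parameters end up being
consumed during link training (a simplified illustration, not the
driver's exact sequence; example_write_link_rate() is a placeholder):

#include <drm/drm_dp_helper.h>

/*
 * intel_dp_compute_rate() yields either a standard link_bw code (with
 * rate_select == 0) or, for eDP sinks using the link rate table,
 * link_bw == 0 plus a rate_select index; only one of the two DPCD
 * registers is meaningful for a given sink.
 */
static int example_write_link_rate(struct drm_dp_aux *aux,
				   u8 link_bw, u8 rate_select)
{
	if (link_bw)
		return drm_dp_dpcd_writeb(aux, DP_LINK_BW_SET, link_bw);

	return drm_dp_dpcd_writeb(aux, DP_LINK_RATE_SET, rate_select);
}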

v2: Rewrote commit message (Rodrigo, Manasi)

Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/c5cf6a179e2d244eceb6bb80a792765d9efbee4f.1524730974.git.jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_dp.c               | 9 ++-------
 drivers/gpu/drm/i915/intel_dp_link_training.c | 5 +++++
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5f4b30faf6a2..81cf363e71af 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1706,7 +1706,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	int bpp, mode_rate;
 	int link_avail, link_clock;
 	int common_len;
-	uint8_t link_bw, rate_select;
 	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
 					   DP_DPCD_QUIRK_LIMITED_M_N);
 
@@ -1852,12 +1851,8 @@ found:
 	pipe_config->pipe_bpp = bpp;
 	pipe_config->port_clock = intel_dp->common_rates[clock];
 
-	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
-			      &link_bw, &rate_select);
-
-	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
-		      link_bw, rate_select, pipe_config->lane_count,
-		      pipe_config->port_clock, bpp);
+	DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+		      pipe_config->lane_count, pipe_config->port_clock, bpp);
 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
 		      mode_rate, link_avail);
 
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index f59b59bb0a21..3fcaa98b9055 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -139,6 +139,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
 			      &link_bw, &rate_select);
 
+	if (link_bw)
+		DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw);
+	else
+		DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select);
+
 	/* Write the link configuration data */
 	link_config[0] = link_bw;
 	link_config[1] = intel_dp->lane_count;

From 981a63eb2725eca63eee7b317ad9ff586d9e74b2 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 26 Apr 2018 11:25:26 +0300
Subject: [PATCH 0516/1286] drm/i915/dp: abstract dp link config computation
 from the rest

Abstract a new intel_dp_compute_link_config() from
intel_dp_compute_config(), with the parts related to link configuration,
i.e. bpp, link rate, and lane count selection. No functional changes.

v2: Fix a checkpatch warn about spacing.

Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/80f99a625633f87f44d38d487ba3b32ff9a26b07.1524730974.git.jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_dp.c | 171 +++++++++++++++++---------------
 1 file changed, 93 insertions(+), 78 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 81cf363e71af..81da96b9ef33 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1685,19 +1685,14 @@ static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
 	return bres;
 }
 
-bool
-intel_dp_compute_config(struct intel_encoder *encoder,
-			struct intel_crtc_state *pipe_config,
-			struct drm_connector_state *conn_state)
+static bool
+intel_dp_compute_link_config(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	enum port port = encoder->port;
-	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
-	struct intel_digital_connector_state *intel_conn_state =
-		to_intel_digital_connector_state(conn_state);
 	int lane_count, clock;
 	int min_lane_count = 1;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -1706,9 +1701,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	int bpp, mode_rate;
 	int link_avail, link_clock;
 	int common_len;
-	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
-					   DP_DPCD_QUIRK_LIMITED_M_N);
-
 	common_len = intel_dp_common_len_rate_limit(intel_dp,
 						    intel_dp->max_link_rate);
 
@@ -1717,51 +1709,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
 	max_clock = common_len - 1;
 
-	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
-		pipe_config->has_pch_encoder = true;
-
-	pipe_config->has_drrs = false;
-	if (IS_G4X(dev_priv) || port == PORT_A)
-		pipe_config->has_audio = false;
-	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
-		pipe_config->has_audio = intel_dp->has_audio;
-	else
-		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
-
-	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
-		struct drm_display_mode *panel_mode =
-			intel_connector->panel.alt_fixed_mode;
-		struct drm_display_mode *req_mode = &pipe_config->base.mode;
-
-		if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
-			panel_mode = intel_connector->panel.fixed_mode;
-
-		drm_mode_debug_printmodeline(panel_mode);
-
-		intel_fixed_panel_mode(panel_mode, adjusted_mode);
-
-		if (INTEL_GEN(dev_priv) >= 9) {
-			int ret;
-			ret = skl_update_scaler_crtc(pipe_config);
-			if (ret)
-				return ret;
-		}
-
-		if (HAS_GMCH_DISPLAY(dev_priv))
-			intel_gmch_panel_fitting(intel_crtc, pipe_config,
-						 conn_state->scaling_mode);
-		else
-			intel_pch_panel_fitting(intel_crtc, pipe_config,
-						conn_state->scaling_mode);
-	}
-
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
-		return false;
-
-	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
-		return false;
-
 	/* Use values requested by Compliance Test Request */
 	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
 		int index;
@@ -1831,23 +1778,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	return false;
 
 found:
-	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
-		/*
-		 * See:
-		 * CEA-861-E - 5.1 Default Encoding Parameters
-		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
-		 */
-		pipe_config->limited_color_range =
-			bpp != 18 &&
-			drm_default_rgb_quant_range(adjusted_mode) ==
-			HDMI_QUANTIZATION_RANGE_LIMITED;
-	} else {
-		pipe_config->limited_color_range =
-			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
-	}
-
 	pipe_config->lane_count = lane_count;
-
 	pipe_config->pipe_bpp = bpp;
 	pipe_config->port_clock = intel_dp->common_rates[clock];
 
@@ -1856,7 +1787,90 @@ found:
 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
 		      mode_rate, link_avail);
 
-	intel_link_compute_m_n(bpp, lane_count,
+	return true;
+}
+
+bool
+intel_dp_compute_config(struct intel_encoder *encoder,
+			struct intel_crtc_state *pipe_config,
+			struct drm_connector_state *conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = encoder->port;
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	struct intel_digital_connector_state *intel_conn_state =
+		to_intel_digital_connector_state(conn_state);
+	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+					   DP_DPCD_QUIRK_LIMITED_M_N);
+
+	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
+		pipe_config->has_pch_encoder = true;
+
+	pipe_config->has_drrs = false;
+	if (IS_G4X(dev_priv) || port == PORT_A)
+		pipe_config->has_audio = false;
+	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+		pipe_config->has_audio = intel_dp->has_audio;
+	else
+		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
+
+	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+		struct drm_display_mode *panel_mode =
+			intel_connector->panel.alt_fixed_mode;
+		struct drm_display_mode *req_mode = &pipe_config->base.mode;
+
+		if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
+			panel_mode = intel_connector->panel.fixed_mode;
+
+		drm_mode_debug_printmodeline(panel_mode);
+
+		intel_fixed_panel_mode(panel_mode, adjusted_mode);
+
+		if (INTEL_GEN(dev_priv) >= 9) {
+			int ret;
+
+			ret = skl_update_scaler_crtc(pipe_config);
+			if (ret)
+				return ret;
+		}
+
+		if (HAS_GMCH_DISPLAY(dev_priv))
+			intel_gmch_panel_fitting(intel_crtc, pipe_config,
+						 conn_state->scaling_mode);
+		else
+			intel_pch_panel_fitting(intel_crtc, pipe_config,
+						conn_state->scaling_mode);
+	}
+
+	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return false;
+
+	if (!intel_dp_compute_link_config(encoder, pipe_config))
+		return false;
+
+	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+		/*
+		 * See:
+		 * CEA-861-E - 5.1 Default Encoding Parameters
+		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+		 */
+		pipe_config->limited_color_range =
+			pipe_config->pipe_bpp != 18 &&
+			drm_default_rgb_quant_range(adjusted_mode) ==
+			HDMI_QUANTIZATION_RANGE_LIMITED;
+	} else {
+		pipe_config->limited_color_range =
+			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
+	}
+
+	intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
 			       adjusted_mode->crtc_clock,
 			       pipe_config->port_clock,
 			       &pipe_config->dp_m_n,
@@ -1865,11 +1879,12 @@ found:
 	if (intel_connector->panel.downclock_mode != NULL &&
 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
 			pipe_config->has_drrs = true;
-			intel_link_compute_m_n(bpp, lane_count,
-				intel_connector->panel.downclock_mode->clock,
-				pipe_config->port_clock,
-				&pipe_config->dp_m2_n2,
-				reduce_m_n);
+			intel_link_compute_m_n(pipe_config->pipe_bpp,
+					       pipe_config->lane_count,
+					       intel_connector->panel.downclock_mode->clock,
+					       pipe_config->port_clock,
+					       &pipe_config->dp_m2_n2,
+					       reduce_m_n);
 	}
 
 	/*

From ef32659a78df0b10cd25076d483dcb6240274c90 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 26 Apr 2018 11:25:27 +0300
Subject: [PATCH 0517/1286] drm/i915/dp: move eDP VBT bpp clamping code to
 intel_dp_compute_bpp()

Keep related things together. No functional changes.

v2: Fix a typo in patch subject, fix a checkpatch alignment warning.

Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/f24d44547a586a0e342f24e69ab4d576a2474891.1524730974.git.jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_dp.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 81da96b9ef33..430c206e77fc 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1650,6 +1650,8 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
 				struct intel_crtc_state *pipe_config)
 {
+	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	int bpp, bpc;
 
 	bpp = pipe_config->pipe_bpp;
@@ -1665,6 +1667,17 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
 		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
 			      pipe_config->pipe_bpp);
 	}
+
+	if (intel_dp_is_edp(intel_dp)) {
+		/* Get bpp from vbt only for panels that dont have bpp in edid */
+		if (intel_connector->base.display_info.bpc == 0 &&
+		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
+			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+				      dev_priv->vbt.edp.bpp);
+			bpp = dev_priv->vbt.edp.bpp;
+		}
+	}
+
 	return bpp;
 }
 
@@ -1689,10 +1702,8 @@ static bool
 intel_dp_compute_link_config(struct intel_encoder *encoder,
 			     struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	int lane_count, clock;
 	int min_lane_count = 1;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -1735,15 +1746,6 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	 * bpc in between. */
 	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 	if (intel_dp_is_edp(intel_dp)) {
-
-		/* Get bpp from vbt only for panels that dont have bpp in edid */
-		if (intel_connector->base.display_info.bpc == 0 &&
-			(dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
-			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
-				      dev_priv->vbt.edp.bpp);
-			bpp = dev_priv->vbt.edp.bpp;
-		}
-
 		/*
 		 * Use the maximum clock and number of lanes the eDP panel
 		 * advertizes being capable of. The panels are generally

From 7c2781e41ec8893ad9ef62f28821e2dbd4bd0420 Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 26 Apr 2018 11:25:28 +0300
Subject: [PATCH 0518/1286] drm/i915/dp: group link config limits in a struct

Also use the same min/max model for bpp, and adjust debug logging while
at it.

Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/72f78c7ae0cd1810798bd94cbf5e574c78da83f8.1524730974.git.jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_dp.c | 57 +++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 430c206e77fc..9ef29b63b237 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1647,6 +1647,12 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
 	}
 }
 
+struct link_config_limits {
+	int min_clock, max_clock;
+	int min_lane_count, max_lane_count;
+	int min_bpp, max_bpp;
+};
+
 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
 				struct intel_crtc_state *pipe_config)
 {
@@ -1704,21 +1710,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 {
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	int lane_count, clock;
-	int min_lane_count = 1;
-	int max_lane_count = intel_dp_max_lane_count(intel_dp);
-	int min_clock = 0;
-	int max_clock;
-	int bpp, mode_rate;
-	int link_avail, link_clock;
+	struct link_config_limits limits;
+	int bpp, clock, lane_count;
+	int mode_rate, link_avail, link_clock;
 	int common_len;
+
 	common_len = intel_dp_common_len_rate_limit(intel_dp,
 						    intel_dp->max_link_rate);
 
 	/* No common link rates between source and sink */
 	WARN_ON(common_len <= 0);
 
-	max_clock = common_len - 1;
+	limits.min_clock = 0;
+	limits.max_clock = common_len - 1;
+
+	limits.min_lane_count = 1;
+	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
+
+	limits.min_bpp = 6 * 3;
+	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
 	/* Use values requested by Compliance Test Request */
 	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
@@ -1733,18 +1743,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 						    intel_dp->num_common_rates,
 						    intel_dp->compliance.test_link_rate);
 			if (index >= 0)
-				min_clock = max_clock = index;
-			min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
+				limits.min_clock = limits.max_clock = index;
+			limits.min_lane_count = limits.max_lane_count = intel_dp->compliance.test_lane_count;
 		}
 	}
-	DRM_DEBUG_KMS("DP link computation with max lane count %i "
-		      "max bw %d pixel clock %iKHz\n",
-		      max_lane_count, intel_dp->common_rates[max_clock],
-		      adjusted_mode->crtc_clock);
 
-	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
-	 * bpc in between. */
-	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 	if (intel_dp_is_edp(intel_dp)) {
 		/*
 		 * Use the maximum clock and number of lanes the eDP panel
@@ -1753,18 +1756,24 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 		 * configuration, and typically these values correspond to the
 		 * native resolution of the panel.
 		 */
-		min_lane_count = max_lane_count;
-		min_clock = max_clock;
+		limits.min_lane_count = limits.max_lane_count;
+		limits.min_clock = limits.max_clock;
 	}
 
-	for (; bpp >= 6*3; bpp -= 2*3) {
+	DRM_DEBUG_KMS("DP link computation with max lane count %i "
+		      "max rate %d max bpp %d pixel clock %iKHz\n",
+		      limits.max_lane_count,
+		      intel_dp->common_rates[limits.max_clock],
+		      limits.max_bpp, adjusted_mode->crtc_clock);
+
+	for (bpp = limits.max_bpp; bpp >= limits.min_bpp; bpp -= 2 * 3) {
 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
 						   bpp);
 
-		for (clock = min_clock; clock <= max_clock; clock++) {
-			for (lane_count = min_lane_count;
-				lane_count <= max_lane_count;
-				lane_count <<= 1) {
+		for (clock = limits.min_clock; clock <= limits.max_clock; clock++) {
+			for (lane_count = limits.min_lane_count;
+			     lane_count <= limits.max_lane_count;
+			     lane_count <<= 1) {
 
 				link_clock = intel_dp->common_rates[clock];
 				link_avail = intel_dp_max_data_rate(link_clock,

From 3acd115d08f70615debd5a8d37a8b97dc17a9cbb Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 26 Apr 2018 11:25:29 +0300
Subject: [PATCH 0519/1286] drm/i915/dp: abstract link config selection

For now, there's just the one link config selection, optimizing for a
slow and wide link. No functional changes.
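
As a worked illustration of the ordering (the numbers are purely
illustrative, not real DP rates): suppose the mode needs 10 units of
bandwidth at the maximum bpp, the low link rate provides 3 units per
lane and the high rate 6 units per lane. The search tries low-rate x1
(3), low-rate x2 (6) and low-rate x4 (12) and stops there: the lowest
link rate that can carry the mode wins, even though high-rate x2 (12)
would also have sufficed with fewer lanes.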

Keep the debug logging in the caller, to avoid duplication later on if
alternative link config selection gets added.

v2: Improved commit message

Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/64848b76bf90d6ceecd7ec6b5add28531e0b1a41.1524730974.git.jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_dp.c | 81 ++++++++++++++++++++-------------
 1 file changed, 50 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9ef29b63b237..d622db76b9c3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1704,6 +1704,42 @@ static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
 	return bres;
 }
 
+/* Optimize link config in order: max bpp, min clock, min lanes */
+static bool
+intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
+				  struct intel_crtc_state *pipe_config,
+				  const struct link_config_limits *limits)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+	int bpp, clock, lane_count;
+	int mode_rate, link_clock, link_avail;
+
+	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+						   bpp);
+
+		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+			for (lane_count = limits->min_lane_count;
+			     lane_count <= limits->max_lane_count;
+			     lane_count <<= 1) {
+				link_clock = intel_dp->common_rates[clock];
+				link_avail = intel_dp_max_data_rate(link_clock,
+								    lane_count);
+
+				if (mode_rate <= link_avail) {
+					pipe_config->lane_count = lane_count;
+					pipe_config->pipe_bpp = bpp;
+					pipe_config->port_clock = link_clock;
+
+					return true;
+				}
+			}
+		}
+	}
+
+	return false;
+}
+
 static bool
 intel_dp_compute_link_config(struct intel_encoder *encoder,
 			     struct intel_crtc_state *pipe_config)
@@ -1711,8 +1747,6 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct link_config_limits limits;
-	int bpp, clock, lane_count;
-	int mode_rate, link_avail, link_clock;
 	int common_len;
 
 	common_len = intel_dp_common_len_rate_limit(intel_dp,
@@ -1766,37 +1800,22 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 		      intel_dp->common_rates[limits.max_clock],
 		      limits.max_bpp, adjusted_mode->crtc_clock);
 
-	for (bpp = limits.max_bpp; bpp >= limits.min_bpp; bpp -= 2 * 3) {
-		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-						   bpp);
-
-		for (clock = limits.min_clock; clock <= limits.max_clock; clock++) {
-			for (lane_count = limits.min_lane_count;
-			     lane_count <= limits.max_lane_count;
-			     lane_count <<= 1) {
-
-				link_clock = intel_dp->common_rates[clock];
-				link_avail = intel_dp_max_data_rate(link_clock,
-								    lane_count);
-
-				if (mode_rate <= link_avail) {
-					goto found;
-				}
-			}
-		}
-	}
-
-	return false;
-
-found:
-	pipe_config->lane_count = lane_count;
-	pipe_config->pipe_bpp = bpp;
-	pipe_config->port_clock = intel_dp->common_rates[clock];
+	/*
+	 * Optimize for slow and wide. This is the place to add alternative
+	 * optimization policy.
+	 */
+	if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits))
+		return false;
 
 	DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
-		      pipe_config->lane_count, pipe_config->port_clock, bpp);
-	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
-		      mode_rate, link_avail);
+		      pipe_config->lane_count, pipe_config->port_clock,
+		      pipe_config->pipe_bpp);
+
+	DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+		      intel_dp_link_required(adjusted_mode->crtc_clock,
+					     pipe_config->pipe_bpp),
+		      intel_dp_max_data_rate(pipe_config->port_clock,
+					     pipe_config->lane_count));
 
 	return true;
 }

From a49714531be37111b1540d85a70bb55d0958272c Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Thu, 26 Apr 2018 11:25:30 +0300
Subject: [PATCH 0520/1286] drm/i915/dp: fix compliance test adjustments

Abstract the compliance test adjustments into a single function. Also
make the bpc adjustments affect the limits, actually forcing the bpc;
it seems that directly changing pipe_bpp in the past could not have
been effective.

Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ef61e76003ab7719c82810b742f3fb5765c0e14c.1524730974.git.jani.nikula@intel.com
---
 drivers/gpu/drm/i915/intel_dp.c | 64 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d622db76b9c3..83da50b13d81 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1666,14 +1666,6 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
 	if (bpc > 0)
 		bpp = min(bpp, 3*bpc);
 
-	/* For DP Compliance we override the computed bpp for the pipe */
-	if (intel_dp->compliance.test_data.bpc != 0) {
-		pipe_config->pipe_bpp =	3*intel_dp->compliance.test_data.bpc;
-		pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
-		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
-			      pipe_config->pipe_bpp);
-	}
-
 	if (intel_dp_is_edp(intel_dp)) {
 		/* Get bpp from vbt only for panels that dont have bpp in edid */
 		if (intel_connector->base.display_info.bpc == 0 &&
@@ -1704,6 +1696,42 @@ static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
 	return bres;
 }
 
+/* Adjust link config limits based on compliance test requests. */
+static void
+intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+				  struct intel_crtc_state *pipe_config,
+				  struct link_config_limits *limits)
+{
+	/* For DP Compliance we override the computed bpp for the pipe */
+	if (intel_dp->compliance.test_data.bpc != 0) {
+		int bpp = 3 * intel_dp->compliance.test_data.bpc;
+
+		limits->min_bpp = limits->max_bpp = bpp;
+		pipe_config->dither_force_disable = bpp == 6 * 3;
+
+		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
+	}
+
+	/* Use values requested by Compliance Test Request */
+	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
+		int index;
+
+		/* Validate the compliance test data since max values
+		 * might have changed due to link train fallback.
+		 */
+		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
+					       intel_dp->compliance.test_lane_count)) {
+			index = intel_dp_rate_index(intel_dp->common_rates,
+						    intel_dp->num_common_rates,
+						    intel_dp->compliance.test_link_rate);
+			if (index >= 0)
+				limits->min_clock = limits->max_clock = index;
+			limits->min_lane_count = limits->max_lane_count =
+				intel_dp->compliance.test_lane_count;
+		}
+	}
+}
+
 /* Optimize link config in order: max bpp, min clock, min lanes */
 static bool
 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
@@ -1764,24 +1792,6 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	limits.min_bpp = 6 * 3;
 	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
-	/* Use values requested by Compliance Test Request */
-	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
-		int index;
-
-		/* Validate the compliance test data since max values
-		 * might have changed due to link train fallback.
-		 */
-		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
-					       intel_dp->compliance.test_lane_count)) {
-			index = intel_dp_rate_index(intel_dp->common_rates,
-						    intel_dp->num_common_rates,
-						    intel_dp->compliance.test_link_rate);
-			if (index >= 0)
-				limits.min_clock = limits.max_clock = index;
-			limits.min_lane_count = limits.max_lane_count = intel_dp->compliance.test_lane_count;
-		}
-	}
-
 	if (intel_dp_is_edp(intel_dp)) {
 		/*
 		 * Use the maximum clock and number of lanes the eDP panel
@@ -1794,6 +1804,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 		limits.min_clock = limits.max_clock;
 	}
 
+	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
+
 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
 		      "max rate %d max bpp %d pixel clock %iKHz\n",
 		      limits.max_lane_count,

From 935dff1a218c2162aad8f0e681cbb5d601742412 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 11 Apr 2018 13:03:46 +0100
Subject: [PATCH 0521/1286] drm/i915/selftests: Wait for idle between idle
 resets as well
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Even though we weren't injecting guilty requests to be reset, we could
still fall over the issue of resetting the same request too fast -- where
the GPU refuses to start again. (Although it is interesting to note that
reloading the driver is sufficient, suggesting that we could recover if
we delayed the setup after reset?) Continue to paper over the problem by
adding a small delay: wait for the engine to idle between tests, and
ensure that the engines are idle before starting the idle tests.

v2: Replace single instance of 50 with a magic macro.

References: 028666793a02 ("drm/i915/selftests: Avoid repeatedly harming the same innocent context")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180411120346.27618-1-chris@chris-wilson.co.uk
---
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 50 ++++++++++++++++++-
 1 file changed, 49 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index f7ee54e109ae..c61bf65454a9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -30,6 +30,8 @@
 #include "mock_context.h"
 #include "mock_drm.h"
 
+#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
+
 struct hang {
 	struct drm_i915_private *i915;
 	struct drm_i915_gem_object *hws;
@@ -454,6 +456,11 @@ static int igt_global_reset(void *arg)
 	return err;
 }
 
+static bool wait_for_idle(struct intel_engine_cs *engine)
+{
+	return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
+}
+
 static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 {
 	struct intel_engine_cs *engine;
@@ -481,6 +488,13 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 		if (active && !intel_engine_can_store_dword(engine))
 			continue;
 
+		if (!wait_for_idle(engine)) {
+			pr_err("%s failed to idle before reset\n",
+			       engine->name);
+			err = -EIO;
+			break;
+		}
+
 		reset_count = i915_reset_count(&i915->gpu_error);
 		reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
 							     engine);
@@ -542,6 +556,19 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 				err = -EINVAL;
 				break;
 			}
+
+			if (!wait_for_idle(engine)) {
+				struct drm_printer p =
+					drm_info_printer(i915->drm.dev);
+
+				pr_err("%s failed to idle after reset\n",
+				       engine->name);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+
+				err = -EIO;
+				break;
+			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 
@@ -696,6 +723,13 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 		    !intel_engine_can_store_dword(engine))
 			continue;
 
+		if (!wait_for_idle(engine)) {
+			pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
+			       engine->name, test_name);
+			err = -EIO;
+			break;
+		}
+
 		memset(threads, 0, sizeof(threads));
 		for_each_engine(other, i915, tmp) {
 			struct task_struct *tsk;
@@ -772,6 +806,20 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 				i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
 				i915_request_put(rq);
 			}
+
+			if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
+				struct drm_printer p =
+					drm_info_printer(i915->drm.dev);
+
+				pr_err("i915_reset_engine(%s:%s):"
+				       " failed to idle after reset\n",
+				       engine->name, test_name);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+
+				err = -EIO;
+				break;
+			}
 		} while (time_before(jiffies, end_time));
 		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
@@ -981,7 +1029,7 @@ static int wait_for_others(struct drm_i915_private *i915,
 		if (engine == exclude)
 			continue;
 
-		if (wait_for(intel_engine_is_idle(engine), 10))
+		if (!wait_for_idle(engine))
 			return -EIO;
 	}
 

From 75cbec033c08f6d41c4775784f66ab860d02a6b5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 25 Apr 2018 14:23:31 -0700
Subject: [PATCH 0522/1286] drm/i915/psr: Prevent PSR exit when a non-pipe
 related register is written
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Any write to any display register was causing HW to exit PSR, so mask
this trigger to allow more power savings. Writes to pipe-related
registers will still cause HW to exit PSR.
This is already masked for PSR2.

This also does not break Display WA #0884: writes to CURSURFLIVE still
cause the hardware to exit PSR. This was tested on a CNL machine by
triggering a write to CURSURFLIVE while a debugfs entry was being read
by the user.

Bspec: 7721 and 8042

v4: Checked that this does not break WA #0884 and added this
information to the commit message.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180425212334.21109-1-jose.souza@intel.com
---
 drivers/gpu/drm/i915/intel_psr.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 0d548292dd09..e35a3b94fa69 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -667,7 +667,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 		I915_WRITE(EDP_PSR_DEBUG,
 			   EDP_PSR_DEBUG_MASK_MEMUP |
 			   EDP_PSR_DEBUG_MASK_HPD |
-			   EDP_PSR_DEBUG_MASK_LPSP);
+			   EDP_PSR_DEBUG_MASK_LPSP |
+			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
 	}
 }
 

From bc18b4df0fcb9fa4a50a19576723f14bfdd10c26 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 25 Apr 2018 14:23:32 -0700
Subject: [PATCH 0523/1286] drm/i915/psr/skl+: Print information about what
 caused a PSR exit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This will be helpful for debugging what the hardware is actually
tracking and what causes PSR to exit.

BSpec: 7721

v4:
- Using _MMIO_TRANS2() in PSR_EVENT
- Cleaning events before printing

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180425212334.21109-2-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h  | 23 ++++++++++++++++
 drivers/gpu/drm/i915/intel_psr.c | 45 ++++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2dad655a710c..391825ae2361 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4095,6 +4095,29 @@ enum {
 #define   EDP_PSR2_IDLE_FRAME_MASK	0xf
 #define   EDP_PSR2_IDLE_FRAME_SHIFT	0
 
+#define _PSR_EVENT_TRANS_A			0x60848
+#define _PSR_EVENT_TRANS_B			0x61848
+#define _PSR_EVENT_TRANS_C			0x62848
+#define _PSR_EVENT_TRANS_D			0x63848
+#define _PSR_EVENT_TRANS_EDP			0x6F848
+#define PSR_EVENT(trans)			_MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A)
+#define  PSR_EVENT_PSR2_WD_TIMER_EXPIRE		(1 << 17)
+#define  PSR_EVENT_PSR2_DISABLED		(1 << 16)
+#define  PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN	(1 << 15)
+#define  PSR_EVENT_SU_CRC_FIFO_UNDERRUN		(1 << 14)
+#define  PSR_EVENT_GRAPHICS_RESET		(1 << 12)
+#define  PSR_EVENT_PCH_INTERRUPT		(1 << 11)
+#define  PSR_EVENT_MEMORY_UP			(1 << 10)
+#define  PSR_EVENT_FRONT_BUFFER_MODIFY		(1 << 9)
+#define  PSR_EVENT_WD_TIMER_EXPIRE		(1 << 8)
+#define  PSR_EVENT_PIPE_REGISTERS_UPDATE	(1 << 6)
+#define  PSR_EVENT_REGISTER_UPDATE		(1 << 5)
+#define  PSR_EVENT_HDCP_ENABLE			(1 << 4)
+#define  PSR_EVENT_KVMR_SESSION_ENABLE		(1 << 3)
+#define  PSR_EVENT_VBI_ENABLE			(1 << 2)
+#define  PSR_EVENT_LPSP_MODE_EXIT		(1 << 1)
+#define  PSR_EVENT_PSR_DISABLE			(1 << 0)
+
 #define EDP_PSR2_STATUS			_MMIO(0x6f940)
 #define EDP_PSR2_STATUS_STATE_MASK     (0xf<<28)
 #define EDP_PSR2_STATUS_STATE_SHIFT    28
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index e35a3b94fa69..c8d5cdce544f 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -125,6 +125,43 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
 	I915_WRITE(EDP_PSR_IMR, ~mask);
 }
 
+static void psr_event_print(u32 val, bool psr2_enabled)
+{
+	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
+	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
+		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
+	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
+		DRM_DEBUG_KMS("\tPSR2 disabled\n");
+	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
+		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
+	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
+		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
+	if (val & PSR_EVENT_GRAPHICS_RESET)
+		DRM_DEBUG_KMS("\tGraphics reset\n");
+	if (val & PSR_EVENT_PCH_INTERRUPT)
+		DRM_DEBUG_KMS("\tPCH interrupt\n");
+	if (val & PSR_EVENT_MEMORY_UP)
+		DRM_DEBUG_KMS("\tMemory up\n");
+	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
+		DRM_DEBUG_KMS("\tFront buffer modification\n");
+	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
+		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
+	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
+		DRM_DEBUG_KMS("\tPIPE registers updated\n");
+	if (val & PSR_EVENT_REGISTER_UPDATE)
+		DRM_DEBUG_KMS("\tRegister updated\n");
+	if (val & PSR_EVENT_HDCP_ENABLE)
+		DRM_DEBUG_KMS("\tHDCP enabled\n");
+	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
+		DRM_DEBUG_KMS("\tKVMR session enabled\n");
+	if (val & PSR_EVENT_VBI_ENABLE)
+		DRM_DEBUG_KMS("\tVBI enabled\n");
+	if (val & PSR_EVENT_LPSP_MODE_EXIT)
+		DRM_DEBUG_KMS("\tLPSP mode exited\n");
+	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
+		DRM_DEBUG_KMS("\tPSR disabled\n");
+}
+
 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 {
 	u32 transcoders = BIT(TRANSCODER_EDP);
@@ -152,6 +189,14 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 			dev_priv->psr.last_exit = time_ns;
 			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
 				      transcoder_name(cpu_transcoder));
+
+			if (INTEL_GEN(dev_priv) >= 9) {
+				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+				bool psr2_enabled = dev_priv->psr.psr2_enabled;
+
+				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+				psr_event_print(val, psr2_enabled);
+			}
 		}
 	}
 }

From d0bc86231a55d72860ac3a07a0f676fc18f2dd9d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 25 Apr 2018 14:23:33 -0700
Subject: [PATCH 0524/1286] drm/i915/debugfs: Print sink PSR status
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

IGT tests could be improved with the sink status, knowing for sure that
the hardware has activated or exited PSR.

v3:
Reading i915_edp_psr_status used to cause PSR to exit, but this is now
fixed by 'drm/i915/psr: Prevent PSR exit when a non-pipe related
register is written'.

Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180425212334.21109-3-jose.souza@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1c88805d3354..cb1a804bf72e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2603,6 +2603,26 @@ static const char *psr2_live_status(u32 val)
 	return "unknown";
 }
 
+static const char *psr_sink_status(u8 val)
+{
+	static const char * const sink_status[] = {
+		"inactive",
+		"transition to active, capture and display",
+		"active, display from RFB",
+		"active, capture and display on sink device timings",
+		"transition to inactive, capture and display, timing re-sync",
+		"reserved",
+		"reserved",
+		"sink internal error"
+	};
+
+	val &= DP_PSR_SINK_STATE_MASK;
+	if (val < ARRAY_SIZE(sink_status))
+		return sink_status[val];
+
+	return "unknown";
+}
+
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2684,6 +2704,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
 			   psr2, psr2_live_status(psr2));
 	}
+
+	if (dev_priv->psr.enabled) {
+		struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
+		u8 val;
+
+		if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
+			seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
+				   psr_sink_status(val));
+	}
 	mutex_unlock(&dev_priv->psr.lock);
 
 	if (READ_ONCE(dev_priv->psr.debug)) {

From 2a34b0054bf4a5224347e4135019b63d91cfb5d4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Roberto=20de=20Souza?= <jose.souza@intel.com>
Date: Wed, 25 Apr 2018 14:23:34 -0700
Subject: [PATCH 0525/1286] drm/i915/psr/cnl: Set y-coordinate as valid in SDP
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This was my bad: the spec says that the name of this bit is
'Y-coordinate valid', but its values are:
0: Include Y-coordinate valid (eDP 1.4a)
1: Do not include Y-coordinate valid (eDP 1.4)
So do not set it.

BSpec: 7713

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180425212334.21109-4-jose.souza@intel.com
---
 drivers/gpu/drm/i915/intel_psr.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c8d5cdce544f..6233a322aac5 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -508,9 +508,8 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
 	 * good enough. */
 	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
-		val |= EDP_Y_COORDINATE_VALID | EDP_Y_COORDINATE_ENABLE;
-	}
+	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+		val |= EDP_Y_COORDINATE_ENABLE;
 
 	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
 

From 735581a0a13c58e6ff7eaf7a1087e1e5d917cabe Mon Sep 17 00:00:00 2001
From: Gerd Hoffmann <kraxel@redhat.com>
Date: Fri, 20 Apr 2018 09:19:01 +0200
Subject: [PATCH 0526/1286] qxl: remove qxl_io_log()

qxl_io_log() sends messages over to the host (qemu) for logging.
Remove the function and all of its callers; we can just use standard
DRM_DEBUG calls (and, if needed, a serial console).

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180420071904.24276-2-kraxel@redhat.com
---
 drivers/gpu/drm/qxl/qxl_cmd.c     | 34 ++-----------------------------
 drivers/gpu/drm/qxl/qxl_display.c | 27 ++++--------------------
 drivers/gpu/drm/qxl/qxl_drv.h     |  3 ---
 drivers/gpu/drm/qxl/qxl_fb.c      |  2 --
 drivers/gpu/drm/qxl/qxl_irq.c     |  3 +--
 5 files changed, 7 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index c0fb52c6d4ca..850f8d7d37ce 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -341,12 +341,9 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
 	surface_height = surf->surf.height;
 
 	if (area->left < 0 || area->top < 0 ||
-	    area->right > surface_width || area->bottom > surface_height) {
-		qxl_io_log(qdev, "%s: not doing area update for "
-			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
-			   area->top, area->right, area->bottom, surface_width, surface_height);
+	    area->right > surface_width || area->bottom > surface_height)
 		return -EINVAL;
-	}
+
 	mutex_lock(&qdev->update_area_mutex);
 	qdev->ram_header->update_area = *area;
 	qdev->ram_header->update_surface = surface_id;
@@ -407,20 +404,6 @@ void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
 	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
 }
 
-void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
-{
-	va_list args;
-
-	va_start(args, fmt);
-	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
-	va_end(args);
-	/*
-	 * DO not do a DRM output here - this will call printk, which will
-	 * call back into qxl for rendering (qxl_fb)
-	 */
-	outb(0, qdev->io_base + QXL_IO_LOG);
-}
-
 void qxl_io_reset(struct qxl_device *qdev)
 {
 	outb(0, qdev->io_base + QXL_IO_RESET);
@@ -428,19 +411,6 @@ void qxl_io_reset(struct qxl_device *qdev)
 
 void qxl_io_monitors_config(struct qxl_device *qdev)
 {
-	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
-		   qdev->monitors_config ?
-		   qdev->monitors_config->count : -1,
-		   qdev->monitors_config && qdev->monitors_config->count ?
-		   qdev->monitors_config->heads[0].width : -1,
-		   qdev->monitors_config && qdev->monitors_config->count ?
-		   qdev->monitors_config->heads[0].height : -1,
-		   qdev->monitors_config && qdev->monitors_config->count ?
-		   qdev->monitors_config->heads[0].x : -1,
-		   qdev->monitors_config && qdev->monitors_config->count ?
-		   qdev->monitors_config->heads[0].y : -1
-		   );
-
 	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 820cbca3bf6e..5809c6c6e7b7 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -48,12 +48,8 @@ static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned c
 		qdev->client_monitors_config = kzalloc(
 				sizeof(struct qxl_monitors_config) +
 				sizeof(struct qxl_head) * count, GFP_KERNEL);
-		if (!qdev->client_monitors_config) {
-			qxl_io_log(qdev,
-				   "%s: allocation failure for %u heads\n",
-				   __func__, count);
+		if (!qdev->client_monitors_config)
 			return;
-		}
 	}
 	qdev->client_monitors_config->count = count;
 }
@@ -74,12 +70,8 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
 	num_monitors = qdev->rom->client_monitors_config.count;
 	crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
 		  sizeof(qdev->rom->client_monitors_config));
-	if (crc != qdev->rom->client_monitors_config_crc) {
-		qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc,
-			   sizeof(qdev->rom->client_monitors_config),
-			   qdev->rom->client_monitors_config_crc);
+	if (crc != qdev->rom->client_monitors_config_crc)
 		return MONITORS_CONFIG_BAD_CRC;
-	}
 	if (!num_monitors) {
 		DRM_DEBUG_KMS("no client monitors configured\n");
 		return status;
@@ -170,12 +162,10 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
 		udelay(5);
 	}
 	if (status == MONITORS_CONFIG_BAD_CRC) {
-		qxl_io_log(qdev, "config: bad crc\n");
 		DRM_DEBUG_KMS("ignoring client monitors config: bad crc");
 		return;
 	}
 	if (status == MONITORS_CONFIG_UNCHANGED) {
-		qxl_io_log(qdev, "config: unchanged\n");
 		DRM_DEBUG_KMS("ignoring client monitors config: unchanged");
 		return;
 	}
@@ -385,14 +375,6 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
 				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
-	struct drm_device *dev = crtc->dev;
-	struct qxl_device *qdev = dev->dev_private;
-
-	qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
-		   __func__,
-		   mode->hdisplay, mode->vdisplay,
-		   adjusted_mode->hdisplay,
-		   adjusted_mode->vdisplay);
 	return true;
 }
 
@@ -403,10 +385,9 @@ qxl_send_monitors_config(struct qxl_device *qdev)
 
 	BUG_ON(!qdev->ram_header->monitors_config);
 
-	if (qdev->monitors_config->count == 0) {
-		qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
+	if (qdev->monitors_config->count == 0)
 		return;
-	}
+
 	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
 		struct qxl_head *head = &qdev->monitors_config->heads[i];
 
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 00a1a66b052a..4b8984017373 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -298,9 +298,6 @@ struct qxl_device {
 	int monitors_config_height;
 };
 
-/* forward declaration for QXL_INFO_IO */
-__printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
-
 extern const struct drm_ioctl_desc qxl_ioctls[];
 extern int qxl_max_ioctl;
 
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 338891401f35..9a6752606079 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -185,8 +185,6 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
 	/*
 	 * we are using a shadow draw buffer, at qdev->surface0_shadow
 	 */
-	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2,
-		   clips->y1, clips->y2);
 	image->dx = clips->x1;
 	image->dy = clips->y1;
 	image->width = clips->x2 - clips->x1;
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 23a40106ab53..3bb31add6350 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -57,10 +57,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
 		 * to avoid endless loops).
 		 */
 		qdev->irq_received_error++;
-		qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
+		DRM_WARN("driver is in bug mode\n");
 	}
 	if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
-		qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
 		schedule_work(&qdev->client_monitors_config_work);
 	}
 	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;

From 998010bfae6ebaac68af905bef9f6e276f775254 Mon Sep 17 00:00:00 2001
From: Gerd Hoffmann <kraxel@redhat.com>
Date: Fri, 20 Apr 2018 09:19:02 +0200
Subject: [PATCH 0527/1286] qxl: move qxl_send_monitors_config()

Needed to avoid a forward declaration in a followup patch.
Pure code move, no functional change.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180420071904.24276-3-kraxel@redhat.com
---
 drivers/gpu/drm/qxl/qxl_display.c | 47 +++++++++++++++----------------
 1 file changed, 23 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5809c6c6e7b7..7d08a26c3a8b 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -258,6 +258,29 @@ static int qxl_add_common_modes(struct drm_connector *connector,
 	return i - 1;
 }
 
+static void qxl_send_monitors_config(struct qxl_device *qdev)
+{
+	int i;
+
+	BUG_ON(!qdev->ram_header->monitors_config);
+
+	if (qdev->monitors_config->count == 0)
+		return;
+
+	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
+		struct qxl_head *head = &qdev->monitors_config->heads[i];
+
+		if (head->y > 8192 || head->x > 8192 ||
+		    head->width > 8192 || head->height > 8192) {
+			DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
+				  i, head->width, head->height,
+				  head->x, head->y);
+			return;
+		}
+	}
+	qxl_io_monitors_config(qdev);
+}
+
 static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
 				  struct drm_crtc_state *old_crtc_state)
 {
@@ -378,30 +401,6 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static void
-qxl_send_monitors_config(struct qxl_device *qdev)
-{
-	int i;
-
-	BUG_ON(!qdev->ram_header->monitors_config);
-
-	if (qdev->monitors_config->count == 0)
-		return;
-
-	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
-		struct qxl_head *head = &qdev->monitors_config->heads[i];
-
-		if (head->y > 8192 || head->x > 8192 ||
-		    head->width > 8192 || head->height > 8192) {
-			DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
-				  i, head->width, head->height,
-				  head->x, head->y);
-			return;
-		}
-	}
-	qxl_io_monitors_config(qdev);
-}
-
 static void qxl_monitors_config_set(struct qxl_device *qdev,
 				    int index,
 				    unsigned x, unsigned y,

From a6d3c4d79822658e7f2f9c4b73237fe2b057ed67 Mon Sep 17 00:00:00 2001
From: Gerd Hoffmann <kraxel@redhat.com>
Date: Fri, 20 Apr 2018 09:19:03 +0200
Subject: [PATCH 0528/1286] qxl: hook monitors_config updates into crtc, not
 encoder.

The encoder callbacks are only called when the video mode changes, so
any layout change without a mode change goes unnoticed.

Add qxl_crtc_update_monitors_config(), based on the old
qxl_write_monitors_config_for_encoder() function.  Hook it into the
enable, disable and flush atomic crtc callbacks.  Remove monitors_config
updates from all other places.
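
For instance (hypothetical scenario): dragging a head to a new position in a
multi-monitor layout changes crtc->x/y without changing the mode, so the
encoder commit hook never ran and the new layout was never sent to the host;
updating monitors_config from the crtc atomic callbacks catches this case.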

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1544322
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180420071904.24276-4-kraxel@redhat.com
---
 drivers/gpu/drm/qxl/qxl_cmd.c     |   2 +
 drivers/gpu/drm/qxl/qxl_display.c | 156 ++++++++++++------------------
 2 files changed, 66 insertions(+), 92 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 850f8d7d37ce..95db20f2145f 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -371,6 +371,7 @@ void qxl_io_flush_surfaces(struct qxl_device *qdev)
 void qxl_io_destroy_primary(struct qxl_device *qdev)
 {
 	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
+	qdev->primary_created = false;
 }
 
 void qxl_io_create_primary(struct qxl_device *qdev,
@@ -396,6 +397,7 @@ void qxl_io_create_primary(struct qxl_device *qdev,
 	create->type = QXL_SURF_TYPE_PRIMARY;
 
 	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
+	qdev->primary_created = true;
 }
 
 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 7d08a26c3a8b..58959733ae16 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -281,6 +281,66 @@ static void qxl_send_monitors_config(struct qxl_device *qdev)
 	qxl_io_monitors_config(qdev);
 }
 
+static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
+					    const char *reason)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+	struct qxl_head head;
+	int oldcount, i = qcrtc->index;
+
+	if (!qdev->primary_created) {
+		DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason);
+		return;
+	}
+
+	if (!qdev->monitors_config ||
+	    qdev->monitors_config->max_allowed <= i)
+		return;
+
+	head.id = i;
+	head.flags = 0;
+	oldcount = qdev->monitors_config->count;
+	if (crtc->state->active) {
+		struct drm_display_mode *mode = &crtc->mode;
+		head.width = mode->hdisplay;
+		head.height = mode->vdisplay;
+		head.x = crtc->x;
+		head.y = crtc->y;
+		if (qdev->monitors_config->count < i + 1)
+			qdev->monitors_config->count = i + 1;
+	} else if (i > 0) {
+		head.width = 0;
+		head.height = 0;
+		head.x = 0;
+		head.y = 0;
+		if (qdev->monitors_config->count == i + 1)
+			qdev->monitors_config->count = i;
+	} else {
+		DRM_DEBUG_KMS("inactive head 0, skip (%s)\n", reason);
+		return;
+	}
+
+	if (head.width  == qdev->monitors_config->heads[i].width  &&
+	    head.height == qdev->monitors_config->heads[i].height &&
+	    head.x      == qdev->monitors_config->heads[i].x      &&
+	    head.y      == qdev->monitors_config->heads[i].y      &&
+	    oldcount    == qdev->monitors_config->count)
+		return;
+
+	DRM_DEBUG_KMS("head %d, %dx%d, at +%d+%d, %s (%s)\n",
+		      i, head.width, head.height, head.x, head.y,
+		      crtc->state->active ? "on" : "off", reason);
+	if (oldcount != qdev->monitors_config->count)
+		DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n",
+			      oldcount, qdev->monitors_config->count,
+			      qdev->monitors_config->max_allowed);
+
+	qdev->monitors_config->heads[i] = head;
+	qxl_send_monitors_config(qdev);
+}
+
 static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
 				  struct drm_crtc_state *old_crtc_state)
 {
@@ -296,6 +356,8 @@ static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
 		drm_crtc_send_vblank_event(crtc, event);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
+
+	qxl_crtc_update_monitors_config(crtc, "flush");
 }
 
 static void qxl_crtc_destroy(struct drm_crtc *crtc)
@@ -401,55 +463,20 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static void qxl_monitors_config_set(struct qxl_device *qdev,
-				    int index,
-				    unsigned x, unsigned y,
-				    unsigned width, unsigned height,
-				    unsigned surf_id)
-{
-	DRM_DEBUG_KMS("%d:%dx%d+%d+%d\n", index, width, height, x, y);
-	qdev->monitors_config->heads[index].x = x;
-	qdev->monitors_config->heads[index].y = y;
-	qdev->monitors_config->heads[index].width = width;
-	qdev->monitors_config->heads[index].height = height;
-	qdev->monitors_config->heads[index].surface_id = surf_id;
-
-}
-
-static void qxl_mode_set_nofb(struct drm_crtc *crtc)
-{
-	struct qxl_device *qdev = crtc->dev->dev_private;
-	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
-	struct drm_display_mode *mode = &crtc->mode;
-
-	DRM_DEBUG("Mode set (%d,%d)\n",
-		  mode->hdisplay, mode->vdisplay);
-
-	qxl_monitors_config_set(qdev, qcrtc->index, 0, 0,
-				mode->hdisplay,	mode->vdisplay, 0);
-
-}
-
 static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
 				   struct drm_crtc_state *old_state)
 {
-	DRM_DEBUG("\n");
+	qxl_crtc_update_monitors_config(crtc, "enable");
 }
 
 static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
 				    struct drm_crtc_state *old_state)
 {
-	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
-	struct qxl_device *qdev = crtc->dev->dev_private;
-
-	qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0);
-
-	qxl_send_monitors_config(qdev);
+	qxl_crtc_update_monitors_config(crtc, "disable");
 }
 
 static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
 	.mode_fixup = qxl_crtc_mode_fixup,
-	.mode_set_nofb = qxl_mode_set_nofb,
 	.atomic_flush = qxl_crtc_atomic_flush,
 	.atomic_enable = qxl_crtc_atomic_enable,
 	.atomic_disable = qxl_crtc_atomic_disable,
@@ -939,61 +966,8 @@ static void qxl_enc_prepare(struct drm_encoder *encoder)
 	DRM_DEBUG("\n");
 }
 
-static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
-		struct drm_encoder *encoder)
-{
-	int i;
-	struct qxl_output *output = drm_encoder_to_qxl_output(encoder);
-	struct qxl_head *head;
-	struct drm_display_mode *mode;
-
-	BUG_ON(!encoder);
-	/* TODO: ugly, do better */
-	i = output->index;
-	if (!qdev->monitors_config ||
-	    qdev->monitors_config->max_allowed <= i) {
-		DRM_ERROR(
-		"head number too large or missing monitors config: %p, %d",
-		qdev->monitors_config,
-		qdev->monitors_config ?
-			qdev->monitors_config->max_allowed : -1);
-		return;
-	}
-	if (!encoder->crtc) {
-		DRM_ERROR("missing crtc on encoder %p\n", encoder);
-		return;
-	}
-	if (i != 0)
-		DRM_DEBUG("missing for multiple monitors: no head holes\n");
-	head = &qdev->monitors_config->heads[i];
-	head->id = i;
-	if (encoder->crtc->enabled) {
-		mode = &encoder->crtc->mode;
-		head->width = mode->hdisplay;
-		head->height = mode->vdisplay;
-		head->x = encoder->crtc->x;
-		head->y = encoder->crtc->y;
-		if (qdev->monitors_config->count < i + 1)
-			qdev->monitors_config->count = i + 1;
-	} else {
-		head->width = 0;
-		head->height = 0;
-		head->x = 0;
-		head->y = 0;
-	}
-	DRM_DEBUG_KMS("setting head %d to +%d+%d %dx%d out of %d\n",
-		      i, head->x, head->y, head->width, head->height, qdev->monitors_config->count);
-	head->flags = 0;
-	/* TODO - somewhere else to call this for multiple monitors
-	 * (config_commit?) */
-	qxl_send_monitors_config(qdev);
-}
-
 static void qxl_enc_commit(struct drm_encoder *encoder)
 {
-	struct qxl_device *qdev = encoder->dev->dev_private;
-
-	qxl_write_monitors_config_for_encoder(qdev, encoder);
 	DRM_DEBUG("\n");
 }
 
@@ -1080,8 +1054,6 @@ static enum drm_connector_status qxl_conn_detect(
 		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
 
 	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
-	if (!connected)
-		qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);
 
 	return connected ? connector_status_connected
 			 : connector_status_disconnected;

From cc4e44d5156933272df02d27f99322100e1edd1d Mon Sep 17 00:00:00 2001
From: Gerd Hoffmann <kraxel@redhat.com>
Date: Fri, 20 Apr 2018 09:19:04 +0200
Subject: [PATCH 0529/1286] qxl: drop dummy functions

These days drm core checks function pointers everywhere before calling
them.  So we can drop a bunch of dummy functions now.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180420071904.24276-5-kraxel@redhat.com
---
 drivers/gpu/drm/qxl/qxl_display.c | 50 -------------------------------
 1 file changed, 50 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 58959733ae16..b8cda9449241 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -456,13 +456,6 @@ qxl_framebuffer_init(struct drm_device *dev,
 	return 0;
 }
 
-static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
 				   struct drm_crtc_state *old_state)
 {
@@ -476,7 +469,6 @@ static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
-	.mode_fixup = qxl_crtc_mode_fixup,
 	.atomic_flush = qxl_crtc_atomic_flush,
 	.atomic_enable = qxl_crtc_atomic_enable,
 	.atomic_disable = qxl_crtc_atomic_disable,
@@ -620,12 +612,6 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
 	}
 }
 
-static int qxl_plane_atomic_check(struct drm_plane *plane,
-				  struct drm_plane_state *state)
-{
-	return 0;
-}
-
 static void qxl_cursor_atomic_update(struct drm_plane *plane,
 				     struct drm_plane_state *old_state)
 {
@@ -831,7 +817,6 @@ static const uint32_t qxl_cursor_plane_formats[] = {
 };
 
 static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = {
-	.atomic_check = qxl_plane_atomic_check,
 	.atomic_update = qxl_cursor_atomic_update,
 	.atomic_disable = qxl_cursor_atomic_disable,
 	.prepare_fb = qxl_plane_prepare_fb,
@@ -956,28 +941,6 @@ free_mem:
 	return r;
 }
 
-static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
-{
-	DRM_DEBUG("\n");
-}
-
-static void qxl_enc_prepare(struct drm_encoder *encoder)
-{
-	DRM_DEBUG("\n");
-}
-
-static void qxl_enc_commit(struct drm_encoder *encoder)
-{
-	DRM_DEBUG("\n");
-}
-
-static void qxl_enc_mode_set(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	DRM_DEBUG("\n");
-}
-
 static int qxl_conn_get_modes(struct drm_connector *connector)
 {
 	unsigned pwidth = 1024;
@@ -1023,10 +986,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
 
 
 static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
-	.dpms = qxl_enc_dpms,
-	.prepare = qxl_enc_prepare,
-	.mode_set = qxl_enc_mode_set,
-	.commit = qxl_enc_commit,
 };
 
 static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
@@ -1059,14 +1018,6 @@ static enum drm_connector_status qxl_conn_detect(
 			 : connector_status_disconnected;
 }
 
-static int qxl_conn_set_property(struct drm_connector *connector,
-				   struct drm_property *property,
-				   uint64_t value)
-{
-	DRM_DEBUG("\n");
-	return 0;
-}
-
 static void qxl_conn_destroy(struct drm_connector *connector)
 {
 	struct qxl_output *qxl_output =
@@ -1081,7 +1032,6 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = qxl_conn_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
-	.set_property = qxl_conn_set_property,
 	.destroy = qxl_conn_destroy,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,

From c2af73645d3a67cc2e4a750179048a4c6d5110a1 Mon Sep 17 00:00:00 2001
From: Philippe CORNU <philippe.cornu@st.com>
Date: Tue, 17 Apr 2018 13:34:41 +0200
Subject: [PATCH 0530/1286] drm/stm: ltdc: fix deferred endpoint management
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When the driver for one of the endpoints is deferred due to probe
dependencies (i2c, spi...) but the other one is ready, the ltdc probe
continues and the deferred driver will never be probed again.

The fix consists of waiting for all deferred endpoints before
continuing the ltdc probe.
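
For instance (hypothetical setup): with a DSI panel on endpoint 0 whose
bridge driver is not yet available and an HDMI bridge on endpoint 1 that is
ready, the old code would carry on with only the HDMI path and the panel
would never come up; with this change the whole ltdc probe is deferred and
retried once the missing driver appears.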

Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Reviewed-by: Yannick Fertré <yannick.fertre@st.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180417113441.8214-1-philippe.cornu@st.com
---
 drivers/gpu/drm/stm/ltdc.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index e3121d9e4230..014cef8cef37 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -987,14 +987,13 @@ int ltdc_load(struct drm_device *ddev)
 						  &bridge[i]);
 
 		/*
-		 * If at least one endpoint is ready, continue probing,
-		 * else if at least one endpoint is -EPROBE_DEFER and
-		 * there is no previous ready endpoints, defer probing.
+		 * If at least one endpoint is -EPROBE_DEFER, defer probing,
+		 * else if at least one endpoint is ready, continue probing.
 		 */
-		if (!ret)
+		if (ret == -EPROBE_DEFER)
+			return ret;
+		else if (!ret)
 			endpoint_not_ready = 0;
-		else if (ret == -EPROBE_DEFER && endpoint_not_ready)
-			endpoint_not_ready = -EPROBE_DEFER;
 	}
 
 	if (endpoint_not_ready)

From 0cefff963bf2af9ec9b2e2b537c3d3e6a43ceb9b Mon Sep 17 00:00:00 2001
From: Philippe CORNU <philippe.cornu@st.com>
Date: Tue, 17 Apr 2018 13:40:26 +0200
Subject: [PATCH 0531/1286] drm/stm: ltdc: add mode_valid()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a mode_valid() function to filter modes according to the available
pll clock values and to "preferred" modes. It is particularly
useful for hdmi modes that require precise pixel clocks.

Note that "preferred" modes are always accepted:
- this is important for panels because panel clock tolerances are
  bigger than hdmi ones and there is no reason not to accept them
  (the fps may vary a little but it is not a problem).
- the hdmi preferred mode will be accepted too, but userland will
  be able to use other hdmi "valid" modes if necessary.
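
As a worked example (illustrative numbers only): a 1080p@60 HDMI mode has a
148.5 MHz pixel clock, so the target is 148500000 Hz and, with
CLK_TOLERANCE_HZ set to 50, the mode is rejected with MODE_CLOCK_RANGE unless
clk_round_rate() returns a rate within [148499950, 148500050] Hz; a mode
flagged DRM_MODE_TYPE_PREFERRED skips this check entirely.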

Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Reviewed-by: Yannick Fertré <yannick.fertre@st.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180417114026.8709-1-philippe.cornu@st.com
---
 drivers/gpu/drm/stm/ltdc.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 014cef8cef37..616191fe98ae 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -445,6 +445,43 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
 	reg_set(ldev->regs, LTDC_SRCR, SRCR_IMR);
 }
 
+#define CLK_TOLERANCE_HZ 50
+
+static enum drm_mode_status
+ltdc_crtc_mode_valid(struct drm_crtc *crtc,
+		     const struct drm_display_mode *mode)
+{
+	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+	int target = mode->clock * 1000;
+	int target_min = target - CLK_TOLERANCE_HZ;
+	int target_max = target + CLK_TOLERANCE_HZ;
+	int result;
+
+	/*
+	 * Accept all "preferred" modes:
+	 * - this is important for panels because panel clock tolerances are
+	 *   bigger than hdmi ones and there is no reason to not accept them
+	 *   (the fps may vary a little but it is not a problem).
+	 * - the hdmi preferred mode will be accepted too, but userland will
+	 *   be able to use others hdmi "valid" modes if necessary.
+	 */
+	if (mode->type & DRM_MODE_TYPE_PREFERRED)
+		return MODE_OK;
+
+	result = clk_round_rate(ldev->pixel_clk, target);
+
+	DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
+
+	/*
+	 * Filter modes according to the clock value, particularly useful for
+	 * hdmi modes that require precise pixel clocks.
+	 */
+	if (result < target_min || result > target_max)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
 static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc,
 				 const struct drm_display_mode *mode,
 				 struct drm_display_mode *adjusted_mode)
@@ -559,6 +596,7 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
 }
 
 static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
+	.mode_valid = ltdc_crtc_mode_valid,
 	.mode_fixup = ltdc_crtc_mode_fixup,
 	.mode_set_nofb = ltdc_crtc_mode_set_nofb,
 	.atomic_flush = ltdc_crtc_atomic_flush,

From cccb57d8fdc9332c14f451e96a9604fa02a5bed2 Mon Sep 17 00:00:00 2001
From: Philippe CORNU <philippe.cornu@st.com>
Date: Thu, 19 Apr 2018 15:28:04 +0200
Subject: [PATCH 0532/1286] drm/stm: ltdc: fix warnings in ltdc_plane_create()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

"make C=1" returns 2 warnings in ltdc_plane_create()
("Using plain integer as NULL pointer"). This patch
fixes them.

Signed-off-by: Philippe Cornu <philippe.cornu@st.com>
Reviewed-by: Yannick Fertré <yannick.fertre@st.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180419132804.8317-1-philippe.cornu@st.com
---
 drivers/gpu/drm/stm/ltdc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 616191fe98ae..d997a6014d6c 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -860,13 +860,13 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
 
 	plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
 	if (!plane)
-		return 0;
+		return NULL;
 
 	ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
 				       &ltdc_plane_funcs, formats, nb_fmt,
 				       NULL, type, NULL);
 	if (ret < 0)
-		return 0;
+		return NULL;
 
 	drm_plane_helper_add(plane, &ltdc_plane_helper_funcs);
 

From f6d3e06f074721ad3a231df745d85b60428c1f03 Mon Sep 17 00:00:00 2001
From: Ian W MORRISON <ianwmorrison@gmail.com>
Date: Wed, 11 Apr 2018 14:42:13 +1000
Subject: [PATCH 0533/1286] drm/i915/glk: Add MODULE_FIRMWARE for Geminilake

As the Geminilake firmware is now merged into linux-firmware.git,
use MODULE_FIRMWARE to load the firmware.

This removes the error message in the dmesg log:

    i915 0000:00:02.0: Direct firmware load for
        i915/glk_dmc_ver1_04.bin failed with error -2
    i915 0000:00:02.0: Failed to load DMC firmware
        i915/glk_dmc_ver1_04.bin. Disabling runtime power management.
    i915 0000:00:02.0: DMC firmware homepage:
        https://01.org/linuxgraphics/downloads/firmware

and now shows that the firmware has correctly loaded:

    [drm] Finished loading DMC firmware i915/glk_dmc_ver1_04.bin (v1.4)

Signed-off-by: Ian W MORRISON <ianwmorrison@gmail.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180411044213.383-1-ianwmorrison@gmail.com
---
 drivers/gpu/drm/i915/intel_csr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 41e6c75a7f3c..f9550ea46c26 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -35,6 +35,7 @@
  */
 
 #define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
+MODULE_FIRMWARE(I915_CSR_GLK);
 #define GLK_CSR_VERSION_REQUIRED	CSR_VERSION(1, 4)
 
 #define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"

From ebb513adb1bed9a2fa5a9db4363b009ed407879a Mon Sep 17 00:00:00 2001
From: Manasi Navare <manasi.d.navare@intel.com>
Date: Thu, 26 Apr 2018 12:27:48 -0700
Subject: [PATCH 0534/1286] drm/dp: Rename the edp_sdp_header as dp_sdp_header
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

No functional changes in this patch.

The SDP header is a generic header for secondary data packets for
both eDP and DP, so call it dp_sdp_header. This header is already used
for the different SDP types defined.
Also, header bytes 2 and 3 are secondary-data-packet-specific header
bytes, so change the comment to indicate that.

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1524770868-16869-1-git-send-email-manasi.d.navare@intel.com
---
 include/drm/drm_dp_helper.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 62903bae0221..930919f74af5 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -967,18 +967,18 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw);
 #define DP_SDP_VSC_EXT_CEA		0x21 /* DP 1.4 */
 /* 0x80+ CEA-861 infoframe types */
 
-struct edp_sdp_header {
+struct dp_sdp_header {
 	u8 HB0; /* Secondary Data Packet ID */
 	u8 HB1; /* Secondary Data Packet Type */
-	u8 HB2; /* 7:5 reserved, 4:0 revision number */
-	u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
+	u8 HB2; /* Secondary Data Packet Specific header, Byte 0 */
+	u8 HB3; /* Secondary Data packet Specific header, Byte 1 */
 } __packed;
 
 #define EDP_SDP_HEADER_REVISION_MASK		0x1F
 #define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES	0x1F
 
 struct edp_vsc_psr {
-	struct edp_sdp_header sdp_header;
+	struct dp_sdp_header sdp_header;
 	u8 DB0; /* Stereo Interface */
 	u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
 	u8 DB2; /* CRC value bits 7:0 of the R or Cr component */

From 30e9db6d046ba667070e5a011a13951830d60a6e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Fri, 16 Mar 2018 21:04:20 +0200
Subject: [PATCH 0535/1286] drm: Don't pass the index to
 drm_property_add_enum()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

drm_property_add_enum() can calculate the index itself just fine,
so no point in having the caller pass it in.
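
A minimal sketch of a call site after this change (hypothetical property and
tables, not taken from this patch):

	/* before: the caller had to track the enum index itself */
	for (i = 0; i < n; i++)
		drm_property_add_enum(prop, i, values[i], names[i]);

	/* after: the property counts its enum entries internally */
	for (i = 0; i < n; i++)
		drm_property_add_enum(prop, values[i], names[i]);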

Cc: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: nouveau@lists.freedesktop.org
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180316190420.26734-1-ville.syrjala@linux.intel.com
Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 drivers/gpu/drm/drm_connector.c           |  6 ++---
 drivers/gpu/drm/drm_property.c            | 27 +++++++++++------------
 drivers/gpu/drm/gma500/cdv_device.c       |  4 ++--
 drivers/gpu/drm/gma500/psb_intel_sdvo.c   |  2 +-
 drivers/gpu/drm/i915/intel_sdvo.c         |  5 ++---
 drivers/gpu/drm/nouveau/nouveau_display.c |  4 +---
 include/drm/drm_property.h                |  2 +-
 7 files changed, 23 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index b3cde897cd80..dfc8ca1e9413 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1069,7 +1069,7 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
 		goto nomem;
 
 	for (i = 0; i < num_modes; i++)
-		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+		drm_property_add_enum(dev->mode_config.tv_mode_property,
 				      i, modes[i]);
 
 	dev->mode_config.tv_brightness_property =
@@ -1156,7 +1156,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_property *scaling_mode_property;
-	int i, j = 0;
+	int i;
 	const unsigned valid_scaling_mode_mask =
 		(1U << ARRAY_SIZE(drm_scaling_mode_enum_list)) - 1;
 
@@ -1177,7 +1177,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
 		if (!(BIT(i) & scaling_mode_mask))
 			continue;
 
-		ret = drm_property_add_enum(scaling_mode_property, j++,
+		ret = drm_property_add_enum(scaling_mode_property,
 					    drm_scaling_mode_enum_list[i].type,
 					    drm_scaling_mode_enum_list[i].name);
 
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 8f4672daac7f..1f8031e30f53 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -169,9 +169,9 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev,
 		return NULL;
 
 	for (i = 0; i < num_values; i++) {
-		ret = drm_property_add_enum(property, i,
-				      props[i].type,
-				      props[i].name);
+		ret = drm_property_add_enum(property,
+					    props[i].type,
+					    props[i].name);
 		if (ret) {
 			drm_property_destroy(dev, property);
 			return NULL;
@@ -209,7 +209,7 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 						 uint64_t supported_bits)
 {
 	struct drm_property *property;
-	int i, ret, index = 0;
+	int i, ret;
 	int num_values = hweight64(supported_bits);
 
 	flags |= DRM_MODE_PROP_BITMASK;
@@ -221,14 +221,9 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 		if (!(supported_bits & (1ULL << props[i].type)))
 			continue;
 
-		if (WARN_ON(index >= num_values)) {
-			drm_property_destroy(dev, property);
-			return NULL;
-		}
-
-		ret = drm_property_add_enum(property, index++,
-				      props[i].type,
-				      props[i].name);
+		ret = drm_property_add_enum(property,
+					    props[i].type,
+					    props[i].name);
 		if (ret) {
 			drm_property_destroy(dev, property);
 			return NULL;
@@ -376,7 +371,6 @@ EXPORT_SYMBOL(drm_property_create_bool);
 /**
  * drm_property_add_enum - add a possible value to an enumeration property
  * @property: enumeration property to change
- * @index: index of the new enumeration
  * @value: value of the new enumeration
  * @name: symbolic name of the new enumeration
  *
@@ -388,10 +382,11 @@ EXPORT_SYMBOL(drm_property_create_bool);
  * Returns:
  * Zero on success, error code on failure.
  */
-int drm_property_add_enum(struct drm_property *property, int index,
+int drm_property_add_enum(struct drm_property *property,
 			  uint64_t value, const char *name)
 {
 	struct drm_property_enum *prop_enum;
+	int index = 0;
 
 	if (WARN_ON(strlen(name) >= DRM_PROP_NAME_LEN))
 		return -EINVAL;
@@ -411,8 +406,12 @@ int drm_property_add_enum(struct drm_property *property, int index,
 	list_for_each_entry(prop_enum, &property->enum_list, head) {
 		if (WARN_ON(prop_enum->value == value))
 			return -EINVAL;
+		index++;
 	}
 
+	if (WARN_ON(index >= property->num_values))
+		return -EINVAL;
+
 	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
 	if (!prop_enum)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3a3bf752e03a..34b85767e4da 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -485,7 +485,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
 			return;
 
 		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
-			drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+			drm_property_add_enum(prop, i-1, force_audio_names[i]);
 
 		dev_priv->force_audio_property = prop;
 	}
@@ -514,7 +514,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 			return;
 
 		for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
-			drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+			drm_property_add_enum(prop, i, broadcast_rgb_names[i]);
 
 		dev_priv->broadcast_rgb_property = prop;
 	}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 8dc2b19f913b..f2ee6aa10afa 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -2281,7 +2281,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
 
 	for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
 		drm_property_add_enum(
-				psb_intel_sdvo_connector->tv_format, i,
+				psb_intel_sdvo_connector->tv_format,
 				i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
 
 	psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 96e213ec202d..25005023c243 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2779,9 +2779,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 		return false;
 
 	for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
-		drm_property_add_enum(
-				intel_sdvo_connector->tv_format, i,
-				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+		drm_property_add_enum(intel_sdvo_connector->tv_format, i,
+				      tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
 	intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
 	drm_object_attach_property(&intel_sdvo_connector->base.base.base,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 009713404cc4..7d0bec8dd03d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -338,11 +338,9 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
 	if (c) {                                                               \
 		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
 		l = (list);                                                    \
-		c = 0;                                                         \
 		while (p && l->gen_mask) {                                     \
 			if (l->gen_mask & (1 << (gen))) {                      \
-				drm_property_add_enum(p, c, l->type, l->name); \
-				c++;                                           \
+				drm_property_add_enum(p, l->type, l->name);    \
 			}                                                      \
 			l++;                                                   \
 		}                                                              \
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index ab8167baade5..1d5c0b2a8956 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -260,7 +260,7 @@ struct drm_property *drm_property_create_object(struct drm_device *dev,
 						uint32_t type);
 struct drm_property *drm_property_create_bool(struct drm_device *dev,
 					      u32 flags, const char *name);
-int drm_property_add_enum(struct drm_property *property, int index,
+int drm_property_add_enum(struct drm_property *property,
 			  uint64_t value, const char *name);
 void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
 

From 077ef1f09c2528b81366ae9a2a969ea35c475027 Mon Sep 17 00:00:00 2001
From: James Ausmus <james.ausmus@intel.com>
Date: Wed, 28 Mar 2018 14:57:56 -0700
Subject: [PATCH 0536/1286] drm/i915/icl: Don't set pipe CSC/Gamma in
 PLANE_COLOR_CTL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

These fields have been deprecated and moved in ICL+. Stop setting the
bits.

They have moved to GAMMA_MODE and CSC_MODE, respectively. This patch
is just to stop incorrectly setting bits in PLANE_COLOR_CTL while
we're waiting for the new replacement functionality to be done.

v2: Drop useless comment, and change !(GEN >= 11) to (GEN < 11). (Ville)

v3: No changes

v4 (from Paulo): Rebase.

Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: James Ausmus <james.ausmus@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328215803.13835-2-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h      | 4 ++--
 drivers/gpu/drm/i915/intel_display.c | 8 ++++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 391825ae2361..8c322ff1c3e4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6407,9 +6407,9 @@ enum {
 #define _PLANE_COLOR_CTL_1_A			0x701CC /* GLK+ */
 #define _PLANE_COLOR_CTL_2_A			0x702CC /* GLK+ */
 #define _PLANE_COLOR_CTL_3_A			0x703CC /* GLK+ */
-#define   PLANE_COLOR_PIPE_GAMMA_ENABLE		(1 << 30)
+#define   PLANE_COLOR_PIPE_GAMMA_ENABLE		(1 << 30) /* Pre-ICL */
 #define   PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE	(1 << 28)
-#define   PLANE_COLOR_PIPE_CSC_ENABLE		(1 << 23)
+#define   PLANE_COLOR_PIPE_CSC_ENABLE		(1 << 23) /* Pre-ICL */
 #define   PLANE_COLOR_CSC_MODE_BYPASS			(0 << 17)
 #define   PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709		(1 << 17)
 #define   PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709		(2 << 17)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 687e70110800..efa8822f63d1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3623,11 +3623,15 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
 			const struct intel_plane_state *plane_state)
 {
+	struct drm_i915_private *dev_priv =
+		to_i915(plane_state->base.plane->dev);
 	const struct drm_framebuffer *fb = plane_state->base.fb;
 	u32 plane_color_ctl = 0;
 
-	plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
-	plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+	if (INTEL_GEN(dev_priv) < 11) {
+		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+	}
 	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
 	plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
 

From 74bd8004e475d67eb41f6795cda5efac03d010b8 Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Thu, 26 Apr 2018 19:55:15 +0530
Subject: [PATCH 0537/1286] drm/i915/icl: track dbuf slice-2 status

This patch adds support for tracking the status of the DBUF slices.
This is the foundation for enabling/disabling the second DBUF slice
dynamically on ICL.

Changes Since V1:
 - use kernel type u8 over uint8_t

Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: James Ausmus <james.ausmus@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426142517.16643-2-mahesh1.kumar@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h         |  1 +
 drivers/gpu/drm/i915/intel_display.c    |  5 +++++
 drivers/gpu/drm/i915/intel_pm.c         | 20 ++++++++++++++++++++
 drivers/gpu/drm/i915/intel_runtime_pm.c |  4 ++++
 4 files changed, 30 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8444ca8d5aa3..193176bcddf5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1189,6 +1189,7 @@ struct skl_ddb_allocation {
 	/* packed/y */
 	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
 	struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
+	u8 enabled_slices; /* GEN11 has configurable 2 slices */
 };
 
 struct skl_ddb_values {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index efa8822f63d1..338570e61a1f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11447,6 +11447,11 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
+	if (INTEL_GEN(dev_priv) >= 11)
+		if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
+			DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
+				  sw_ddb->enabled_slices,
+				  hw_ddb.enabled_slices);
 	/* planes */
 	for_each_universal_plane(dev_priv, pipe, plane) {
 		hw_plane_wm = &hw_wm.planes[plane];
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4baab858e442..a29e6d512771 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3567,6 +3567,23 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
+static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+{
+	u8 enabled_slices;
+
+	/* Slice 1 will always be enabled */
+	enabled_slices = 1;
+
+	/* Gen prior to GEN11 have only one DBuf slice */
+	if (INTEL_GEN(dev_priv) < 11)
+		return enabled_slices;
+
+	if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+		enabled_slices++;
+
+	return enabled_slices;
+}
+
 /*
  * FIXME: We still don't have the proper code detect if we need to apply the WA,
  * so assume we'll always need it in order to avoid underruns.
@@ -3870,6 +3887,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 
 	memset(ddb, 0, sizeof(*ddb));
 
+	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		enum intel_display_power_domain power_domain;
 		enum plane_id plane_id;
@@ -5088,6 +5107,7 @@ skl_copy_ddb_for_pipe(struct skl_ddb_values *dst,
 	       sizeof(dst->ddb.uv_plane[pipe]));
 	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
 	       sizeof(dst->ddb.plane[pipe]));
+	dst->ddb.enabled_slices = src->ddb.enabled_slices;
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ec59992cf87a..afc6ef81ca0c 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2656,6 +2656,8 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
 	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
 		DRM_ERROR("DBuf power enable timeout\n");
+	else
+		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
 }
 
 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -2669,6 +2671,8 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
 	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
 		DRM_ERROR("DBuf power disable timeout!\n");
+	else
+		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
 }
 
 static void icl_mbus_init(struct drm_i915_private *dev_priv)

From aa9664ffe863f470efdbe40ea20ce96f2887ebcd Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Thu, 26 Apr 2018 19:55:16 +0530
Subject: [PATCH 0538/1286] drm/i915/icl: Enable 2nd DBuf slice only when
 needed

ICL has two DBuf slices, each 1024 blocks in size.
We should not always enable slice-2; it should be enabled only if the
total required display bandwidth is > 12 GBps OR more than one pipe is
enabled.
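
For instance (illustrative numbers, not taken from the patch): a single
3840x2160 ARGB8888 plane contributes roughly 3840 * 2160 * 4 bytes of
relative data rate per frame; at 60 Hz that is about 2 GBps, well below the
12 GBps single-slice limit, so one slice suffices as long as only that pipe
is active. A second active pipe, or a total bandwidth above 12 GBps, turns
the second slice on.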

Changes since V1:
 - typecast total_data_rate to u64 before multiplication to solve any
   possible overflow (Rodrigo)
 - fix where skl_wm_get_hw_state was memsetting ddb, resulting in
   enabled_slices becoming zero
 - Fix the logic of calculating ddb_size
Changes since V2:
 - If no-crtc is part of commit required_slices will have value "0",
   don't try to disable DBuf slice.
Changes since V3:
 - Create a generic helper to enable/disable slice
 - don't return early if total_data_rate is 0; it may be a cursor-only
   commit, or an atomic modeset without any plane.
Changes since V4:
 - Solve checkpatch warnings
 - use kernel types u8/u64 instead of uint8_t/uint64_t
Changes since V5:
 - Rebase

Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426142517.16643-3-mahesh1.kumar@intel.com
---
 drivers/gpu/drm/i915/intel_display.c    | 10 ++++
 drivers/gpu/drm/i915/intel_drv.h        |  6 +++
 drivers/gpu/drm/i915/intel_pm.c         | 57 ++++++++++++++++----
 drivers/gpu/drm/i915/intel_runtime_pm.c | 69 +++++++++++++++++++------
 4 files changed, 115 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 338570e61a1f..84ce66be88f2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12258,6 +12258,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 	bool progress;
 	enum pipe pipe;
 	int i;
+	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
 
 	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
 
@@ -12266,6 +12268,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 		if (new_crtc_state->active)
 			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
 
+	/* If 2nd DBuf slice required, enable it here */
+	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, required_slices);
+
 	/*
 	 * Whenever the number of active pipes changes, we need to make sure we
 	 * update the pipes in the right order so that their ddb allocations
@@ -12316,6 +12322,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 			progress = true;
 		}
 	} while (progress);
+
+	/* If 2nd DBuf slice is no more required disable it */
+	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, required_slices);
 }
 
 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9bba0354ccd3..11a1932cde6e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -144,6 +144,10 @@
 #define KHz(x) (1000 * (x))
 #define MHz(x) KHz(1000 * (x))
 
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((u64)1000 * MBps((x)))
+
 /*
  * Display related stuff
  */
@@ -1931,6 +1935,8 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 					enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+			    u8 req_slices);
 
 static inline void
 assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a29e6d512771..3e72e9eb736e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3771,9 +3771,42 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 	return true;
 }
 
+static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv,
+				       const struct intel_crtc_state *cstate,
+				       const unsigned int total_data_rate,
+				       const int num_active,
+				       struct skl_ddb_allocation *ddb)
+{
+	const struct drm_display_mode *adjusted_mode;
+	u64 total_data_bw;
+	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+
+	WARN_ON(ddb_size == 0);
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return ddb_size - 4; /* 4 blocks for bypass path allocation */
+
+	adjusted_mode = &cstate->base.adjusted_mode;
+	total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+
+	/*
+	 * 12GB/s is maximum BW supported by single DBuf slice.
+	 */
+	if (total_data_bw >= GBps(12) || num_active > 1) {
+		ddb->enabled_slices = 2;
+	} else {
+		ddb->enabled_slices = 1;
+		ddb_size /= 2;
+	}
+
+	return ddb_size;
+}
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 				   const struct intel_crtc_state *cstate,
+				   const unsigned int total_data_rate,
+				   struct skl_ddb_allocation *ddb,
 				   struct skl_ddb_entry *alloc, /* out */
 				   int *num_active /* out */)
 {
@@ -3796,11 +3829,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 	else
 		*num_active = hweight32(dev_priv->active_crtcs);
 
-	ddb_size = INTEL_INFO(dev_priv)->ddb_size;
-	WARN_ON(ddb_size == 0);
-
-	if (INTEL_GEN(dev_priv) < 11)
-		ddb_size -= 4; /* 4 blocks for bypass path allocation */
+	ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+				      *num_active, ddb);
 
 	/*
 	 * If the state doesn't change the active CRTC's, then there's
@@ -4261,7 +4291,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		return 0;
 	}
 
-	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
+	total_data_rate = skl_get_total_relative_data_rate(cstate,
+							   plane_data_rate,
+							   uv_plane_data_rate);
+	skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
+					   alloc, &num_active);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
@@ -4296,9 +4330,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 *
 	 * FIXME: we may not allocate every single block here.
 	 */
-	total_data_rate = skl_get_total_relative_data_rate(cstate,
-							   plane_data_rate,
-							   uv_plane_data_rate);
 	if (total_data_rate == 0)
 		return 0;
 
@@ -5492,8 +5523,12 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 		/* Fully recompute DDB on first atomic commit */
 		dev_priv->wm.distrust_bios_wm = true;
 	} else {
-		/* Easy/common case; just sanitize DDB now if everything off */
-		memset(ddb, 0, sizeof(*ddb));
+		/*
+		 * Easy/common case; just sanitize DDB now if everything off
+		 * Keep dbuf slice info intact
+		 */
+		memset(ddb->plane, 0, sizeof(ddb->plane));
+		memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index afc6ef81ca0c..3fffbfe4521d 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2619,32 +2619,69 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }
 
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+static inline
+bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+			  i915_reg_t reg, bool enable)
 {
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
+	u32 val, status;
 
+	val = I915_READ(reg);
+	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
 	udelay(10);
 
-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power enable timeout\n");
+	status = I915_READ(reg) & DBUF_POWER_STATE;
+	if ((enable && !status) || (!enable && status)) {
+		DRM_ERROR("DBus power %s timeout!\n",
+			  enable ? "enable" : "disable");
+		return false;
+	}
+	return true;
+}
+
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
 }
 
 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
-
-	udelay(10);
-
-	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
-		DRM_ERROR("DBuf power disable timeout!\n");
+	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+}
+
+static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_GEN(dev_priv) < 11)
+		return 1;
+	return 2;
+}
+
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+			    u8 req_slices)
+{
+	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+	u32 val;
+	bool ret;
+
+	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+		DRM_ERROR("Invalid number of dbuf slices requested\n");
+		return;
+	}
+
+	if (req_slices == hw_enabled_slices || req_slices == 0)
+		return;
+
+	val = I915_READ(DBUF_CTL_S2);
+	if (req_slices > hw_enabled_slices)
+		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+	else
+		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+
+	if (ret)
+		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
 }
 
-/*
- * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
- * needed and keep it disabled as much as possible.
- */
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);

From 37cde11ba720cc485bbc784e9a34878d40a34e96 Mon Sep 17 00:00:00 2001
From: Mahesh Kumar <mahesh1.kumar@intel.com>
Date: Thu, 26 Apr 2018 19:55:17 +0530
Subject: [PATCH 0539/1286] drm/i915/icl: update ddb entry start/end mask
 during hw ddb readout

From Gen11/ICL onward, the ddb entry start/end mask is increased from
10 bits to 11 bits. This patch uses the proper mask for ICL+ during
hardware ddb value readout.
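
For example (illustrative register value): 0x07ff0400 decodes on ICL to
start = 0x400 (block 1024) and end = 0x7ff + 1 (block 2048), spanning the
second DBuf slice; the old 10-bit SKL_DDB_ENTRY_MASK would silently truncate
both fields.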

Changes since V1:
 - Use _MASK & _SHIFT macro (James)
Changes since V2:
 - use kernel type u8 instead of uint8_t
Changes since V3:
 - Rebase

Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426142517.16643-4-mahesh1.kumar@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h |  3 +++
 drivers/gpu/drm/i915/intel_pm.c | 26 +++++++++++++++++++-------
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 8c322ff1c3e4..115d7be12502 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6504,6 +6504,9 @@ enum {
 
 #define _PLANE_BUF_CFG_1_B			0x7127c
 #define _PLANE_BUF_CFG_2_B			0x7137c
+#define  SKL_DDB_ENTRY_MASK			0x3FF
+#define  ICL_DDB_ENTRY_MASK			0x7FF
+#define  DDB_ENTRY_END_SHIFT			16
 #define _PLANE_BUF_CFG_1(pipe)	\
 	_PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
 #define _PLANE_BUF_CFG_2(pipe)	\
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3e72e9eb736e..4126132eb707 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3864,10 +3864,18 @@ static unsigned int skl_cursor_allocation(int num_active)
 	return 8;
 }
 
-static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
+				       struct skl_ddb_entry *entry, u32 reg)
 {
-	entry->start = reg & 0x3ff;
-	entry->end = (reg >> 16) & 0x3ff;
+	u16 mask;
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		mask = ICL_DDB_ENTRY_MASK;
+	else
+		mask = SKL_DDB_ENTRY_MASK;
+	entry->start = reg & mask;
+	entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask;
+
 	if (entry->end)
 		entry->end += 1;
 }
@@ -3884,7 +3892,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
 	if (plane_id == PLANE_CURSOR) {
 		val = I915_READ(CUR_BUF_CFG(pipe));
-		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
+		skl_ddb_entry_init_from_hw(dev_priv,
+					   &ddb->plane[pipe][plane_id], val);
 		return;
 	}
 
@@ -3903,10 +3912,13 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 	val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
 
 	if (fourcc == DRM_FORMAT_NV12) {
-		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val2);
-		skl_ddb_entry_init_from_hw(&ddb->uv_plane[pipe][plane_id], val);
+		skl_ddb_entry_init_from_hw(dev_priv,
+					   &ddb->plane[pipe][plane_id], val2);
+		skl_ddb_entry_init_from_hw(dev_priv,
+					   &ddb->uv_plane[pipe][plane_id], val);
 	} else {
-		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
+		skl_ddb_entry_init_from_hw(dev_priv,
+					   &ddb->plane[pipe][plane_id], val);
 	}
 }
 

From 78b60ce7b96cf1869b51cee916a40041e400d6ce Mon Sep 17 00:00:00 2001
From: Paulo Zanoni <paulo.r.zanoni@intel.com>
Date: Wed, 28 Mar 2018 14:57:57 -0700
Subject: [PATCH 0540/1286] drm/i915/icl: add definitions for the ICL PLL
 registers

There's a lot of code for PLL enabling, so let's first introduce only
the register definitions in order to make patch review a little
easier.

v2: Coding style (Jani).
v3: Preparation for upstreaming.
v4: Fix MG_CLKTOP2_CORECLKCTL1 address and random typos (James).

Cc: James Ausmus <james.ausmus@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: James Ausmus <james.ausmus@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328215803.13835-3-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 149 ++++++++++++++++++++++++++++++++
 1 file changed, 149 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 115d7be12502..197c9660bbc1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8759,6 +8759,12 @@ enum skl_power_gate {
 #define  PORT_CLK_SEL_NONE		(7<<29)
 #define  PORT_CLK_SEL_MASK		(7<<29)
 
+/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
+#define DDI_CLK_SEL(port)		PORT_CLK_SEL(port)
+#define  DDI_CLK_SEL_NONE		(0x0 << 28)
+#define  DDI_CLK_SEL_MG			(0x8 << 28)
+#define  DDI_CLK_SEL_MASK		(0xF << 28)
+
 /* Transcoder clock selection */
 #define _TRANS_CLK_SEL_A		0x46140
 #define _TRANS_CLK_SEL_B		0x46144
@@ -8889,6 +8895,7 @@ enum skl_power_gate {
  * CNL Clocks
  */
 #define DPCLKA_CFGCR0				_MMIO(0x6C200)
+#define DPCLKA_CFGCR0_ICL			_MMIO(0x164280)
 #define  DPCLKA_CFGCR0_DDI_CLK_OFF(port)	(1 << ((port) ==  PORT_F ? 23 : \
 						      (port)+10))
 #define  DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)	((port) == PORT_F ? 21 : \
@@ -8905,10 +8912,141 @@ enum skl_power_gate {
 #define  PLL_POWER_STATE	(1 << 26)
 #define CNL_DPLL_ENABLE(pll)	_MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
 
+#define _MG_PLL1_ENABLE		0x46030
+#define _MG_PLL2_ENABLE		0x46034
+#define _MG_PLL3_ENABLE		0x46038
+#define _MG_PLL4_ENABLE		0x4603C
+/* Bits are the same as DPLL0_ENABLE */
+#define MG_PLL_ENABLE(port)	_MMIO_PORT((port) - PORT_C, _MG_PLL1_ENABLE, \
+					   _MG_PLL2_ENABLE)
+
+#define _MG_REFCLKIN_CTL_PORT1				0x16892C
+#define _MG_REFCLKIN_CTL_PORT2				0x16992C
+#define _MG_REFCLKIN_CTL_PORT3				0x16A92C
+#define _MG_REFCLKIN_CTL_PORT4				0x16B92C
+#define   MG_REFCLKIN_CTL_OD_2_MUX(x)			((x) << 8)
+#define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \
+					 _MG_REFCLKIN_CTL_PORT1, \
+					 _MG_REFCLKIN_CTL_PORT2)
+
+#define _MG_CLKTOP2_CORECLKCTL1_PORT1			0x1688D8
+#define _MG_CLKTOP2_CORECLKCTL1_PORT2			0x1698D8
+#define _MG_CLKTOP2_CORECLKCTL1_PORT3			0x16A8D8
+#define _MG_CLKTOP2_CORECLKCTL1_PORT4			0x16B8D8
+#define   MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x)		((x) << 16)
+#define   MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x)		((x) << 8)
+#define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \
+						_MG_CLKTOP2_CORECLKCTL1_PORT1, \
+						_MG_CLKTOP2_CORECLKCTL1_PORT2)
+
+#define _MG_CLKTOP2_HSCLKCTL_PORT1			0x1688D4
+#define _MG_CLKTOP2_HSCLKCTL_PORT2			0x1698D4
+#define _MG_CLKTOP2_HSCLKCTL_PORT3			0x16A8D4
+#define _MG_CLKTOP2_HSCLKCTL_PORT4			0x16B8D4
+#define   MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x)		((x) << 16)
+#define   MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x)	((x) << 14)
+#define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x)		((x) << 12)
+#define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x)		((x) << 8)
+#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
+					     _MG_CLKTOP2_HSCLKCTL_PORT1, \
+					     _MG_CLKTOP2_HSCLKCTL_PORT2)
+
+#define _MG_PLL_DIV0_PORT1				0x168A00
+#define _MG_PLL_DIV0_PORT2				0x169A00
+#define _MG_PLL_DIV0_PORT3				0x16AA00
+#define _MG_PLL_DIV0_PORT4				0x16BA00
+#define   MG_PLL_DIV0_FRACNEN_H				(1 << 30)
+#define   MG_PLL_DIV0_FBDIV_FRAC(x)			((x) << 8)
+#define   MG_PLL_DIV0_FBDIV_INT(x)			((x) << 0)
+#define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \
+				     _MG_PLL_DIV0_PORT2)
+
+#define _MG_PLL_DIV1_PORT1				0x168A04
+#define _MG_PLL_DIV1_PORT2				0x169A04
+#define _MG_PLL_DIV1_PORT3				0x16AA04
+#define _MG_PLL_DIV1_PORT4				0x16BA04
+#define   MG_PLL_DIV1_IREF_NDIVRATIO(x)			((x) << 16)
+#define   MG_PLL_DIV1_DITHER_DIV_1			(0 << 12)
+#define   MG_PLL_DIV1_DITHER_DIV_2			(1 << 12)
+#define   MG_PLL_DIV1_DITHER_DIV_4			(2 << 12)
+#define   MG_PLL_DIV1_DITHER_DIV_8			(3 << 12)
+#define   MG_PLL_DIV1_NDIVRATIO(x)			((x) << 4)
+#define   MG_PLL_DIV1_FBPREDIV(x)			((x) << 0)
+#define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \
+				     _MG_PLL_DIV1_PORT2)
+
+#define _MG_PLL_LF_PORT1				0x168A08
+#define _MG_PLL_LF_PORT2				0x169A08
+#define _MG_PLL_LF_PORT3				0x16AA08
+#define _MG_PLL_LF_PORT4				0x16BA08
+#define   MG_PLL_LF_TDCTARGETCNT(x)			((x) << 24)
+#define   MG_PLL_LF_AFCCNTSEL_256			(0 << 20)
+#define   MG_PLL_LF_AFCCNTSEL_512			(1 << 20)
+#define   MG_PLL_LF_GAINCTRL(x)				((x) << 16)
+#define   MG_PLL_LF_INT_COEFF(x)			((x) << 8)
+#define   MG_PLL_LF_PROP_COEFF(x)			((x) << 0)
+#define MG_PLL_LF(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_LF_PORT1, \
+				   _MG_PLL_LF_PORT2)
+
+#define _MG_PLL_FRAC_LOCK_PORT1				0x168A0C
+#define _MG_PLL_FRAC_LOCK_PORT2				0x169A0C
+#define _MG_PLL_FRAC_LOCK_PORT3				0x16AA0C
+#define _MG_PLL_FRAC_LOCK_PORT4				0x16BA0C
+#define   MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32		(1 << 18)
+#define   MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32		(1 << 16)
+#define   MG_PLL_FRAC_LOCK_LOCKTHRESH(x)		((x) << 11)
+#define   MG_PLL_FRAC_LOCK_DCODITHEREN			(1 << 10)
+#define   MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN		(1 << 8)
+#define   MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x)		((x) << 0)
+#define MG_PLL_FRAC_LOCK(port) _MMIO_PORT((port) - PORT_C, \
+					  _MG_PLL_FRAC_LOCK_PORT1, \
+					  _MG_PLL_FRAC_LOCK_PORT2)
+
+#define _MG_PLL_SSC_PORT1				0x168A10
+#define _MG_PLL_SSC_PORT2				0x169A10
+#define _MG_PLL_SSC_PORT3				0x16AA10
+#define _MG_PLL_SSC_PORT4				0x16BA10
+#define   MG_PLL_SSC_EN					(1 << 28)
+#define   MG_PLL_SSC_TYPE(x)				((x) << 26)
+#define   MG_PLL_SSC_STEPLENGTH(x)			((x) << 16)
+#define   MG_PLL_SSC_STEPNUM(x)				((x) << 10)
+#define   MG_PLL_SSC_FLLEN				(1 << 9)
+#define   MG_PLL_SSC_STEPSIZE(x)			((x) << 0)
+#define MG_PLL_SSC(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_SSC_PORT1, \
+				    _MG_PLL_SSC_PORT2)
+
+#define _MG_PLL_BIAS_PORT1				0x168A14
+#define _MG_PLL_BIAS_PORT2				0x169A14
+#define _MG_PLL_BIAS_PORT3				0x16AA14
+#define _MG_PLL_BIAS_PORT4				0x16BA14
+#define   MG_PLL_BIAS_BIAS_GB_SEL(x)			((x) << 30)
+#define   MG_PLL_BIAS_INIT_DCOAMP(x)			((x) << 24)
+#define   MG_PLL_BIAS_BIAS_BONUS(x)			((x) << 16)
+#define   MG_PLL_BIAS_BIASCAL_EN			(1 << 15)
+#define   MG_PLL_BIAS_CTRIM(x)				((x) << 8)
+#define   MG_PLL_BIAS_VREF_RDAC(x)			((x) << 5)
+#define   MG_PLL_BIAS_IREFTRIM(x)			((x) << 0)
+#define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \
+				     _MG_PLL_BIAS_PORT2)
+
+#define _MG_PLL_TDC_COLDST_BIAS_PORT1			0x168A18
+#define _MG_PLL_TDC_COLDST_BIAS_PORT2			0x169A18
+#define _MG_PLL_TDC_COLDST_BIAS_PORT3			0x16AA18
+#define _MG_PLL_TDC_COLDST_BIAS_PORT4			0x16BA18
+#define   MG_PLL_TDC_COLDST_IREFINT_EN			(1 << 27)
+#define   MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(x)	((x) << 17)
+#define   MG_PLL_TDC_COLDST_COLDSTART			(1 << 16)
+#define   MG_PLL_TDC_TDCOVCCORR_EN			(1 << 2)
+#define   MG_PLL_TDC_TDCSEL(x)				((x) << 0)
+#define MG_PLL_TDC_COLDST_BIAS(port) _MMIO_PORT((port) - PORT_C, \
+						_MG_PLL_TDC_COLDST_BIAS_PORT1, \
+						_MG_PLL_TDC_COLDST_BIAS_PORT2)
+
 #define _CNL_DPLL0_CFGCR0		0x6C000
 #define _CNL_DPLL1_CFGCR0		0x6C080
 #define  DPLL_CFGCR0_HDMI_MODE		(1 << 30)
 #define  DPLL_CFGCR0_SSC_ENABLE		(1 << 29)
+#define  DPLL_CFGCR0_SSC_ENABLE_ICL	(1 << 25)
 #define  DPLL_CFGCR0_LINK_RATE_MASK	(0xf << 25)
 #define  DPLL_CFGCR0_LINK_RATE_2700	(0 << 25)
 #define  DPLL_CFGCR0_LINK_RATE_1350	(1 << 25)
@@ -8942,8 +9080,19 @@ enum skl_power_gate {
 #define  DPLL_CFGCR1_PDIV_5		(4 << 2)
 #define  DPLL_CFGCR1_PDIV_7		(8 << 2)
 #define  DPLL_CFGCR1_CENTRAL_FREQ	(3 << 0)
+#define  DPLL_CFGCR1_CENTRAL_FREQ_8400	(3 << 0)
 #define CNL_DPLL_CFGCR1(pll)		_MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
 
+#define _ICL_DPLL0_CFGCR0		0x164000
+#define _ICL_DPLL1_CFGCR0		0x164080
+#define ICL_DPLL_CFGCR0(pll)		_MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
+						  _ICL_DPLL1_CFGCR0)
+
+#define _ICL_DPLL0_CFGCR1		0x164004
+#define _ICL_DPLL1_CFGCR1		0x164084
+#define ICL_DPLL_CFGCR1(pll)		_MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
+						  _ICL_DPLL1_CFGCR1)
+
 /* BXT display engine PLL */
 #define BXT_DE_PLL_CTL			_MMIO(0x6d000)
 #define   BXT_DE_PLL_RATIO(x)		(x)	/* {60,65,100} * 19.2MHz */

From 9f99963a43b735160052debbef3d99ad344db61d Mon Sep 17 00:00:00 2001
From: Tom Callaway <tcallawa@redhat.com>
Date: Mon, 23 Apr 2018 12:16:39 -0400
Subject: [PATCH 0541/1286] drm/tinydrm/mi0283qt: Always set rotation value
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The PiTFT (ili9340) has a hardware reset circuit that resets only
on power-on and not on each reboot through a gpio like the
rpi-display does. As a result, we need to always apply the
rotation value regardless of the display "on/off" state.
Move the rotation setting code below the out_enable: label.
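
For context, a condensed sketch of the enable path after this change
(a sketch only, using the driver's names at the time; error handling
and the init command sequence are elided):

	ret = mipi_dbi_poweron_conditional_reset(mipi);
	if (ret < 0)
		return;
	if (ret == 1)	/* display already initialised, skip init commands */
		goto out_enable;

	/* ... full display init sequence ... */

out_enable:
	/* the MADCTL (rotation) write now sits after the label, so it is
	 * applied on every enable, initialised or not */
	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);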

Signed-off-by: Tom Callaway <tcallawa@redhat.com>
Reviewed-by: Noralf Trønnes <noralf@tronnes.org>
Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180423161639.14420-1-tcallawa@redhat.com
---
 drivers/gpu/drm/tinydrm/mi0283qt.c | 41 +++++++++++++++++-------------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index d5ef65179c16..015d03f2acba 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -85,24 +85,6 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
 	/* Memory Access Control */
 	mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
 
-	switch (mipi->rotation) {
-	default:
-		addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
-			    ILI9341_MADCTL_MX;
-		break;
-	case 90:
-		addr_mode = ILI9341_MADCTL_MY;
-		break;
-	case 180:
-		addr_mode = ILI9341_MADCTL_MV;
-		break;
-	case 270:
-		addr_mode = ILI9341_MADCTL_MX;
-		break;
-	}
-	addr_mode |= ILI9341_MADCTL_BGR;
-	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
-
 	/* Frame Rate */
 	mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
 
@@ -128,6 +110,29 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
 	msleep(100);
 
 out_enable:
+	/* The PiTFT (ili9340) has a hardware reset circuit that
+	 * resets only on power-on and not on each reboot through
+	 * a gpio like the rpi-display does.
+	 * As a result, we need to always apply the rotation value
+	 * regardless of the display "on/off" state.
+	 */
+	switch (mipi->rotation) {
+	default:
+		addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
+			    ILI9341_MADCTL_MX;
+		break;
+	case 90:
+		addr_mode = ILI9341_MADCTL_MY;
+		break;
+	case 180:
+		addr_mode = ILI9341_MADCTL_MV;
+		break;
+	case 270:
+		addr_mode = ILI9341_MADCTL_MX;
+		break;
+	}
+	addr_mode |= ILI9341_MADCTL_BGR;
+	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
 	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
 }
 

From 5692251c254a3d561316c4e8e10c77e470b60658 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 28 Apr 2018 12:15:32 +0100
Subject: [PATCH 0542/1286] drm/i915/lrc: Scrub the GPU state of the guilty
 hanging request
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously, we just reset the ring register in the context image such
that we could skip over the broken batch and emit the closing
breadcrumb. However, on resume the context image and GPU state would be
reloaded, which may have been left in an inconsistent state by the
reset. The presumption was that at worst it would just cause another
reset and skip again until it recovered; however, it seems just as
likely to cause an unrecoverable hang. Instead of risking loading an
incomplete context image, restore it to the default state.
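
A condensed restatement of the hunk below, with the intent spelled out
(error paths and locking elided):

	regs = request->ctx->engine[engine->id].lrc_reg_state;
	defaults = i915_gem_object_pin_map(engine->default_state, I915_MAP_WB);
	if (!IS_ERR(defaults)) {
		/* copy the engine's default ("golden") context image over
		 * the guilty context, skipping the PPHWSP page at its start */
		memcpy(regs, defaults + LRC_STATE_PN * PAGE_SIZE,
		       engine->context_size - PAGE_SIZE);
		i915_gem_object_unpin_map(engine->default_state);
	}
	execlists_init_reg_state(regs, request->ctx, engine, request->ring);

	/* resume execution at the breadcrumb, past the hanging batch */
	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
	regs[CTX_RING_HEAD + 1] = request->postfix;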

v2: Fix up off-by-one from including the PPHWSP in with the register
state.
v3: Use a ring local to compact a few lines.
v4: Beware setting the ring local before checking for a NULL request.

References: https://bugs.freedesktop.org/show_bug.cgi?id=105304
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com> #v2
Link: https://patchwork.freedesktop.org/patch/msgid/20180428111532.15819-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 87eb3a688424..58cad2448184 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1803,8 +1803,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 			      struct i915_request *request)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct intel_context *ce;
 	unsigned long flags;
+	u32 *regs;
 
 	GEM_TRACE("%s request global=%x, current=%d\n",
 		  engine->name, request ? request->global_seqno : 0,
@@ -1854,14 +1854,24 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * future request will be after userspace has had the opportunity
 	 * to recreate its own state.
 	 */
-	ce = &request->ctx->engine[engine->id];
-	execlists_init_reg_state(ce->lrc_reg_state,
-				 request->ctx, engine, ce->ring);
+	regs = request->ctx->engine[engine->id].lrc_reg_state;
+	if (engine->default_state) {
+		void *defaults;
+
+		defaults = i915_gem_object_pin_map(engine->default_state,
+						   I915_MAP_WB);
+		if (!IS_ERR(defaults)) {
+			memcpy(regs, /* skip restoring the vanilla PPHWSP */
+			       defaults + LRC_STATE_PN * PAGE_SIZE,
+			       engine->context_size - PAGE_SIZE);
+			i915_gem_object_unpin_map(engine->default_state);
+		}
+	}
+	execlists_init_reg_state(regs, request->ctx, engine, request->ring);
 
 	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
-	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
-		i915_ggtt_offset(ce->ring->vma);
-	ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
+	regs[CTX_RING_HEAD + 1] = request->postfix;
 
 	request->ring->head = request->postfix;
 	intel_ring_update_space(request->ring);

From 52d7f16e5543ca892ae2393a716083d209ce3b36 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 30 Apr 2018 14:15:00 +0100
Subject: [PATCH 0543/1286] drm/i915: Stop tracking timeline->inflight_seqnos

In commit 9b6586ae9f6b ("drm/i915: Keep a global seqno per-engine"), we
moved from a global inflight counter to per-engine counters in the
hope that they would be easier to run concurrently in the future.
However, now that we want to move requests between engines, we do need
a global counter to preserve the semantic that no engine wraps in the
middle of a submit. (Although this semantic is now only required for
gen7 semaphore support, which only supports greater-than comparisons!)

v2: Keep a global counter of all requests ever submitted and force the
reset when it wraps.
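
The mechanism is small enough to sketch here (it mirrors the hunk in
i915_request.c below; the counter is only ever touched under
struct_mutex, hence the plain increment):

	/* one serial for every request ever submitted, across all engines */
	while (unlikely(++i915->gt.request_serial == 0)) {
		/* the u32 wrapped: force all engines back to a known seqno
		 * so that no engine can observe a wrap mid-submit */
		ret = reset_all_global_seqno(i915, 0);
		if (ret) {
			i915->gt.request_serial--;
			return ret;
		}
	}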

References: 9b6586ae9f6b ("drm/i915: Keep a global seqno per-engine")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430131503.5375-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c      |  5 ++--
 drivers/gpu/drm/i915/i915_drv.h          |  1 +
 drivers/gpu/drm/i915/i915_gem_timeline.h |  6 -----
 drivers/gpu/drm/i915/i915_request.c      | 33 ++++++++++++------------
 drivers/gpu/drm/i915/intel_engine_cs.c   |  5 ++--
 5 files changed, 22 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index cb1a804bf72e..747dad2666aa 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1340,10 +1340,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		struct rb_node *rb;
 
 		seq_printf(m, "%s:\n", engine->name);
-		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
+		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
 			   engine->hangcheck.seqno, seqno[id],
-			   intel_engine_last_submit(engine),
-			   engine->timeline->inflight_seqnos);
+			   intel_engine_last_submit(engine));
 		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
 			   yesno(intel_engine_has_waiter(engine)),
 			   yesno(test_bit(engine->id,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 193176bcddf5..dd4d6b918e86 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2062,6 +2062,7 @@ struct drm_i915_private {
 		struct list_head timelines;
 		struct i915_gem_timeline global_timeline;
 		u32 active_requests;
+		u32 request_serial;
 
 		/**
 		 * Is the GPU currently considered idle, or busy executing
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index 33e01bf6aa36..6e82119e2cd8 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -37,12 +37,6 @@ struct intel_timeline {
 	u64 fence_context;
 	u32 seqno;
 
-	/**
-	 * Count of outstanding requests, from the time they are constructed
-	 * to the moment they are retired. Loosely coupled to hardware.
-	 */
-	u32 inflight_seqnos;
-
 	spinlock_t lock;
 
 	/**
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b692a9f7c357..b1993d4a1a53 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -241,6 +241,7 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 			       sizeof(timeline->engine[id].global_sync));
 	}
 
+	i915->gt.request_serial = seqno;
 	return 0;
 }
 
@@ -257,18 +258,22 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 	return reset_all_global_seqno(i915, seqno - 1);
 }
 
-static int reserve_engine(struct intel_engine_cs *engine)
+static int reserve_gt(struct drm_i915_private *i915)
 {
-	struct drm_i915_private *i915 = engine->i915;
-	u32 active = ++engine->timeline->inflight_seqnos;
-	u32 seqno = engine->timeline->seqno;
 	int ret;
 
-	/* Reservation is fine until we need to wrap around */
-	if (unlikely(add_overflows(seqno, active))) {
+	/*
+	 * Reservation is fine until we may need to wrap around
+	 *
+	 * By incrementing the serial for every request, we know that no
+	 * individual engine may exceed that serial (as each is reset to 0
+	 * on any wrap). This protects even the most pessimistic of migrations
+	 * of every request from all engines onto just one.
+	 */
+	while (unlikely(++i915->gt.request_serial == 0)) {
 		ret = reset_all_global_seqno(i915, 0);
 		if (ret) {
-			engine->timeline->inflight_seqnos--;
+			i915->gt.request_serial--;
 			return ret;
 		}
 	}
@@ -279,15 +284,10 @@ static int reserve_engine(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static void unreserve_engine(struct intel_engine_cs *engine)
+static void unreserve_gt(struct drm_i915_private *i915)
 {
-	struct drm_i915_private *i915 = engine->i915;
-
 	if (!--i915->gt.active_requests)
 		i915_gem_park(i915);
-
-	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
-	engine->timeline->inflight_seqnos--;
 }
 
 void i915_gem_retire_noop(struct i915_gem_active *active,
@@ -362,7 +362,6 @@ static void i915_request_retire(struct i915_request *request)
 	list_del_init(&request->link);
 	spin_unlock_irq(&engine->timeline->lock);
 
-	unreserve_engine(request->engine);
 	advance_ring(request);
 
 	free_capture_list(request);
@@ -424,6 +423,8 @@ static void i915_request_retire(struct i915_request *request)
 	}
 	spin_unlock_irq(&request->lock);
 
+	unreserve_gt(request->i915);
+
 	i915_sched_node_fini(request->i915, &request->sched);
 	i915_request_put(request);
 }
@@ -642,7 +643,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		return ERR_CAST(ring);
 	GEM_BUG_ON(!ring);
 
-	ret = reserve_engine(engine);
+	ret = reserve_gt(i915);
 	if (ret)
 		goto err_unpin;
 
@@ -784,7 +785,7 @@ err_unwind:
 
 	kmem_cache_free(i915->requests, rq);
 err_unreserve:
-	unreserve_engine(engine);
+	unreserve_gt(i915);
 err_unpin:
 	engine->context_unpin(engine, ctx);
 	return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ac009f10c948..eba81d55dc3a 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1321,12 +1321,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	if (i915_terminally_wedged(&engine->i915->gpu_error))
 		drm_printf(m, "*** WEDGED ***\n");
 
-	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
 		   intel_engine_get_seqno(engine),
 		   intel_engine_last_submit(engine),
 		   engine->hangcheck.seqno,
-		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
-		   engine->timeline->inflight_seqnos);
+		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
 	drm_printf(m, "\tReset count: %d (global %d)\n",
 		   i915_reset_engine_count(error, engine),
 		   i915_reset_count(error));

From ab82a0635cdf0b91a134aaae34abd4e864595c5b Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 30 Apr 2018 14:15:01 +0100
Subject: [PATCH 0544/1286] drm/i915: Wrap engine->context_pin() and
 engine->context_unpin()

Make life easier in upcoming patches by moving the context_pin and
context_unpin vfuncs into inline helpers.
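
At the call sites the conversion is mechanical; a representative
before and after (taken from the hunks below):

	/* before */
	ring = engine->context_pin(engine, ctx);
	...
	engine->context_unpin(engine, ctx);

	/* after */
	ring = intel_context_pin(ctx, engine);
	...
	intel_context_unpin(ctx, engine);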

v2: Fixup mock_engine to mark the context as pinned on use.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430131503.5375-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gvt/mmio_context.c      |  2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c         | 20 ++++++-------
 drivers/gpu/drm/i915/i915_debugfs.c          | 20 +++++++------
 drivers/gpu/drm/i915/i915_gem.c              |  4 +--
 drivers/gpu/drm/i915/i915_gem_context.c      |  8 +++---
 drivers/gpu/drm/i915/i915_gem_context.h      | 30 +++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gpu_error.c        |  3 +-
 drivers/gpu/drm/i915/i915_perf.c             |  9 +++---
 drivers/gpu/drm/i915/i915_request.c          |  6 ++--
 drivers/gpu/drm/i915/intel_engine_cs.c       | 13 ++++-----
 drivers/gpu/drm/i915/intel_guc_ads.c         |  3 +-
 drivers/gpu/drm/i915/intel_guc_submission.c  |  5 ++--
 drivers/gpu/drm/i915/intel_lrc.c             | 29 +++++++++++--------
 drivers/gpu/drm/i915/intel_lrc.h             |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 19 +++++++------
 drivers/gpu/drm/i915/selftests/mock_engine.c | 13 +++++++--
 16 files changed, 117 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index a5bac83d53a9..0f949554d118 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -448,7 +448,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 
 bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
 {
-	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+	u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 35f7cfd7a6b4..ffb45a9ee228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -58,7 +58,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	int ring_id = workload->ring_id;
 	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -283,7 +283,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 		struct intel_engine_cs *engine)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	u64 desc = 0;
 
 	desc = ce->lrc_desc;
@@ -389,7 +389,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
 	 * the guest context, gvt can unpin the shadow_ctx safely.
 	 */
-	ring = engine->context_pin(engine, shadow_ctx);
+	ring = intel_context_pin(shadow_ctx, engine);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		gvt_vgpu_err("fail to pin shadow context\n");
@@ -403,7 +403,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	return 0;
 
 err_unpin:
-	engine->context_unpin(engine, shadow_ctx);
+	intel_context_unpin(shadow_ctx, engine);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
 err_scan:
@@ -437,7 +437,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 	return 0;
 
 err_unpin:
-	engine->context_unpin(engine, shadow_ctx);
+	intel_context_unpin(shadow_ctx, engine);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 	return ret;
 }
@@ -526,7 +526,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -688,7 +688,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
 	ret = prepare_workload(workload);
 	if (ret) {
-		engine->context_unpin(engine, shadow_ctx);
+		intel_context_unpin(shadow_ctx, engine);
 		goto out;
 	}
 
@@ -771,7 +771,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	int ring_id = workload->ring_id;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
@@ -898,7 +898,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		}
 		mutex_lock(&dev_priv->drm.struct_mutex);
 		/* unpin shadow ctx as the shadow_ctx update is done */
-		engine->context_unpin(engine, s->shadow_ctx);
+		intel_context_unpin(s->shadow_ctx, engine);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 747dad2666aa..85911bc0b703 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -377,16 +377,19 @@ static void print_batch_pool_stats(struct seq_file *m,
 	print_file_stats(m, "[k]batch pool", stats);
 }
 
-static int per_file_ctx_stats(int id, void *ptr, void *data)
+static int per_file_ctx_stats(int idx, void *ptr, void *data)
 {
 	struct i915_gem_context *ctx = ptr;
-	int n;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
-		if (ctx->engine[n].state)
-			per_file_stats(0, ctx->engine[n].state->obj, data);
-		if (ctx->engine[n].ring)
-			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
+	for_each_engine(engine, ctx->i915, id) {
+		struct intel_context *ce = to_intel_context(ctx, engine);
+
+		if (ce->state)
+			per_file_stats(0, ce->state->obj, data);
+		if (ce->ring)
+			per_file_stats(0, ce->ring->vma->obj, data);
 	}
 
 	return 0;
@@ -1959,7 +1962,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, '\n');
 
 		for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce = &ctx->engine[engine->id];
+			struct intel_context *ce =
+				to_intel_context(ctx, engine);
 
 			seq_printf(m, "%s: ", engine->name);
 			if (ce->state)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6b0c67a4f214..4090bfdda340 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3234,7 +3234,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
 				      stalled_mask & ENGINE_MASK(id));
 		ctx = fetch_and_zero(&engine->last_retired_context);
 		if (ctx)
-			engine->context_unpin(engine, ctx);
+			intel_context_unpin(ctx, engine);
 
 		/*
 		 * Ostensibily, we always want a context loaded for powersaving,
@@ -5291,7 +5291,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id) {
 		struct i915_vma *state;
 
-		state = ctx->engine[id].state;
+		state = to_intel_context(ctx, engine)->state;
 		if (!state)
 			continue;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 74435affe23f..59d4bd4a7b73 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -117,15 +117,15 @@ static void lut_close(struct i915_gem_context *ctx)
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-	int i;
+	unsigned int n;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
 	i915_ppgtt_put(ctx->ppgtt);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct intel_context *ce = &ctx->engine[i];
+	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+		struct intel_context *ce = &ctx->__engine[n];
 
 		if (!ce->state)
 			continue;
@@ -521,7 +521,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 		if (!engine->last_retired_context)
 			continue;
 
-		engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
 		engine->last_retired_context = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index b12a8a8c5af9..ace3b129c189 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -149,7 +149,7 @@ struct i915_gem_context {
 		u32 *lrc_reg_state;
 		u64 lrc_desc;
 		int pin_count;
-	} engine[I915_NUM_ENGINES];
+	} __engine[I915_NUM_ENGINES];
 
 	/** ring_size: size for allocating the per-engine ring buffer */
 	u32 ring_size;
@@ -256,6 +256,34 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 	return !ctx->file_priv;
 }
 
+static inline struct intel_context *
+to_intel_context(struct i915_gem_context *ctx,
+		 const struct intel_engine_cs *engine)
+{
+	return &ctx->__engine[engine->id];
+}
+
+static inline struct intel_ring *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+	return engine->context_pin(engine, ctx);
+}
+
+static inline void __intel_context_pin(struct i915_gem_context *ctx,
+				       const struct intel_engine_cs *engine)
+{
+	struct intel_context *ce = to_intel_context(ctx, engine);
+
+	GEM_BUG_ON(!ce->pin_count);
+	ce->pin_count++;
+}
+
+static inline void intel_context_unpin(struct i915_gem_context *ctx,
+				       struct intel_engine_cs *engine)
+{
+	engine->context_unpin(engine, ctx);
+}
+
 /* i915_gem_context.c */
 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 671ffa37614e..c0127965b578 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1472,7 +1472,8 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 			ee->ctx =
 				i915_error_object_create(i915,
-							 request->ctx->engine[i].state);
+							 to_intel_context(request->ctx,
+									  engine)->state);
 
 			error->simulated |=
 				i915_gem_context_no_error_capture(request->ctx);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index bfc906cd4e5e..4b1da01168ae 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1234,7 +1234,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 		 *
 		 * NB: implied RCS engine...
 		 */
-		ring = engine->context_pin(engine, stream->ctx);
+		ring = intel_context_pin(stream->ctx, engine);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 		if (IS_ERR(ring))
 			return PTR_ERR(ring);
@@ -1246,7 +1246,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 		 * with gen8+ and execlists
 		 */
 		dev_priv->perf.oa.specific_ctx_id =
-			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
+			i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
 	}
 
 	return 0;
@@ -1271,7 +1271,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 		mutex_lock(&dev_priv->drm.struct_mutex);
 
 		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-		engine->context_unpin(engine, stream->ctx);
+		intel_context_unpin(stream->ctx, engine);
 
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
@@ -1759,6 +1759,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 				       const struct i915_oa_config *oa_config)
 {
+	struct intel_engine_cs *engine = dev_priv->engine[RCS];
 	struct i915_gem_context *ctx;
 	int ret;
 	unsigned int wait_flags = I915_WAIT_LOCKED;
@@ -1789,7 +1790,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 
 	/* Update all contexts now that we've stalled the submission. */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		struct intel_context *ce = &ctx->engine[RCS];
+		struct intel_context *ce = to_intel_context(ctx, engine);
 		u32 *regs;
 
 		/* OA settings will be set upon first use */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b1993d4a1a53..9358f2cf0c32 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -409,7 +409,7 @@ static void i915_request_retire(struct i915_request *request)
 	 * the subsequent request.
 	 */
 	if (engine->last_retired_context)
-		engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
 	engine->last_retired_context = request->ctx;
 
 	spin_lock_irq(&request->lock);
@@ -638,7 +638,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ring = engine->context_pin(engine, ctx);
+	ring = intel_context_pin(ctx, engine);
 	if (IS_ERR(ring))
 		return ERR_CAST(ring);
 	GEM_BUG_ON(!ring);
@@ -787,7 +787,7 @@ err_unwind:
 err_unreserve:
 	unreserve_gt(i915);
 err_unpin:
-	engine->context_unpin(engine, ctx);
+	intel_context_unpin(ctx, engine);
 	return ERR_PTR(ret);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index eba81d55dc3a..238c8d3da041 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -685,7 +685,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	 * be available. To avoid this we always pin the default
 	 * context.
 	 */
-	ring = engine->context_pin(engine, engine->i915->kernel_context);
+	ring = intel_context_pin(engine->i915->kernel_context, engine);
 	if (IS_ERR(ring))
 		return PTR_ERR(ring);
 
@@ -694,8 +694,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	 * we can interrupt the engine at any time.
 	 */
 	if (engine->i915->preempt_context) {
-		ring = engine->context_pin(engine,
-					   engine->i915->preempt_context);
+		ring = intel_context_pin(engine->i915->preempt_context, engine);
 		if (IS_ERR(ring)) {
 			ret = PTR_ERR(ring);
 			goto err_unpin_kernel;
@@ -719,9 +718,9 @@ err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
 	if (engine->i915->preempt_context)
-		engine->context_unpin(engine, engine->i915->preempt_context);
+		intel_context_unpin(engine->i915->preempt_context, engine);
 err_unpin_kernel:
-	engine->context_unpin(engine, engine->i915->kernel_context);
+	intel_context_unpin(engine->i915->kernel_context, engine);
 	return ret;
 }
 
@@ -749,8 +748,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 		i915_gem_object_put(engine->default_state);
 
 	if (engine->i915->preempt_context)
-		engine->context_unpin(engine, engine->i915->preempt_context);
-	engine->context_unpin(engine, engine->i915->kernel_context);
+		intel_context_unpin(engine->i915->preempt_context, engine);
+	intel_context_unpin(engine->i915->kernel_context, engine);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index 334cb5202e1c..dcaa3fb71765 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -121,7 +121,8 @@ int intel_guc_ads_create(struct intel_guc *guc)
 	 * to find it. Note that we have to skip our header (1 page),
 	 * because our GuC shared data is there.
 	 */
-	kernel_ctx_vma = dev_priv->kernel_context->engine[RCS].state;
+	kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
+					  dev_priv->engine[RCS])->state;
 	blob->ads.golden_context_lrca =
 		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
 
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 02da05875aa7..6e6ed0f46bd3 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -362,7 +362,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 	desc->db_id = client->doorbell_id;
 
 	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
-		struct intel_context *ce = &ctx->engine[engine->id];
+		struct intel_context *ce = to_intel_context(ctx, engine);
 		u32 guc_engine_id = engine->guc_id;
 		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
 
@@ -990,7 +990,8 @@ static void guc_fill_preempt_context(struct intel_guc *guc)
 	enum intel_engine_id id;
 
 	for_each_engine(engine, dev_priv, id) {
-		struct intel_context *ce = &client->owner->engine[id];
+		struct intel_context *ce =
+			to_intel_context(client->owner, engine);
 		u32 addr = intel_hws_preempt_done_address(engine);
 		u32 *cs;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 58cad2448184..099995619472 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -223,7 +223,7 @@ static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 				   struct intel_engine_cs *engine)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	u64 desc;
 
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -414,7 +414,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
+	struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
 	struct i915_hw_ppgtt *ppgtt =
 		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
@@ -523,7 +523,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
 	struct intel_context *ce =
-		&engine->i915->preempt_context->engine[engine->id];
+		to_intel_context(engine->i915->preempt_context, engine);
 	unsigned int n;
 
 	GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -1327,7 +1327,7 @@ static struct intel_ring *
 execlists_context_pin(struct intel_engine_cs *engine,
 		      struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	void *vaddr;
 	int ret;
 
@@ -1380,7 +1380,7 @@ err:
 static void execlists_context_unpin(struct intel_engine_cs *engine,
 				    struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(ce->pin_count == 0);
@@ -1399,8 +1399,8 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
 
 static int execlists_request_alloc(struct i915_request *request)
 {
-	struct intel_engine_cs *engine = request->engine;
-	struct intel_context *ce = &request->ctx->engine[engine->id];
+	struct intel_context *ce =
+		to_intel_context(request->ctx, request->engine);
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -1854,7 +1854,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * future request will be after userspace has had the opportunity
 	 * to recreate its own state.
 	 */
-	regs = request->ctx->engine[engine->id].lrc_reg_state;
+	regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
 	if (engine->default_state) {
 		void *defaults;
 
@@ -2305,9 +2305,13 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	}
 
 	engine->execlists.preempt_complete_status = ~0u;
-	if (engine->i915->preempt_context)
+	if (engine->i915->preempt_context) {
+		struct intel_context *ce =
+			to_intel_context(engine->i915->preempt_context, engine);
+
 		engine->execlists.preempt_complete_status =
-			upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
+			upper_32_bits(ce->lrc_desc);
+	}
 
 	return 0;
 
@@ -2589,7 +2593,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	struct i915_vma *vma;
 	uint32_t context_size;
 	struct intel_ring *ring;
@@ -2660,7 +2664,8 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 	 */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
 		for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce = &ctx->engine[engine->id];
+			struct intel_context *ce =
+				to_intel_context(ctx, engine);
 			u32 *reg;
 
 			if (!ce->state)
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 59d7b86012e9..4ec7d8dd13c8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -108,7 +108,7 @@ static inline uint64_t
 intel_lr_context_descriptor(struct i915_gem_context *ctx,
 			    struct intel_engine_cs *engine)
 {
-	return ctx->engine[engine->id].lrc_desc;
+	return to_intel_context(ctx, engine)->lrc_desc;
 }
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c06c22c953b3..69ffc0dfe92b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -558,7 +558,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 	 */
 	if (request) {
 		struct drm_i915_private *dev_priv = request->i915;
-		struct intel_context *ce = &request->ctx->engine[engine->id];
+		struct intel_context *ce = to_intel_context(request->ctx,
+							    engine);
 		struct i915_hw_ppgtt *ppgtt;
 
 		if (ce->state) {
@@ -1163,9 +1164,9 @@ intel_ring_free(struct intel_ring *ring)
 	kfree(ring);
 }
 
-static int context_pin(struct i915_gem_context *ctx)
+static int context_pin(struct intel_context *ce)
 {
-	struct i915_vma *vma = ctx->engine[RCS].state;
+	struct i915_vma *vma = ce->state;
 	int ret;
 
 	/*
@@ -1256,7 +1257,7 @@ static struct intel_ring *
 intel_ring_context_pin(struct intel_engine_cs *engine,
 		       struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 	int ret;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -1278,7 +1279,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
 	}
 
 	if (ce->state) {
-		ret = context_pin(ctx);
+		ret = context_pin(ce);
 		if (ret)
 			goto err;
 
@@ -1299,7 +1300,7 @@ err:
 static void intel_ring_context_unpin(struct intel_engine_cs *engine,
 				     struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(ce->pin_count == 0);
@@ -1427,7 +1428,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
 	*cs++ = MI_NOOP;
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags;
+	*cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1518,7 +1519,7 @@ static int switch_context(struct i915_request *rq)
 		hw_flags = MI_FORCE_RESTORE;
 	}
 
-	if (to_ctx->engine[engine->id].state &&
+	if (to_intel_context(to_ctx, engine)->state &&
 	    (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
 		GEM_BUG_ON(engine->id != RCS);
 
@@ -1566,7 +1567,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
+	GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
 
 	/* Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 78a89efa1119..b82420c6b810 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -71,14 +71,21 @@ static struct intel_ring *
 mock_context_pin(struct intel_engine_cs *engine,
 		 struct i915_gem_context *ctx)
 {
-	i915_gem_context_get(ctx);
+	struct intel_context *ce = to_intel_context(ctx, engine);
+
+	if (!ce->pin_count++)
+		i915_gem_context_get(ctx);
+
 	return engine->buffer;
 }
 
 static void mock_context_unpin(struct intel_engine_cs *engine,
 			       struct i915_gem_context *ctx)
 {
-	i915_gem_context_put(ctx);
+	struct intel_context *ce = to_intel_context(ctx, engine);
+
+	if (!--ce->pin_count)
+		i915_gem_context_put(ctx);
 }
 
 static int mock_request_alloc(struct i915_request *request)
@@ -217,7 +224,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
 	GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
 	if (engine->last_retired_context)
-		engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
 
 	intel_engine_fini_breadcrumbs(engine);
 

From b887d61546245389c0304d8b1371bab9af8106c2 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 30 Apr 2018 14:15:02 +0100
Subject: [PATCH 0545/1286] drm/i915: Retire requests along rings

In the next patch, rings are the central timeline, as requests may jump
between engines. Therefore, in the future, as we retire in order along
the engine timeline, we may retire out of order within a ring (as the
ring now spans multiple engines), leading to much hilarity in
miscomputing the position of ring->head.

As an added bonus, retiring along the ring reduces the penalty of having
one execlists client do cleanup for another (old legacy submission
shares a ring between all clients). The downside is that the slow and
irregular (off the critical path) process of cleaning up stale requests
left behind by userspace becomes a modicum less efficient.

In the long run, it will become apparent that the ordered
ring->request_list matches the ring->timeline; a fun challenge for the
future will be unifying the two lists to avoid duplication!
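
Conceptually there are now two retirement orders (a sketch using the
names from this patch; locking elided):

	/* ring order: release ring space strictly in the order requests
	 * occupy the ring, even if engines complete them out of order */
	list_for_each_entry_safe(rq, next, &ring->request_list, ring_link) {
		if (!i915_request_completed(rq))
			break;
		/* i915_request_retire() separately drains the engine
		 * timeline up to rq (__retire_engine_upto()), which is
		 * where last_retired_context and the context unpinning
		 * are handled */
		i915_request_retire(rq);
	}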

v2: We need both engine-order and ring-order processing to maintain our
knowledge of where individual rings have completed up to, as well as
knowing what was last executing on any engine. And finally by decoupling
retiring the contexts on the engine and the timelines along the rings,
we do have to keep a reference to the context on each request
(previously it was guaranteed by the context being pinned).

v3: Not just a reference to the context, but we need to keep it pinned
as we manipulate the rings; i.e. we need a pin for both the manipulation
of the engine state during its retirements, and a separate pin for the
manipulation of the ring state.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430131503.5375-3-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.h               |   3 +-
 drivers/gpu/drm/i915/i915_gem.c               |   1 +
 drivers/gpu/drm/i915/i915_request.c           | 150 +++++++++++-------
 drivers/gpu/drm/i915/i915_utils.h             |   6 +
 drivers/gpu/drm/i915/intel_ringbuffer.c       |   6 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h       |   1 +
 drivers/gpu/drm/i915/selftests/mock_engine.c  |  27 +++-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   2 +
 8 files changed, 131 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dd4d6b918e86..edc33e059191 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2059,8 +2059,9 @@ struct drm_i915_private {
 		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
-		struct list_head timelines;
 		struct i915_gem_timeline global_timeline;
+		struct list_head timelines;
+		struct list_head rings;
 		u32 active_requests;
 		u32 request_serial;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4090bfdda340..f0644d1fbd75 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5600,6 +5600,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 		goto err_dependencies;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
+	INIT_LIST_HEAD(&dev_priv->gt.rings);
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	err = i915_gem_timeline_init__global(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9358f2cf0c32..e6535255d445 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -286,6 +286,7 @@ static int reserve_gt(struct drm_i915_private *i915)
 
 static void unreserve_gt(struct drm_i915_private *i915)
 {
+	GEM_BUG_ON(!i915->gt.active_requests);
 	if (!--i915->gt.active_requests)
 		i915_gem_park(i915);
 }
@@ -298,6 +299,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active,
 
 static void advance_ring(struct i915_request *request)
 {
+	struct intel_ring *ring = request->ring;
 	unsigned int tail;
 
 	/*
@@ -309,7 +311,8 @@ static void advance_ring(struct i915_request *request)
 	 * Note this requires that we are always called in request
 	 * completion order.
 	 */
-	if (list_is_last(&request->ring_link, &request->ring->request_list)) {
+	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
+	if (list_is_last(&request->ring_link, &ring->request_list)) {
 		/*
 		 * We may race here with execlists resubmitting this request
 		 * as we retire it. The resubmission will move the ring->tail
@@ -322,9 +325,9 @@ static void advance_ring(struct i915_request *request)
 	} else {
 		tail = request->postfix;
 	}
-	list_del(&request->ring_link);
+	list_del_init(&request->ring_link);
 
-	request->ring->head = tail;
+	ring->head = tail;
 }
 
 static void free_capture_list(struct i915_request *request)
@@ -340,30 +343,84 @@ static void free_capture_list(struct i915_request *request)
 	}
 }
 
+static void __retire_engine_request(struct intel_engine_cs *engine,
+				    struct i915_request *rq)
+{
+	GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
+		  __func__, engine->name,
+		  rq->fence.context, rq->fence.seqno,
+		  rq->global_seqno,
+		  intel_engine_get_seqno(engine));
+
+	GEM_BUG_ON(!i915_request_completed(rq));
+
+	local_irq_disable();
+
+	spin_lock(&engine->timeline->lock);
+	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline->requests));
+	list_del_init(&rq->link);
+	spin_unlock(&engine->timeline->lock);
+
+	spin_lock(&rq->lock);
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+		dma_fence_signal_locked(&rq->fence);
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+		intel_engine_cancel_signaling(rq);
+	if (rq->waitboost) {
+		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
+		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+	}
+	spin_unlock(&rq->lock);
+
+	local_irq_enable();
+
+	/*
+	 * The backing object for the context is done after switching to the
+	 * *next* context. Therefore we cannot retire the previous context until
+	 * the next context has already started running. However, since we
+	 * cannot take the required locks at i915_request_submit() we
+	 * defer the unpinning of the active context to now, retirement of
+	 * the subsequent request.
+	 */
+	if (engine->last_retired_context)
+		intel_context_unpin(engine->last_retired_context, engine);
+	engine->last_retired_context = rq->ctx;
+}
+
+static void __retire_engine_upto(struct intel_engine_cs *engine,
+				 struct i915_request *rq)
+{
+	struct i915_request *tmp;
+
+	if (list_empty(&rq->link))
+		return;
+
+	do {
+		tmp = list_first_entry(&engine->timeline->requests,
+				       typeof(*tmp), link);
+
+		GEM_BUG_ON(tmp->engine != engine);
+		__retire_engine_request(engine, tmp);
+	} while (tmp != rq);
+}
+
 static void i915_request_retire(struct i915_request *request)
 {
-	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_active *active, *next;
 
 	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
-		  engine->name,
+		  request->engine->name,
 		  request->fence.context, request->fence.seqno,
 		  request->global_seqno,
-		  intel_engine_get_seqno(engine));
+		  intel_engine_get_seqno(request->engine));
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
 	GEM_BUG_ON(!i915_request_completed(request));
-	GEM_BUG_ON(!request->i915->gt.active_requests);
 
 	trace_i915_request_retire(request);
 
-	spin_lock_irq(&engine->timeline->lock);
-	list_del_init(&request->link);
-	spin_unlock_irq(&engine->timeline->lock);
-
 	advance_ring(request);
-
 	free_capture_list(request);
 
 	/*
@@ -399,29 +456,9 @@ static void i915_request_retire(struct i915_request *request)
 
 	/* Retirement decays the ban score as it is a sign of ctx progress */
 	atomic_dec_if_positive(&request->ctx->ban_score);
+	intel_context_unpin(request->ctx, request->engine);
 
-	/*
-	 * The backing object for the context is done after switching to the
-	 * *next* context. Therefore we cannot retire the previous context until
-	 * the next context has already started running. However, since we
-	 * cannot take the required locks at i915_request_submit() we
-	 * defer the unpinning of the active context to now, retirement of
-	 * the subsequent request.
-	 */
-	if (engine->last_retired_context)
-		intel_context_unpin(engine->last_retired_context, engine);
-	engine->last_retired_context = request->ctx;
-
-	spin_lock_irq(&request->lock);
-	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
-		dma_fence_signal_locked(&request->fence);
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
-		intel_engine_cancel_signaling(request);
-	if (request->waitboost) {
-		GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters));
-		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
-	}
-	spin_unlock_irq(&request->lock);
+	__retire_engine_upto(request->engine, request);
 
 	unreserve_gt(request->i915);
 
@@ -431,18 +468,24 @@ static void i915_request_retire(struct i915_request *request)
 
 void i915_request_retire_upto(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_ring *ring = rq->ring;
 	struct i915_request *tmp;
 
+	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+		  rq->engine->name,
+		  rq->fence.context, rq->fence.seqno,
+		  rq->global_seqno,
+		  intel_engine_get_seqno(rq->engine));
+
 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_request_completed(rq));
 
-	if (list_empty(&rq->link))
+	if (list_empty(&rq->ring_link))
 		return;
 
 	do {
-		tmp = list_first_entry(&engine->timeline->requests,
-				       typeof(*tmp), link);
+		tmp = list_first_entry(&ring->request_list,
+				       typeof(*tmp), ring_link);
 
 		i915_request_retire(tmp);
 	} while (tmp != rq);
@@ -651,9 +694,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (ret)
 		goto err_unreserve;
 
-	/* Move the oldest request to the slab-cache (if not in use!) */
-	rq = list_first_entry_or_null(&engine->timeline->requests,
-				      typeof(*rq), link);
+	/* Move our oldest request to the slab-cache (if not in use!) */
+	rq = list_first_entry_or_null(&ring->request_list,
+				      typeof(*rq), ring_link);
 	if (rq && i915_request_completed(rq))
 		i915_request_retire(rq);
 
@@ -771,6 +814,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (ret)
 		goto err_unwind;
 
+	/* Keep a second pin for the dual retirement along engine and ring */
+	__intel_context_pin(rq->ctx, engine);
+
 	/* Check that we didn't interrupt ourselves with a new request */
 	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
 	return rq;
@@ -1357,38 +1403,30 @@ complete:
 	return timeout;
 }
 
-static void engine_retire_requests(struct intel_engine_cs *engine)
+static void ring_retire_requests(struct intel_ring *ring)
 {
 	struct i915_request *request, *next;
-	u32 seqno = intel_engine_get_seqno(engine);
-	LIST_HEAD(retire);
 
-	spin_lock_irq(&engine->timeline->lock);
 	list_for_each_entry_safe(request, next,
-				 &engine->timeline->requests, link) {
-		if (!i915_seqno_passed(seqno, request->global_seqno))
+				 &ring->request_list, ring_link) {
+		if (!i915_request_completed(request))
 			break;
 
-		list_move_tail(&request->link, &retire);
-	}
-	spin_unlock_irq(&engine->timeline->lock);
-
-	list_for_each_entry_safe(request, next, &retire, link)
 		i915_request_retire(request);
+	}
 }
 
 void i915_retire_requests(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct intel_ring *ring, *next;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
 	if (!i915->gt.active_requests)
 		return;
 
-	for_each_engine(engine, i915, id)
-		engine_retire_requests(engine);
+	list_for_each_entry_safe(ring, next, &i915->gt.rings, link)
+		ring_retire_requests(ring);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 0695717522ea..00165ad55fb3 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -120,6 +120,12 @@ static inline u64 ptr_to_u64(const void *ptr)
 
 #include <linux/list.h>
 
+static inline int list_is_first(const struct list_head *list,
+				const struct list_head *head)
+{
+	return head->next == list;
+}
+
 static inline void __list_del_many(struct list_head *head,
 				   struct list_head *first)
 {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 69ffc0dfe92b..ae8958007df5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1066,7 +1066,6 @@ err:
 
 void intel_ring_reset(struct intel_ring *ring, u32 tail)
 {
-	GEM_BUG_ON(!list_empty(&ring->request_list));
 	ring->tail = tail;
 	ring->head = tail;
 	ring->emit = tail;
@@ -1125,6 +1124,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 
 	GEM_BUG_ON(!is_power_of_2(size));
 	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
@@ -1150,6 +1150,8 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 	}
 	ring->vma = vma;
 
+	list_add(&ring->link, &engine->i915->gt.rings);
+
 	return ring;
 }
 
@@ -1161,6 +1163,8 @@ intel_ring_free(struct intel_ring *ring)
 	i915_vma_close(ring->vma);
 	__i915_gem_object_release_unless_active(obj);
 
+	list_del(&ring->link);
+
 	kfree(ring);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 24af3f1088ba..deb80d01e0bd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -130,6 +130,7 @@ struct intel_ring {
 	void *vaddr;
 
 	struct list_head request_list;
+	struct list_head link;
 
 	u32 head;
 	u32 tail;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index b82420c6b810..d95fc481e5c1 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -147,9 +147,18 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	INIT_LIST_HEAD(&ring->request_list);
 	intel_ring_update_space(ring);
 
+	list_add(&ring->link, &engine->i915->gt.rings);
+
 	return ring;
 }
 
+static void mock_ring_free(struct intel_ring *ring)
+{
+	list_del(&ring->link);
+
+	kfree(ring);
+}
+
 struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 				    const char *name,
 				    int id)
@@ -162,12 +171,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	if (!engine)
 		return NULL;
 
-	engine->base.buffer = mock_ring(&engine->base);
-	if (!engine->base.buffer) {
-		kfree(engine);
-		return NULL;
-	}
-
 	/* minimal engine setup for requests */
 	engine->base.i915 = i915;
 	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
@@ -192,7 +195,16 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
 	INIT_LIST_HEAD(&engine->hw_queue);
 
+	engine->base.buffer = mock_ring(&engine->base);
+	if (!engine->base.buffer)
+		goto err_breadcrumbs;
+
 	return &engine->base;
+
+err_breadcrumbs:
+	intel_engine_fini_breadcrumbs(&engine->base);
+	kfree(engine);
+	return NULL;
 }
 
 void mock_engine_flush(struct intel_engine_cs *engine)
@@ -226,8 +238,9 @@ void mock_engine_free(struct intel_engine_cs *engine)
 	if (engine->last_retired_context)
 		intel_context_unpin(engine->last_retired_context, engine);
 
+	mock_ring_free(engine->buffer);
+
 	intel_engine_fini_breadcrumbs(engine);
 
-	kfree(engine->buffer);
 	kfree(engine);
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index e6d4b882599a..ac4bacf8b5b9 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -44,6 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
 		mock_engine_flush(engine);
 
 	i915_retire_requests(i915);
+	GEM_BUG_ON(i915->gt.active_requests);
 }
 
 static void mock_device_release(struct drm_device *dev)
@@ -224,6 +225,7 @@ struct drm_i915_private *mock_gem_device(void)
 		goto err_dependencies;
 
 	mutex_lock(&i915->drm.struct_mutex);
+	INIT_LIST_HEAD(&i915->gt.rings);
 	INIT_LIST_HEAD(&i915->gt.timelines);
 	err = i915_gem_timeline_init__global(i915);
 	if (err) {

From 643b450a594e9cb57fbd2534d1571d244faddd01 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 30 Apr 2018 14:15:03 +0100
Subject: [PATCH 0546/1286] drm/i915: Only track live rings for retiring

We don't need to track every ring for its lifetime as they are managed
by the contexts/engines. What we do want to track are the live rings so
that we can sporadically clean up requests if userspace falls behind. We
can simply restrict the gt->rings list to contain only gt->live_rings.

v2: s/live/active/ for consistency with gt.active_requests

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430131503.5375-4-chris@chris-wilson.co.uk
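
To make the lifetime rule above concrete, here is a minimal user-space
sketch of the idea (placeholder names and a toy list implementation,
not the i915 code): a ring joins a global active list when its first
request is queued and drops off again when its last request retires,
so the retire path never has to visit idle rings.

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static bool list_empty(const struct node *h) { return h->next == h; }
static void list_add_tail(struct node *n, struct node *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}
static void list_del(struct node *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
    list_init(n);
}

struct ring {
    struct node requests;    /* per-ring request list */
    struct node active_link; /* link in the global active list */
};

static struct node active_rings;

static void submit_request(struct ring *r, struct node *rq)
{
    bool was_idle = list_empty(&r->requests);

    list_add_tail(rq, &r->requests);
    if (was_idle)                 /* first request: ring becomes active */
        list_add_tail(&r->active_link, &active_rings);
}

static void retire_request(struct ring *r, struct node *rq)
{
    list_del(rq);
    if (list_empty(&r->requests)) /* last request: ring goes idle again */
        list_del(&r->active_link);
}

int main(void)
{
    struct ring r; struct node rq;

    list_init(&active_rings);
    list_init(&r.requests);
    submit_request(&r, &rq);
    printf("active rings non-empty: %d\n", !list_empty(&active_rings));
    retire_request(&r, &rq);
    printf("active rings non-empty: %d\n", !list_empty(&active_rings));
    return 0;
}
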
---
 drivers/gpu/drm/i915/i915_drv.h                  |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c                  |  6 ++++--
 drivers/gpu/drm/i915/i915_request.c              | 10 ++++++++--
 drivers/gpu/drm/i915/intel_ringbuffer.c          |  4 ----
 drivers/gpu/drm/i915/intel_ringbuffer.h          |  2 +-
 drivers/gpu/drm/i915/selftests/mock_engine.c     |  4 ----
 drivers/gpu/drm/i915/selftests/mock_gem_device.c |  5 +++--
 7 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index edc33e059191..6268a5103dba 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2061,7 +2061,8 @@ struct drm_i915_private {
 
 		struct i915_gem_timeline global_timeline;
 		struct list_head timelines;
-		struct list_head rings;
+
+		struct list_head active_rings;
 		u32 active_requests;
 		u32 request_serial;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f0644d1fbd75..fa1d94a4eb5f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -141,6 +141,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(i915->gt.active_requests);
+	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
 
 	if (!i915->gt.awake)
 		return I915_EPOCH_INVALID;
@@ -5599,9 +5600,10 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 	if (!dev_priv->priorities)
 		goto err_dependencies;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	INIT_LIST_HEAD(&dev_priv->gt.rings);
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
+	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
+
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	err = i915_gem_timeline_init__global(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (err)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index e6535255d445..c8fc4b323e62 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -322,6 +322,7 @@ static void advance_ring(struct i915_request *request)
 		 * noops - they are safe to be replayed on a reset.
 		 */
 		tail = READ_ONCE(request->tail);
+		list_del(&ring->active_link);
 	} else {
 		tail = request->postfix;
 	}
@@ -1096,6 +1097,8 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	i915_gem_active_set(&timeline->last_request, request);
 
 	list_add_tail(&request->ring_link, &ring->request_list);
+	if (list_is_first(&request->ring_link, &ring->request_list))
+		list_add(&ring->active_link, &request->i915->gt.active_rings);
 	request->emitted_jiffies = jiffies;
 
 	/*
@@ -1418,14 +1421,17 @@ static void ring_retire_requests(struct intel_ring *ring)
 
 void i915_retire_requests(struct drm_i915_private *i915)
 {
-	struct intel_ring *ring, *next;
+	struct intel_ring *ring, *tmp;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
 	if (!i915->gt.active_requests)
 		return;
 
-	list_for_each_entry_safe(ring, next, &i915->gt.rings, link)
+	/* An outstanding request must be on a still active ring somewhere */
+	GEM_BUG_ON(list_empty(&i915->gt.active_rings));
+
+	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
 		ring_retire_requests(ring);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ae8958007df5..007449cfa22b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1150,8 +1150,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 	}
 	ring->vma = vma;
 
-	list_add(&ring->link, &engine->i915->gt.rings);
-
 	return ring;
 }
 
@@ -1163,8 +1161,6 @@ intel_ring_free(struct intel_ring *ring)
 	i915_vma_close(ring->vma);
 	__i915_gem_object_release_unless_active(obj);
 
-	list_del(&ring->link);
-
 	kfree(ring);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index deb80d01e0bd..fd679cec9ac6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -130,7 +130,7 @@ struct intel_ring {
 	void *vaddr;
 
 	struct list_head request_list;
-	struct list_head link;
+	struct list_head active_link;
 
 	u32 head;
 	u32 tail;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index d95fc481e5c1..19175ddcb45b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -147,15 +147,11 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	INIT_LIST_HEAD(&ring->request_list);
 	intel_ring_update_space(ring);
 
-	list_add(&ring->link, &engine->i915->gt.rings);
-
 	return ring;
 }
 
 static void mock_ring_free(struct intel_ring *ring)
 {
-	list_del(&ring->link);
-
 	kfree(ring);
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index ac4bacf8b5b9..f22a2b35a283 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -224,9 +224,10 @@ struct drm_i915_private *mock_gem_device(void)
 	if (!i915->priorities)
 		goto err_dependencies;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	INIT_LIST_HEAD(&i915->gt.rings);
 	INIT_LIST_HEAD(&i915->gt.timelines);
+	INIT_LIST_HEAD(&i915->gt.active_rings);
+
+	mutex_lock(&i915->drm.struct_mutex);
 	err = i915_gem_timeline_init__global(i915);
 	if (err) {
 		mutex_unlock(&i915->drm.struct_mutex);

From d34deab907605a81eec83afe006fad2e5b4673b4 Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Mon, 23 Apr 2018 17:46:08 -0700
Subject: [PATCH 0547/1286] drm: Make the prime vmap/vunmap hooks optional.

Some drivers leave these unimplemented, so don't force them to provide
empty stubs.

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424004610.4637-2-eric@anholt.net
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
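
A minimal standalone sketch of the pattern (placeholder names, not the
DRM API): an optional hook is simply a function pointer that may be
NULL, and the caller falls back to a benign default instead of
dereferencing the missing callback.

#include <stddef.h>
#include <stdio.h>

struct driver_funcs {
    void *(*vmap)(void *obj);            /* optional */
    void  (*vunmap)(void *obj, void *p); /* optional */
};

static void *obj_vmap(const struct driver_funcs *f, void *obj)
{
    return f->vmap ? f->vmap(obj) : NULL; /* NULL means "not supported" */
}

static void obj_vunmap(const struct driver_funcs *f, void *obj, void *p)
{
    if (f->vunmap)
        f->vunmap(obj, p);
}

int main(void)
{
    struct driver_funcs none = { NULL, NULL }; /* driver leaves both hooks out */

    printf("vmap -> %p\n", obj_vmap(&none, NULL));
    obj_vunmap(&none, NULL, NULL);             /* silently a no-op */
    return 0;
}
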
---
 drivers/gpu/drm/drm_prime.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index caf675e3e692..397b46b33739 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -409,7 +409,10 @@ void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	struct drm_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->dev;
 
-	return dev->driver->gem_prime_vmap(obj);
+	if (dev->driver->gem_prime_vmap)
+		return dev->driver->gem_prime_vmap(obj);
+	else
+		return NULL;
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
 
@@ -426,7 +429,8 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 	struct drm_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->dev;
 
-	dev->driver->gem_prime_vunmap(obj, vaddr);
+	if (dev->driver->gem_prime_vunmap)
+		dev->driver->gem_prime_vunmap(obj, vaddr);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 

From 5e7854bd036ec9b8d8a861def32ecbfd97ca4e77 Mon Sep 17 00:00:00 2001
From: Vaishali Thakkar <vthakkar1994@gmail.com>
Date: Wed, 25 Apr 2018 12:39:53 +0530
Subject: [PATCH 0548/1286] drm/vc4: make function vc4_allocate_bin_bo static

Sparse complains with the following warning:
drivers/gpu/drm/vc4/vc4_v3d.c:222:1: warning: symbol
'vc4_allocate_bin_bo' was not declared. Should it be static?

Make vc4_allocate_bin_bo static as it is not used outside of
vc4_v3d.c.

Signed-off-by: Vaishali Thakkar <vthakkar1994@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180425070953.17933-1-vthakkar1994@gmail.com
---
 drivers/gpu/drm/vc4/vc4_v3d.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index bfc2fa73d2ae..e47e29426078 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -218,8 +218,7 @@ try_again:
  * overall CMA pool before they make scenes complicated enough to run
  * out of bin space.
  */
-int
-vc4_allocate_bin_bo(struct drm_device *drm)
+static int vc4_allocate_bin_bo(struct drm_device *drm)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(drm);
 	struct vc4_v3d *v3d = vc4->v3d;

From 1825067e2b49c984d4b1a77f3720a7ae2576d226 Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Tue, 31 Oct 2017 12:32:57 -0700
Subject: [PATCH 0549/1286] drm/vc4: Skip ULPS latching when we're in that ULPS
 state already.

It seems that trying to go from unlatched to unlatched will time out
waiting for STOP, and we can just skip that.

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20171031193258.17373-1-eric@anholt.net
Reviewed-by: Boris Brezillon <boris.brezillon@bootlin.com>
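
A tiny sketch of the guard this adds (placeholder names, not the vc4
registers): read back the current latch state and bail out early when
the requested state is already in effect, since re-latching the same
state only times out waiting for STOP.

#include <stdbool.h>
#include <stdio.h>

static bool hw_latch_state; /* stand-in for the ULPS latch bit read from the PHY */

static void set_ulps(bool ulps)
{
    if (ulps == hw_latch_state) /* nothing to do: already in that state */
        return;
    hw_latch_state = ulps;
    printf("latched ULPS=%d\n", ulps);
}

int main(void)
{
    set_ulps(false); /* no-op: already unlatched */
    set_ulps(true);  /* performs the transition */
    set_ulps(true);  /* no-op again */
    return 0;
}
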
---
 drivers/gpu/drm/vc4/vc4_dsi.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 94085f8bcd68..8aa897835118 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -753,6 +753,11 @@ static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps)
 			 (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) |
 			 (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_STOP : 0));
 	int ret;
+	bool ulps_currently_enabled = (DSI_PORT_READ(PHY_AFEC0) &
+				       DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS));
+
+	if (ulps == ulps_currently_enabled)
+		return;
 
 	DSI_PORT_WRITE(STAT, stat_ulps);
 	DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) | phyc_ulps);

From 3481fe768faeae3f1d2a929e401748893460d82e Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Tue, 31 Oct 2017 12:32:58 -0700
Subject: [PATCH 0550/1286] drm/panel: Enable DSI transactions on the RPi
 panel.

It turns out that I had just mistaken what type of write the register
writes were supposed to be, using DCS instead of generic long writes.

Switching to transactions instead of using the atmel as a bridge also
seems to resolve the sparkling pixels problem I've had.

Signed-off-by: Eric Anholt <eric@anholt.net>
Fixes: 2f733d6194bd ("drm/panel: Add support for the Raspberry Pi 7" Touchscreen.")
Link: https://patchwork.freedesktop.org/patch/msgid/20171031193258.17373-2-eric@anholt.net
Reviewed-by: Boris Brezillon <boris.brezillon@bootlin.com>
Acked-by: Thierry Reding <treding@nvidia.com>
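
For reference, a standalone sketch of the payload layout used by the
patch (hypothetical helper, not the panel driver itself): the 16-bit
register address and 32-bit value are serialized little-endian into a
single 6-byte buffer, which is then sent as one generic long write.

#include <stdint.h>
#include <stdio.h>

static void pack_reg_write(uint8_t msg[6], uint16_t reg, uint32_t val)
{
    msg[0] = reg;
    msg[1] = reg >> 8;
    msg[2] = val;
    msg[3] = val >> 8;
    msg[4] = val >> 16;
    msg[5] = val >> 24;
}

int main(void)
{
    uint8_t msg[6];
    int i;

    pack_reg_write(msg, 0x0154, 0x00000001); /* illustrative reg/value */
    for (i = 0; i < 6; i++)
        printf("%02x ", msg[i]);
    printf("\n"); /* prints: 54 01 01 00 00 00 */
    return 0;
}
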
---
 .../gpu/drm/panel/panel-raspberrypi-touchscreen.c  | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index d964d454e4ae..2c9c9722734f 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -238,12 +238,6 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
 
 static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
 {
-#if 0
-	/* The firmware uses LP DSI transactions like this to bring up
-	 * the hardware, which should be faster than using I2C to then
-	 * pass to the Toshiba.  However, I was unable to get it to
-	 * work.
-	 */
 	u8 msg[] = {
 		reg,
 		reg >> 8,
@@ -253,13 +247,7 @@ static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
 		val >> 24,
 	};
 
-	mipi_dsi_dcs_write_buffer(ts->dsi, msg, sizeof(msg));
-#else
-	rpi_touchscreen_i2c_write(ts, REG_WR_ADDRH, reg >> 8);
-	rpi_touchscreen_i2c_write(ts, REG_WR_ADDRL, reg);
-	rpi_touchscreen_i2c_write(ts, REG_WRITEH, val >> 8);
-	rpi_touchscreen_i2c_write(ts, REG_WRITEL, val);
-#endif
+	mipi_dsi_generic_write(ts->dsi, msg, sizeof(msg));
 
 	return 0;
 }

From 818f5c8f4cd27747e8218e8a5fb230c322e02d1e Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Wed, 25 Apr 2018 00:03:45 +0200
Subject: [PATCH 0551/1286] drm/vc4: Syncobj import support

Allow userland to specify a syncobj that is waited on before a render job
starts processing.

v2: Use 0 as invalid syncobj to drop flag (Eric)
    Drop extra newline (Eric)

Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/1524607427-12876-2-git-send-email-stschake@gmail.com
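
A boiled-down sketch of the "skip self-waits" check (placeholder types,
not the real dma-fence API): a fence carries the context ID of its
emitter, and when it matches our own submission context the in-order
queue already guarantees it completes before this job, so the wait can
be skipped.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fence { uint64_t context; };

static bool must_wait(const struct fence *in, uint64_t own_context)
{
    return in->context != own_context;
}

int main(void)
{
    struct fence own   = { .context = 7 };
    struct fence other = { .context = 9 };

    printf("own fence:   wait=%d\n", must_wait(&own, 7));   /* 0: skip */
    printf("other fence: wait=%d\n", must_wait(&other, 7)); /* 1: wait */
    return 0;
}
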
---
 drivers/gpu/drm/vc4/vc4_drv.h |  1 +
 drivers/gpu/drm/vc4/vc4_gem.c | 30 +++++++++++++++++++++++++-----
 include/uapi/drm/vc4_drm.h    |  7 +++----
 3 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 22589d39083c..554a4e810d5b 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -11,6 +11,7 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_syncobj.h>
 
 #include "uapi/drm/vc4_drm.h"
 
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 2107b0daf8ef..e305ccdedf47 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -27,6 +27,7 @@
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/sched/signal.h>
+#include <linux/dma-fence-array.h>
 
 #include "uapi/drm/vc4_drm.h"
 #include "vc4_drv.h"
@@ -1115,6 +1116,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 	struct drm_vc4_submit_cl *args = data;
 	struct vc4_exec_info *exec;
 	struct ww_acquire_ctx acquire_ctx;
+	struct dma_fence *in_fence;
 	int ret = 0;
 
 	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
@@ -1125,11 +1127,6 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	if (args->pad2 != 0) {
-		DRM_DEBUG("->pad2 must be set to zero\n");
-		return -EINVAL;
-	}
-
 	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
 	if (!exec) {
 		DRM_ERROR("malloc failure on exec struct\n");
@@ -1164,6 +1161,29 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 		}
 	}
 
+	if (args->in_sync) {
+		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
+					     &in_fence);
+		if (ret)
+			goto fail;
+
+		/* When the fence (or fence array) is exclusively from our
+		 * context we can skip the wait since jobs are executed in
+		 * order of their submission through this ioctl and this can
+		 * only have fences from a prior job.
+		 */
+		if (!dma_fence_match_context(in_fence,
+					     vc4->dma_fence_context)) {
+			ret = dma_fence_wait(in_fence, true);
+			if (ret) {
+				dma_fence_put(in_fence);
+				goto fail;
+			}
+		}
+
+		dma_fence_put(in_fence);
+	}
+
 	if (exec->args->bin_cl_size != 0) {
 		ret = vc4_get_bcl(dev, exec);
 		if (ret)
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index b95a0e11cb07..d97065b86431 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -183,11 +183,10 @@ struct drm_vc4_submit_cl {
 	/* ID of the perfmon to attach to this job. 0 means no perfmon. */
 	__u32 perfmonid;
 
-	/* Unused field to align this struct on 64 bits. Must be set to 0.
-	 * If one ever needs to add an u32 field to this struct, this field
-	 * can be used.
+	/* Syncobj handle to wait on. If set, processing of this render job
+	 * will not start until the syncobj is signaled. 0 means ignore.
 	 */
-	__u32 pad2;
+	__u32 in_sync;
 };
 
 /**

From e84fcb95e07442edd7ce3b13973523646dbc581a Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Wed, 25 Apr 2018 00:03:46 +0200
Subject: [PATCH 0552/1286] drm/vc4: Export fence through syncobj

Allow specifying a syncobj on render job submission in which we store the
fence for the job. This gives userland flexible access to the fence.

v2: Use 0 as invalid syncobj to drop flag (Eric)
    Don't reintroduce the padding (Eric)

Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/1524607427-12876-3-git-send-email-stschake@gmail.com
---
 drivers/gpu/drm/vc4/vc4_gem.c | 30 ++++++++++++++++++++++++++++--
 include/uapi/drm/vc4_drm.h    |  6 ++++++
 2 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index e305ccdedf47..a4c4be3ac6af 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -656,7 +656,8 @@ retry:
  */
 static int
 vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
-		 struct ww_acquire_ctx *acquire_ctx)
+		 struct ww_acquire_ctx *acquire_ctx,
+		 struct drm_syncobj *out_sync)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_exec_info *renderjob;
@@ -679,6 +680,9 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
 	fence->seqno = exec->seqno;
 	exec->fence = &fence->base;
 
+	if (out_sync)
+		drm_syncobj_replace_fence(out_sync, exec->fence);
+
 	vc4_update_bo_seqnos(exec, seqno);
 
 	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
@@ -1114,6 +1118,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_file *vc4file = file_priv->driver_priv;
 	struct drm_vc4_submit_cl *args = data;
+	struct drm_syncobj *out_sync = NULL;
 	struct vc4_exec_info *exec;
 	struct ww_acquire_ctx acquire_ctx;
 	struct dma_fence *in_fence;
@@ -1201,12 +1206,33 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto fail;
 
+	if (args->out_sync) {
+		out_sync = drm_syncobj_find(file_priv, args->out_sync);
+		if (!out_sync) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		/* We replace the fence in out_sync in vc4_queue_submit since
+		 * the render job could execute immediately after that call.
+		 * If it finishes before our ioctl processing resumes the
+		 * render job fence could already have been freed.
+		 */
+	}
+
 	/* Clear this out of the struct we'll be putting in the queue,
 	 * since it's part of our stack.
 	 */
 	exec->args = NULL;
 
-	ret = vc4_queue_submit(dev, exec, &acquire_ctx);
+	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
+
+	/* The syncobj isn't part of the exec data and we need to free our
+	 * reference even if job submission failed.
+	 */
+	if (out_sync)
+		drm_syncobj_put(out_sync);
+
 	if (ret)
 		goto fail;
 
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index d97065b86431..2be4fe3610b8 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -187,6 +187,12 @@ struct drm_vc4_submit_cl {
 	 * will not start until the syncobj is signaled. 0 means ignore.
 	 */
 	__u32 in_sync;
+
+	/* Syncobj handle to export fence to. If set, the fence in the syncobj
+	 * will be replaced with a fence that signals upon completion of this
+	 * render job. 0 means ignore.
+	 */
+	__u32 out_sync;
 };
 
 /**

From c720d8914397fe8efc568eea71e0dd240755a2d9 Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Wed, 25 Apr 2018 00:03:47 +0200
Subject: [PATCH 0553/1286] drm/vc4: Enable syncobj support

This doesn't require any additional functionality from the driver but
is a prerequisite to userland calling the syncobj ioctls.

Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/1524607427-12876-4-git-send-email-stschake@gmail.com
---
 drivers/gpu/drm/vc4/vc4_drv.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 40ddeaafd65f..d9b8b701d2ce 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -175,7 +175,8 @@ static struct drm_driver vc4_drm_driver = {
 			    DRIVER_GEM |
 			    DRIVER_HAVE_IRQ |
 			    DRIVER_RENDER |
-			    DRIVER_PRIME),
+			    DRIVER_PRIME |
+			    DRIVER_SYNCOBJ),
 	.lastclose = drm_fb_helper_lastclose,
 	.open = vc4_open,
 	.postclose = vc4_close,

From fb5c8e9d4350cb20eba1d692213d9efbb7298256 Mon Sep 17 00:00:00 2001
From: Manasi Navare <manasi.d.navare@intel.com>
Date: Wed, 28 Mar 2018 14:58:02 -0700
Subject: [PATCH 0554/1286] drm/i915/icl: Implement voltage swing programming
 sequence for Combo PHY DDI

This is an important part of the DDI initialization as well as
of changing the voltage during DisplayPort link training.

The voltage swing sequence is similar to Cannonlake.
However, it has different register definitions, and hence
it makes sense to create a separate vswing sequence and
programming functions for ICL to leave room for more changes
in case the Bspec changes later and deviates from the CNL sequence.

v2:
Use ~TAP3_DISABLE for enabling that bit (Jani Nikula)

v3:
* Use dw4_scaling column for PORT_TX_DW4 values (Rodrigo)

v4:
* Call it combo_vswing, use switch statement (Paulo)

v5 (from Paulo):
* Fix a typo.
* s/rate < 600000/rate <= 600000/.
* Don't remove blank lines that should be there.

v6:
* Rebased by Rodrigo on top of Cannonlake changes
  where non vswing sequences are not aligned with iboost
  anymore.

v7: Another rebase after an upstream rework.

v8 (from Paulo):
* Adjust the code to the upstream output type changes.
* Squash the patch that moved some functions up.
* Merge both get_combo_buf_trans functions in order to simplify the
  code.
* Change the changelog format.

v9 (from Paulo):
* Use RTERM_SELECT instead of SCALING_MODE_SEL.
* Adjust the output type handling according to how the other platforms
  do it now.

v10 (from Paulo):
* Fix comment left out from v9 changes (Rodrigo).

Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: James Ausmus <james.ausmus@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328215803.13835-8-paulo.r.zanoni@intel.com
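
A condensed sketch of the lookup-and-clamp pattern the vswing sequence
relies on (hypothetical table contents; the real translation tables
live in intel_ddi.c): pick a translation table by rail voltage, then
clamp the requested level to the last valid entry instead of indexing
out of bounds.

#include <stddef.h>
#include <stdio.h>

struct buf_trans { unsigned int swing, scalar; };

static const struct buf_trans table_0_85v[] = {
    { 0x0, 0x98 }, { 0x1, 0x98 }, { 0x2, 0x98 },
};
static const struct buf_trans table_0_95v[] = {
    { 0x0, 0x98 }, { 0x4, 0x98 },
};

static const struct buf_trans *get_table(int voltage_mv, size_t *n)
{
    switch (voltage_mv) {
    case 850: *n = sizeof(table_0_85v) / sizeof(table_0_85v[0]); return table_0_85v;
    case 950: *n = sizeof(table_0_95v) / sizeof(table_0_95v[0]); return table_0_95v;
    default:  *n = 0; return NULL; /* the driver flags this with MISSING_CASE() */
    }
}

int main(void)
{
    size_t n;
    const struct buf_trans *t = get_table(950, &n);
    unsigned int level = 5;

    if (!t)
        return 1;
    if (level >= n)
        level = n - 1; /* clamp, as the driver does with a debug message */
    printf("level %u -> swing %#x\n", level, t[level].swing);
    return 0;
}
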
---
 drivers/gpu/drm/i915/intel_ddi.c | 191 ++++++++++++++++++++++++++++++-
 1 file changed, 188 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 92cb26b18a9b..0edbdb68f311 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -870,6 +870,45 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 	}
 }
 
+static const struct icl_combo_phy_ddi_buf_trans *
+icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
+			int type, int *n_entries)
+{
+	u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
+
+	if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+		switch (voltage) {
+		case VOLTAGE_INFO_0_85V:
+			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
+			return icl_combo_phy_ddi_translations_edp_0_85V;
+		case VOLTAGE_INFO_0_95V:
+			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
+			return icl_combo_phy_ddi_translations_edp_0_95V;
+		case VOLTAGE_INFO_1_05V:
+			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
+			return icl_combo_phy_ddi_translations_edp_1_05V;
+		default:
+			MISSING_CASE(voltage);
+			return NULL;
+		}
+	} else {
+		switch (voltage) {
+		case VOLTAGE_INFO_0_85V:
+			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
+			return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
+		case VOLTAGE_INFO_0_95V:
+			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
+			return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
+		case VOLTAGE_INFO_1_05V:
+			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
+			return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
+		default:
+			MISSING_CASE(voltage);
+			return NULL;
+		}
+	}
+}
+
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
 {
 	int n_entries, level, default_entry;
@@ -2182,6 +2221,146 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 	I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
 }
 
+static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
+					 u32 level, enum port port, int type)
+{
+	const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+	u32 n_entries, val;
+	int ln;
+
+	ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
+						   &n_entries);
+	if (!ddi_translations)
+		return;
+
+	if (level >= n_entries) {
+		DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+		level = n_entries - 1;
+	}
+
+	/* Set PORT_TX_DW5 Rterm Sel to 110b. */
+	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+	val &= ~RTERM_SELECT_MASK;
+	val |= RTERM_SELECT(0x6);
+	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+
+	/* Program PORT_TX_DW5 */
+	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+	/* Set DisableTap2 and DisableTap3 if MIPI DSI
+	 * Clear DisableTap2 and DisableTap3 for all other Ports
+	 */
+	if (type == INTEL_OUTPUT_DSI) {
+		val |= TAP2_DISABLE;
+		val |= TAP3_DISABLE;
+	} else {
+		val &= ~TAP2_DISABLE;
+		val &= ~TAP3_DISABLE;
+	}
+	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+
+	/* Program PORT_TX_DW2 */
+	val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+	val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+		 RCOMP_SCALAR_MASK);
+	val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
+	val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
+	/* Program Rcomp scalar for every table entry */
+	val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+	I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
+
+	/* Program PORT_TX_DW4 */
+	/* We cannot write to GRP. It would overwrite individual loadgen. */
+	for (ln = 0; ln <= 3; ln++) {
+		val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+		val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+			 CURSOR_COEFF_MASK);
+		val |= ddi_translations[level].dw4_scaling;
+		I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+	}
+}
+
+static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
+					      u32 level,
+					      enum intel_output_type type)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
+	int width = 0;
+	int rate = 0;
+	u32 val;
+	int ln = 0;
+
+	if (type == INTEL_OUTPUT_HDMI) {
+		width = 4;
+		/* Rate is always < than 6GHz for HDMI */
+	} else {
+		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+		width = intel_dp->lane_count;
+		rate = intel_dp->link_rate;
+	}
+
+	/*
+	 * 1. If port type is eDP or DP,
+	 * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
+	 * else clear to 0b.
+	 */
+	val = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+	if (type == INTEL_OUTPUT_HDMI)
+		val &= ~COMMON_KEEPER_EN;
+	else
+		val |= COMMON_KEEPER_EN;
+	I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val);
+
+	/* 2. Program loadgen select */
+	/*
+	 * Program PORT_TX_DW4_LN depending on Bit rate and used lanes
+	 * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
+	 * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
+	 * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
+	 */
+	for (ln = 0; ln <= 3; ln++) {
+		val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+		val &= ~LOADGEN_SELECT;
+
+		if ((rate <= 600000 && width == 4 && ln >= 1) ||
+		    (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
+			val |= LOADGEN_SELECT;
+		}
+		I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+	}
+
+	/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
+	val = I915_READ(ICL_PORT_CL_DW5(port));
+	val |= SUS_CLOCK_CONFIG;
+	I915_WRITE(ICL_PORT_CL_DW5(port), val);
+
+	/* 4. Clear training enable to change swing values */
+	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+	val &= ~TX_TRAINING_EN;
+	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+
+	/* 5. Program swing and de-emphasis */
+	icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+
+	/* 6. Set training enable to trigger update */
+	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+	val |= TX_TRAINING_EN;
+	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+}
+
+static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level,
+				    enum intel_output_type type)
+{
+	enum port port = encoder->port;
+
+	if (port == PORT_A || port == PORT_B)
+		icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
+	else
+		/* Not Implemented Yet */
+		WARN_ON(1);
+}
+
 static uint32_t translate_signal_level(int signal_levels)
 {
 	int i;
@@ -2213,7 +2392,9 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
 	struct intel_encoder *encoder = &dport->base;
 	int level = intel_ddi_dp_level(intel_dp);
 
-	if (IS_CANNONLAKE(dev_priv))
+	if (IS_ICELAKE(dev_priv))
+		icl_ddi_vswing_sequence(encoder, level, encoder->type);
+	else if (IS_CANNONLAKE(dev_priv))
 		cnl_ddi_vswing_sequence(encoder, level, encoder->type);
 	else
 		bxt_ddi_vswing_sequence(encoder, level, encoder->type);
@@ -2316,7 +2497,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
-	if (IS_CANNONLAKE(dev_priv))
+	if (IS_ICELAKE(dev_priv))
+		icl_ddi_vswing_sequence(encoder, level, encoder->type);
+	else if (IS_CANNONLAKE(dev_priv))
 		cnl_ddi_vswing_sequence(encoder, level, encoder->type);
 	else if (IS_GEN9_LP(dev_priv))
 		bxt_ddi_vswing_sequence(encoder, level, encoder->type);
@@ -2347,7 +2530,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 
 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
-	if (IS_CANNONLAKE(dev_priv))
+	if (IS_ICELAKE(dev_priv))
+		icl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
+	else if (IS_CANNONLAKE(dev_priv))
 		cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
 	else if (IS_GEN9_LP(dev_priv))
 		bxt_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);

From 36cf89f53b0ccdbd6bdaedfe1435a574609f0de5 Mon Sep 17 00:00:00 2001
From: Manasi Navare <manasi.d.navare@intel.com>
Date: Wed, 28 Mar 2018 14:58:03 -0700
Subject: [PATCH 0555/1286] drm/i915/icl: Fix the DP Max Voltage for ICL

On clock recovery this function is called to find out
the max voltage swing level that we can go to.

However, gen9 functions use the old buffer translation tables
to figure that out. ICL uses a different set of tables for eDP
and DP for both Combo and MG PHY ports. This patch adds the hook
for ICL to get this information from the appropriate buf trans tables.

v5 (from Paulo):
* New rebase after changes to earlier patches.
v4:
* Rebase.
v3:
* Follow the coding conventions here
(https://cgit.freedesktop.org/drm-intel/tree/Documentation/process/coding-style.rst#n191) (Paulo)
v2:
* Rebase after patch that adds voltage check inside buf trans
function (Rodrigo)

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328215803.13835-9-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/intel_ddi.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 0edbdb68f311..8225d223f452 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2064,7 +2064,13 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 	enum port port = encoder->port;
 	int n_entries;
 
-	if (IS_CANNONLAKE(dev_priv)) {
+	if (IS_ICELAKE(dev_priv)) {
+		if (port == PORT_A || port == PORT_B)
+			icl_get_combo_buf_trans(dev_priv, port, encoder->type,
+						&n_entries);
+		else
+			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+	} else if (IS_CANNONLAKE(dev_priv)) {
 		if (encoder->type == INTEL_OUTPUT_EDP)
 			cnl_get_buf_trans_edp(dev_priv, &n_entries);
 		else

From 58badaa7783dc341daa1586235823fff94d3f96a Mon Sep 17 00:00:00 2001
From: "Kristian H. Kristensen" <hoegsberg@gmail.com>
Date: Wed, 18 Apr 2018 10:31:52 -0700
Subject: [PATCH 0556/1286] drm/rockchip: Disable blending for win0

Blending win0 with the background color doesn't seem to work
correctly. We only get the background color, no matter the contents of
the win0 framebuffer.  However, blending pre-multiplied color with the
default opaque black background color is a no-op, so we can
just disable blending to get the correct result.

Signed-off-by: Kristian H. Kristensen <hoegsberg@chromium.org>
Cc: Sandy Huang <hjc@rock-chips.com>
Cc: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418173152.93246-1-hoegsberg@chromium.org
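
A small standalone sketch of how the window index is derived (this is
what the VOP_WIN_TO_INDEX macro in the patch does): the per-window
structs live in one array, so subtracting the array base from a window
pointer yields its index, and index 0 (win0) is the one for which
blending must stay disabled.

#include <stdio.h>

struct vop_win { int dummy; };

static long win_to_index(const struct vop_win *win, const struct vop_win *base)
{
    return win - base; /* pointer difference in elements, not bytes */
}

int main(void)
{
    struct vop_win wins[4];

    printf("win0 index: %ld\n", win_to_index(&wins[0], wins)); /* 0: skip blending */
    printf("win2 index: %ld\n", win_to_index(&wins[2], wins)); /* 2: blending ok  */
    return 0;
}
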
---
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fe3faa7c38d9..2121345a61af 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -76,6 +76,9 @@
 #define VOP_WIN_GET_YRGBADDR(vop, win) \
 		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
 
+#define VOP_WIN_TO_INDEX(vop_win) \
+	((vop_win) - (vop_win)->vop->win)
+
 #define to_vop(x) container_of(x, struct vop, crtc)
 #define to_vop_win(x) container_of(x, struct vop_win, base)
 
@@ -708,6 +711,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	dma_addr_t dma_addr;
 	uint32_t val;
 	bool rb_swap;
+	int win_index = VOP_WIN_TO_INDEX(vop_win);
 	int format;
 
 	/*
@@ -777,7 +781,14 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	rb_swap = has_rb_swapped(fb->format->format);
 	VOP_WIN_SET(vop, win, rb_swap, rb_swap);
 
-	if (fb->format->has_alpha) {
+	/*
+	 * Blending win0 with the background color doesn't seem to work
+	 * correctly. We only get the background color, no matter the contents
+	 * of the win0 framebuffer.  However, blending pre-multiplied color
+	 * with the default opaque black background color is a no-op,
+	 * so we can just disable blending to get the correct result.
+	 */
+	if (fb->format->has_alpha && win_index > 0) {
 		VOP_WIN_SET(vop, win, dst_alpha_ctl,
 			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
 		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |

From c5ce3b8df6c758169b5b5df5ee9adc4c39505d9b Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 1 May 2018 13:21:31 +0100
Subject: [PATCH 0557/1286] drm/i915/execlists: Don't trigger preemption if
 complete

Due to the latency of the tasklet running from ksoftirqd, by the time we
process the execlists dequeue we may be a long time behind the GPU. If the
request was completed when we ran reschedule, we will not have tweaked
its priority, but if it is still listed as being in-flight for dequeue
we will use it as a reference for the rest of the queue, including
requests from its own context which will now be at higher priority. This
can cause us to issue a preempt-to-idle request, even though the request
we want to preempt is already complete.

Reported-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180501122131.19435-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
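
A boiled-down sketch of the fixed predicate (placeholder helpers, not
the execlists code): preemption is only worth requesting when the
engine supports it, the new priority actually beats the in-flight
request, and that in-flight request has not already completed.

#include <stdbool.h>
#include <stdio.h>

struct request { int prio; bool completed; };

static bool need_preempt(bool has_preemption, const struct request *last, int prio)
{
    return has_preemption &&
           prio > last->prio &&
           !last->completed;  /* the extra check added by this patch */
}

int main(void)
{
    struct request done    = { .prio = 0, .completed = true };
    struct request running = { .prio = 0, .completed = false };

    printf("completed last: %d\n", need_preempt(true, &done, 10));    /* 0 */
    printf("running last:   %d\n", need_preempt(true, &running, 10)); /* 1 */
    return 0;
}
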
---
 drivers/gpu/drm/i915/intel_lrc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 099995619472..774b2adc9211 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -185,7 +185,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 				int prio)
 {
 	return (intel_engine_has_preemption(engine) &&
-		__execlists_need_preempt(prio, rq_prio(last)));
+		__execlists_need_preempt(prio, rq_prio(last)) &&
+		!i915_request_completed(last));
 }
 
 /**

From 72ac6969033dc9f5e526566240a3a7934f0916ee Mon Sep 17 00:00:00 2001
From: Satendra Singh Thakur <thakursatendra2003@yahoo.co.in>
Date: Sat, 31 Mar 2018 20:17:58 +0530
Subject: [PATCH 0558/1286] drm/mediatek: Using the function
 drm_display_mode_to_videomode

This patch uses the existing function drm_display_mode_to_videomode() for
calculating the front/back porches and sync lengths for the Mediatek DSI/DPI
drivers, instead of calculating them manually.

Signed-off-by: Satendra Singh Thakur <thakursatendra2003@yahoo.co.in>
Signed-off-by: CK Hu <ck.hu@mediatek.com>
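
A self-contained sketch of the conversion this patch delegates to
drm_display_mode_to_videomode() (the field names mirror the DRM and
videomode structs, but these are local placeholder types): the porches
and sync length fall straight out of the hdisplay/hsync/htotal points.

#include <stdio.h>

struct mode { int hdisplay, hsync_start, hsync_end, htotal; };
struct vm   { int hfront_porch, hsync_len, hback_porch; };

static void mode_to_vm(const struct mode *m, struct vm *vm)
{
    vm->hfront_porch = m->hsync_start - m->hdisplay;
    vm->hsync_len    = m->hsync_end - m->hsync_start;
    vm->hback_porch  = m->htotal - m->hsync_end;
}

int main(void)
{
    /* 1920x1080 horizontal timings, purely illustrative */
    struct mode m = { 1920, 2008, 2052, 2200 };
    struct vm vm;

    mode_to_vm(&m, &vm);
    printf("front %d, sync %d, back %d\n",
           vm.hfront_porch, vm.hsync_len, vm.hback_porch);
    /* prints: front 88, sync 44, back 148 */
    return 0;
}
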
---
 drivers/gpu/drm/mediatek/Kconfig   |  1 +
 drivers/gpu/drm/mediatek/mtk_dpi.c | 60 +++++++++++++++---------------
 drivers/gpu/drm/mediatek/mtk_dsi.c | 14 +------
 3 files changed, 33 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 294de4549922..119ec0a21de2 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -11,6 +11,7 @@ config DRM_MEDIATEK
 	select DRM_PANEL
 	select MEMORY
 	select MTK_SMI
+	select VIDEOMODE_HELPERS
 	help
 	  Choose this option if you have a Mediatek SoCs.
 	  The module will be called mediatek-drm
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index e80a603e5fb0..6c0ea39d5739 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/clk.h>
+#include <video/videomode.h>
 
 #include "mtk_dpi_regs.h"
 #include "mtk_drm_ddp_comp.h"
@@ -429,34 +430,35 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
 	struct mtk_dpi_sync_param vsync_leven = { 0 };
 	struct mtk_dpi_sync_param vsync_rodd = { 0 };
 	struct mtk_dpi_sync_param vsync_reven = { 0 };
-	unsigned long pix_rate;
+	struct videomode vm = { 0 };
 	unsigned long pll_rate;
 	unsigned int factor;
 
 	/* let pll_rate can fix the valid range of tvdpll (1G~2GHz) */
-	pix_rate = 1000UL * mode->clock;
+
 	if (mode->clock <= 27000)
-		factor = 16 * 3;
+		factor = 3 << 4;
 	else if (mode->clock <= 84000)
-		factor = 8 * 3;
+		factor = 3 << 3;
 	else if (mode->clock <= 167000)
-		factor = 4 * 3;
+		factor = 3 << 2;
 	else
-		factor = 2 * 3;
-	pll_rate = pix_rate * factor;
+		factor = 3 << 1;
+	drm_display_mode_to_videomode(mode, &vm);
+	pll_rate = vm.pixelclock * factor;
 
 	dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
-		pll_rate, pix_rate);
+		pll_rate, vm.pixelclock);
 
 	clk_set_rate(dpi->tvd_clk, pll_rate);
 	pll_rate = clk_get_rate(dpi->tvd_clk);
 
-	pix_rate = pll_rate / factor;
-	clk_set_rate(dpi->pixel_clk, pix_rate);
-	pix_rate = clk_get_rate(dpi->pixel_clk);
+	vm.pixelclock = pll_rate / factor;
+	clk_set_rate(dpi->pixel_clk, vm.pixelclock);
+	vm.pixelclock = clk_get_rate(dpi->pixel_clk);
 
 	dev_dbg(dpi->dev, "Got  PLL %lu Hz, pixel clock %lu Hz\n",
-		pll_rate, pix_rate);
+		pll_rate, vm.pixelclock);
 
 	limit.c_bottom = 0x0010;
 	limit.c_top = 0x0FE0;
@@ -465,33 +467,31 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
 
 	dpi_pol.ck_pol = MTK_DPI_POLARITY_FALLING;
 	dpi_pol.de_pol = MTK_DPI_POLARITY_RISING;
-	dpi_pol.hsync_pol = mode->flags & DRM_MODE_FLAG_PHSYNC ?
+	dpi_pol.hsync_pol = vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ?
 			    MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
-	dpi_pol.vsync_pol = mode->flags & DRM_MODE_FLAG_PVSYNC ?
+	dpi_pol.vsync_pol = vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ?
 			    MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
-
-	hsync.sync_width = mode->hsync_end - mode->hsync_start;
-	hsync.back_porch = mode->htotal - mode->hsync_end;
-	hsync.front_porch = mode->hsync_start - mode->hdisplay;
+	hsync.sync_width = vm.hsync_len;
+	hsync.back_porch = vm.hback_porch;
+	hsync.front_porch = vm.hfront_porch;
 	hsync.shift_half_line = false;
-
-	vsync_lodd.sync_width = mode->vsync_end - mode->vsync_start;
-	vsync_lodd.back_porch = mode->vtotal - mode->vsync_end;
-	vsync_lodd.front_porch = mode->vsync_start - mode->vdisplay;
+	vsync_lodd.sync_width = vm.vsync_len;
+	vsync_lodd.back_porch = vm.vback_porch;
+	vsync_lodd.front_porch = vm.vfront_porch;
 	vsync_lodd.shift_half_line = false;
 
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+	if (vm.flags & DISPLAY_FLAGS_INTERLACED &&
 	    mode->flags & DRM_MODE_FLAG_3D_MASK) {
 		vsync_leven = vsync_lodd;
 		vsync_rodd = vsync_lodd;
 		vsync_reven = vsync_lodd;
 		vsync_leven.shift_half_line = true;
 		vsync_reven.shift_half_line = true;
-	} else if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+	} else if (vm.flags & DISPLAY_FLAGS_INTERLACED &&
 		   !(mode->flags & DRM_MODE_FLAG_3D_MASK)) {
 		vsync_leven = vsync_lodd;
 		vsync_leven.shift_half_line = true;
-	} else if (!(mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+	} else if (!(vm.flags & DISPLAY_FLAGS_INTERLACED) &&
 		   mode->flags & DRM_MODE_FLAG_3D_MASK) {
 		vsync_rodd = vsync_lodd;
 	}
@@ -505,12 +505,12 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
 	mtk_dpi_config_vsync_reven(dpi, &vsync_reven);
 
 	mtk_dpi_config_3d(dpi, !!(mode->flags & DRM_MODE_FLAG_3D_MASK));
-	mtk_dpi_config_interface(dpi, !!(mode->flags &
-					 DRM_MODE_FLAG_INTERLACE));
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		mtk_dpi_config_fb_size(dpi, mode->hdisplay, mode->vdisplay / 2);
+	mtk_dpi_config_interface(dpi, !!(vm.flags &
+					 DISPLAY_FLAGS_INTERLACED));
+	if (vm.flags & DISPLAY_FLAGS_INTERLACED)
+		mtk_dpi_config_fb_size(dpi, vm.hactive, vm.vactive >> 1);
 	else
-		mtk_dpi_config_fb_size(dpi, mode->hdisplay, mode->vdisplay);
+		mtk_dpi_config_fb_size(dpi, vm.hactive, vm.vactive);
 
 	mtk_dpi_config_channel_limit(dpi, &limit);
 	mtk_dpi_config_bit_num(dpi, dpi->bit_num);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 7e5e24c2152a..aa0943ec32b0 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -551,13 +551,12 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
 	}
 
 	/**
-	 * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
 	 * htotal_time = htotal * byte_per_pixel / num_lanes
 	 * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
 	 * mipi_ratio = (htotal_time + overhead_time) / htotal_time
 	 * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
 	 */
-	pixel_clock = dsi->vm.pixelclock * 1000;
+	pixel_clock = dsi->vm.pixelclock;
 	htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
 			dsi->vm.hsync_len;
 	htotal_bits = htotal * bit_per_pixel;
@@ -725,16 +724,7 @@ static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
 {
 	struct mtk_dsi *dsi = encoder_to_dsi(encoder);
 
-	dsi->vm.pixelclock = adjusted->clock;
-	dsi->vm.hactive = adjusted->hdisplay;
-	dsi->vm.hback_porch = adjusted->htotal - adjusted->hsync_end;
-	dsi->vm.hfront_porch = adjusted->hsync_start - adjusted->hdisplay;
-	dsi->vm.hsync_len = adjusted->hsync_end - adjusted->hsync_start;
-
-	dsi->vm.vactive = adjusted->vdisplay;
-	dsi->vm.vback_porch = adjusted->vtotal - adjusted->vsync_end;
-	dsi->vm.vfront_porch = adjusted->vsync_start - adjusted->vdisplay;
-	dsi->vm.vsync_len = adjusted->vsync_end - adjusted->vsync_start;
+	drm_display_mode_to_videomode(adjusted, &dsi->vm);
 }
 
 static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)

From 2c269b090651234203c2f74af059a19f98ed101d Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 27 Apr 2018 08:17:08 +0200
Subject: [PATCH 0559/1286] dma-fence: Some kerneldoc polish for dma-fence.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Switch to inline member docs for dma_fence_ops.
- Mild polish all around.
- hyperlink all the things!

v2: - Remove the various [in] annotations, they seem really uncommon
in kerneldoc and look funny.

v3: Linebreak the "Returns" part of the @fill_driver_data kerneldoc
(Eric).

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180427061724.28497-2-daniel.vetter@ffwll.ch
---
 include/linux/dma-fence.h | 236 +++++++++++++++++++++++++-------------
 1 file changed, 155 insertions(+), 81 deletions(-)

diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 4c008170fe65..eb9b05aa5aea 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -94,11 +94,11 @@ typedef void (*dma_fence_func_t)(struct dma_fence *fence,
 				 struct dma_fence_cb *cb);
 
 /**
- * struct dma_fence_cb - callback for dma_fence_add_callback
- * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * struct dma_fence_cb - callback for dma_fence_add_callback()
+ * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
  * @func: dma_fence_func_t to call
  *
- * This struct will be initialized by dma_fence_add_callback, additional
+ * This struct will be initialized by dma_fence_add_callback(), additional
  * data can be passed along by embedding dma_fence_cb in another struct.
  */
 struct dma_fence_cb {
@@ -108,75 +108,143 @@ struct dma_fence_cb {
 
 /**
  * struct dma_fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or dma_fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
  *
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc.  This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
- *
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->error may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling dma_fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after dma_fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean dma_fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->error if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to dma_fence_default_wait for default implementation.
- * the dma_fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
  */
-
 struct dma_fence_ops {
+	/**
+	 * @get_driver_name:
+	 *
+	 * Returns the driver name. This is a callback to allow drivers to
+	 * compute the name at runtime, without having to store it permanently
+	 * for each fence, or build a cache of some sort.
+	 *
+	 * This callback is mandatory.
+	 */
 	const char * (*get_driver_name)(struct dma_fence *fence);
+
+	/**
+	 * @get_timeline_name:
+	 *
+	 * Return the name of the context this fence belongs to. This is a
+	 * callback to allow drivers to compute the name at runtime, without
+	 * having to store it permanently for each fence, or build a cache of
+	 * some sort.
+	 *
+	 * This callback is mandatory.
+	 */
 	const char * (*get_timeline_name)(struct dma_fence *fence);
+
+	/**
+	 * @enable_signaling:
+	 *
+	 * Enable software signaling of fence.
+	 *
+	 * Fence implementations that have the capability for hw->hw
+	 * signaling can implement this op to enable the necessary
+	 * interrupts, or insert commands into the cmdstream, etc., to avoid these
+	 * costly operations for the common case where only hw->hw
+	 * synchronization is required.  This is called in the first
+	 * dma_fence_wait() or dma_fence_add_callback() path to let the fence
+	 * implementation know that there is another driver waiting on the
+	 * signal (ie. hw->sw case).
+	 *
+	 * This function can be called from atomic context, but not
+	 * from irq context, so normal spinlocks can be used.
+	 *
+	 * A return value of false indicates the fence already passed,
+	 * or some failure occurred that made it impossible to enable
+	 * signaling. True indicates successful enabling.
+	 *
+	 * &dma_fence.error may be set in enable_signaling, but only when false
+	 * is returned.
+	 *
+	 * Since many implementations can call dma_fence_signal() even before
+	 * @enable_signaling has been called, there's a race window where
+	 * dma_fence_signal() might result in the final fence reference being
+	 * released and its memory freed. To avoid this, implementations of this
+	 * callback should grab their own reference using dma_fence_get(), to be
+	 * released when the fence is signalled (through e.g. the interrupt
+	 * handler).
+	 *
+	 * This callback is mandatory.
+	 */
 	bool (*enable_signaling)(struct dma_fence *fence);
+
+	/**
+	 * @signaled:
+	 *
+	 * Peek whether the fence is signaled, as a fastpath optimization for
+	 * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
+	 * callback does not need to make any guarantee beyond that a fence
+	 * that has once been reported as signalled must always return true
+	 * from this callback. This callback may return false even if the
+	 * fence has completed already, in which case information hasn't
+	 * propagated through the system yet. See also dma_fence_is_signaled().
+	 *
+	 * May set &dma_fence.error if returning true.
+	 *
+	 * This callback is optional.
+	 */
 	bool (*signaled)(struct dma_fence *fence);
+
+	/**
+	 * @wait:
+	 *
+	 * Custom wait implementation, or dma_fence_default_wait.
+	 *
+	 * Must not be NULL, set to dma_fence_default_wait for the default
+	 * implementation. The dma_fence_default_wait implementation should
+	 * work for any fence, as long as @enable_signaling works correctly.
+	 *
+	 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
+	 * interrupted, the remaining jiffies if the fence has signaled, or 0 if
+	 * the wait timed out. Custom implementations can also return other
+	 * error values, which should be treated as if the fence is signaled;
+	 * a hardware lockup, for example, could be reported that way.
+	 *
+	 * This callback is mandatory.
+	 */
 	signed long (*wait)(struct dma_fence *fence,
 			    bool intr, signed long timeout);
+
+	/**
+	 * @release:
+	 *
+	 * Called on destruction of fence to release additional resources.
+	 * Can be called from irq context.  This callback is optional. If it is
+	 * NULL, then dma_fence_free() is instead called as the default
+	 * implementation.
+	 */
 	void (*release)(struct dma_fence *fence);
 
+	/**
+	 * @fill_driver_data:
+	 *
+	 * Callback to fill in free-form debug info.
+	 *
+	 * Returns the number of bytes filled, or a negative error on failure.
+	 *
+	 * This callback is optional.
+	 */
 	int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+
+	/**
+	 * @fence_value_str:
+	 *
+	 * Callback to fill in free-form debug info specific to this fence, like
+	 * the sequence number.
+	 *
+	 * This callback is optional.
+	 */
 	void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
+
+	/**
+	 * @timeline_value_str:
+	 *
+	 * Fills in the current value of the timeline as a string, like the
+	 * sequence number. This should match what @fence_value_str prints for
+	 * the most recently signalled fence (assuming no delayed signalling).
+	 */
 	void (*timeline_value_str)(struct dma_fence *fence,
 				   char *str, int size);
 };
@@ -189,7 +257,7 @@ void dma_fence_free(struct dma_fence *fence);
 
 /**
  * dma_fence_put - decreases refcount of the fence
- * @fence:	[in]	fence to reduce refcount of
+ * @fence: fence to reduce refcount of
  */
 static inline void dma_fence_put(struct dma_fence *fence)
 {
@@ -199,7 +267,7 @@ static inline void dma_fence_put(struct dma_fence *fence)
 
 /**
  * dma_fence_get - increases refcount of the fence
- * @fence:	[in]	fence to increase refcount of
+ * @fence: fence to increase refcount of
  *
  * Returns the same fence, with refcount increased by 1.
  */
@@ -213,7 +281,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
 /**
  * dma_fence_get_rcu - get a fence from a reservation_object_list with
  *                     rcu read lock
- * @fence:	[in]	fence to increase refcount of
+ * @fence: fence to increase refcount of
  *
  * Function returns NULL if no refcount could be obtained, or the fence.
  */
@@ -227,7 +295,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
 
 /**
  * dma_fence_get_rcu_safe  - acquire a reference to an RCU tracked fence
- * @fencep:	[in]	pointer to fence to increase refcount of
+ * @fencep: pointer to fence to increase refcount of
  *
  * Function returns NULL if no refcount could be obtained, or the fence.
  * This function handles acquiring a reference to a fence that may be
@@ -289,14 +357,16 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);
 /**
  * dma_fence_is_signaled_locked - Return an indication if the fence
  *                                is signaled yet.
- * @fence:	[in]	the fence to check
+ * @fence: the fence to check
  *
  * Returns true if the fence was already signaled, false if not. Since this
  * function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
  *
- * This function requires fence->lock to be held.
+ * This function requires &dma_fence.lock to be held.
+ *
+ * See also dma_fence_is_signaled().
  */
 static inline bool
 dma_fence_is_signaled_locked(struct dma_fence *fence)
@@ -314,17 +384,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
 
 /**
  * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence:	[in]	the fence to check
+ * @fence: the fence to check
  *
  * Returns true if the fence was already signaled, false if not. Since this
  * function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
  *
  * It's recommended for seqno fences to call dma_fence_signal when the
  * operation is complete, it makes it possible to prevent issues from
  * wraparound between time of issue and time of use by checking the return
  * value of this function before calling hardware-specific wait instructions.
+ *
+ * See also dma_fence_is_signaled_locked().
  */
 static inline bool
 dma_fence_is_signaled(struct dma_fence *fence)
@@ -342,8 +414,8 @@ dma_fence_is_signaled(struct dma_fence *fence)
 
 /**
  * __dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1:	[in]	the first fence's seqno
- * @f2:	[in]	the second fence's seqno from the same context
+ * @f1: the first fence's seqno
+ * @f2: the second fence's seqno from the same context
  *
  * Returns true if f1 is chronologically later than f2. Both fences must be
  * from the same context, since a seqno is not common across contexts.
@@ -355,8 +427,8 @@ static inline bool __dma_fence_is_later(u32 f1, u32 f2)
 
 /**
  * dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1:	[in]	the first fence from the same context
- * @f2:	[in]	the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
  *
  * Returns true if f1 is chronologically later than f2. Both fences must be
  * from the same context, since a seqno is not re-used across contexts.
@@ -372,8 +444,8 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
 
 /**
  * dma_fence_later - return the chronologically later fence
- * @f1:	[in]	the first fence from the same context
- * @f2:	[in]	the second fence from the same context
+ * @f1:	the first fence from the same context
+ * @f2:	the second fence from the same context
  *
  * Returns NULL if both fences are signaled, otherwise the fence that would be
  * signaled last. Both fences must be from the same context, since a seqno is
@@ -398,7 +470,7 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
 
 /**
  * dma_fence_get_status_locked - returns the status upon completion
- * @fence: [in]	the dma_fence to query
+ * @fence: the dma_fence to query
  *
  * Drivers can supply an optional error status condition before they signal
  * the fence (to indicate whether the fence was completed due to an error
@@ -422,8 +494,8 @@ int dma_fence_get_status(struct dma_fence *fence);
 
 /**
  * dma_fence_set_error - flag an error condition on the fence
- * @fence: [in]	the dma_fence
- * @error: [in]	the error to store
+ * @fence: the dma_fence
+ * @error: the error to store
  *
  * Drivers can supply an optional error status condition before they signal
  * the fence, to indicate that the fence was completed due to an error
@@ -449,8 +521,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
 
 /**
  * dma_fence_wait - sleep until the fence gets signaled
- * @fence:	[in]	the fence to wait on
- * @intr:	[in]	if true, do an interruptible wait
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
  *
  * This function will return -ERESTARTSYS if interrupted by a signal,
  * or 0 if the fence was signaled. Other error values may be
@@ -459,6 +531,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
  * Performs a synchronous wait on this fence. It is assumed the caller
  * directly or indirectly holds a reference to the fence, otherwise the
  * fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
  */
 static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
 {

From 043477b088d2af61a0937754c6560002237e6741 Mon Sep 17 00:00:00 2001
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Mon, 30 Apr 2018 10:52:59 +0300
Subject: [PATCH 0560/1286] drm/i915: Print error state times relative to
 capture

Using plain jiffies in the error state output makes the printed
time differences relative to the current system time. This is
wrong, as it makes the output depend on when the error state is
printed rather than on when it was captured.

Store the capture jiffies in the error state and use them when
outputting the state, so that the reported time differences are
anchored to the capture.
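
For illustration (sketch only, using the field names introduced below):

	/* before: relative to the time of printing */
	jiffies_to_msecs(jiffies - erq->jiffies)

	/* after: relative to the capture epoch, stable across printouts */
	jiffies_to_msecs(erq->jiffies - epoch)

where epoch (error->epoch) is the earliest stalled hangcheck timestamp,
falling back to the capture time itself when no engine had stalled.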

v2: use engine timestamp as epoch, output formatting (Chris)
v3: pass epoch to print_engine/request (Chris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430075259.4476-1-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/i915_gpu_error.c | 47 ++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gpu_error.h |  2 ++
 2 files changed, 40 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index c0127965b578..1176d068f88a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -404,15 +404,16 @@ static const char *bannable(const struct drm_i915_error_context *ctx)
 
 static void error_print_request(struct drm_i915_error_state_buf *m,
 				const char *prefix,
-				const struct drm_i915_error_request *erq)
+				const struct drm_i915_error_request *erq,
+				const unsigned long epoch)
 {
 	if (!erq->seqno)
 		return;
 
-	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
+	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->ban_score,
 		   erq->context, erq->seqno, erq->sched_attr.priority,
-		   jiffies_to_msecs(jiffies - erq->jiffies),
+		   jiffies_to_msecs(erq->jiffies - epoch),
 		   erq->head, erq->tail);
 }
 
@@ -427,7 +428,8 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 }
 
 static void error_print_engine(struct drm_i915_error_state_buf *m,
-			       const struct drm_i915_error_engine *ee)
+			       const struct drm_i915_error_engine *ee,
+			       const unsigned long epoch)
 {
 	int n;
 
@@ -497,14 +499,15 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 	err_printf(m, "  hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
 	err_printf(m, "  hangcheck action: %s\n",
 		   hangcheck_action_to_str(ee->hangcheck_action));
-	err_printf(m, "  hangcheck action timestamp: %lu, %u ms ago\n",
+	err_printf(m, "  hangcheck action timestamp: %dms (%lu%s)\n",
+		   jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
 		   ee->hangcheck_timestamp,
-		   jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
+		   ee->hangcheck_timestamp == epoch ? "; epoch" : "");
 	err_printf(m, "  engine reset count: %u\n", ee->reset_count);
 
 	for (n = 0; n < ee->num_ports; n++) {
 		err_printf(m, "  ELSP[%d]:", n);
-		error_print_request(m, " ", &ee->execlist[n]);
+		error_print_request(m, " ", &ee->execlist[n], epoch);
 	}
 
 	error_print_context(m, "  Active context: ", &ee->context);
@@ -650,6 +653,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	ts = ktime_to_timespec64(error->uptime);
 	err_printf(m, "Uptime: %lld s %ld us\n",
 		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
+	err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
+	err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
+		   error->capture,
+		   jiffies_to_msecs(jiffies - error->capture),
+		   jiffies_to_msecs(error->capture - error->epoch));
 
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
 		if (error->engine[i].hangcheck_stalled &&
@@ -710,7 +718,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
 		if (error->engine[i].engine_id != -1)
-			error_print_engine(m, &error->engine[i]);
+			error_print_engine(m, &error->engine[i], error->epoch);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
@@ -769,7 +777,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 				   dev_priv->engine[i]->name,
 				   ee->num_requests);
 			for (j = 0; j < ee->num_requests; j++)
-				error_print_request(m, " ", &ee->requests[j]);
+				error_print_request(m, " ",
+						    &ee->requests[j],
+						    error->epoch);
 		}
 
 		if (IS_ERR(ee->waiters)) {
@@ -1736,6 +1746,22 @@ static void capture_params(struct i915_gpu_state *error)
 #undef DUP
 }
 
+static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
+{
+	unsigned long epoch = error->capture;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		const struct drm_i915_error_engine *ee = &error->engine[i];
+
+		if (ee->hangcheck_stalled &&
+		    time_before(ee->hangcheck_timestamp, epoch))
+			epoch = ee->hangcheck_timestamp;
+	}
+
+	return epoch;
+}
+
 static int capture(void *data)
 {
 	struct i915_gpu_state *error = data;
@@ -1744,6 +1770,7 @@ static int capture(void *data)
 	error->boottime = ktime_get_boottime();
 	error->uptime = ktime_sub(ktime_get(),
 				  error->i915->gt.last_init_time);
+	error->capture = jiffies;
 
 	capture_params(error);
 	capture_gen_state(error);
@@ -1757,6 +1784,8 @@ static int capture(void *data)
 	error->overlay = intel_overlay_capture_error_state(error->i915);
 	error->display = intel_display_capture_error_state(error->i915);
 
+	error->epoch = capture_find_epoch(error);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 5d6fdcbc092c..0accd2ed72d9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -31,6 +31,8 @@ struct i915_gpu_state {
 	ktime_t time;
 	ktime_t boottime;
 	ktime_t uptime;
+	unsigned long capture;
+	unsigned long epoch;
 
 	struct drm_i915_private *i915;
 

From f425d08bf17a68ade12424f420feadb70113f23a Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Tue, 24 Apr 2018 15:20:16 +0300
Subject: [PATCH 0561/1286] drm/i915: add support for specifying DMC firmware
 override by module param

Use i915.dmc_firmware_path to override the default firmware for the
platform and bypass the version checks.
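
For example, an override can be requested on the kernel command line
(the firmware file name here is only a placeholder):

	i915.dmc_firmware_path=i915/dmc_test.bin

or at module load time:

	modprobe i915 dmc_firmware_path=i915/dmc_test.bin

The path is handed to the firmware loader, so it is looked up in the
usual firmware search paths (e.g. /lib/firmware).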

v2: add missing param struct member declaration (David)

Tested-by: David Weinehall <david.weinehall@linux.intel.com>
Reviewed-by: David Weinehall <david.weinehall@linux.intel.com>
Cc: Anusha Srivatsa <anusha.srivatsa@intel.com>
Cc: David Weinehall <david.weinehall@linux.intel.com>
Acked-by: Anusha Srivatsa <anusha.srivatsa@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424122016.2416-1-jani.nikula@intel.com
---
 drivers/gpu/drm/i915/i915_params.c | 3 +++
 drivers/gpu/drm/i915/i915_params.h | 1 +
 drivers/gpu/drm/i915/intel_csr.c   | 9 +++++++--
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 08108ce5be21..66ea3552c63e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -164,6 +164,9 @@ i915_param_named_unsafe(guc_firmware_path, charp, 0400,
 i915_param_named_unsafe(huc_firmware_path, charp, 0400,
 	"HuC firmware path to use instead of the default one");
 
+i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
+	"DMC firmware path to use instead of the default one");
+
 i915_param_named_unsafe(enable_dp_mst, bool, 0600,
 	"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
 
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index c96360398072..6684025b7af8 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -51,6 +51,7 @@ struct drm_printer;
 	param(int, guc_log_level, -1) \
 	param(char *, guc_firmware_path, NULL) \
 	param(char *, huc_firmware_path, NULL) \
+	param(char *, dmc_firmware_path, NULL) \
 	param(int, mmio_debug, 0) \
 	param(int, edp_vswing, 0) \
 	param(int, reset, 2) \
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index f9550ea46c26..cf9b600cca79 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -298,7 +298,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	csr->version = css_header->version;
 
-	if (IS_CANNONLAKE(dev_priv)) {
+	if (csr->fw_path == i915_modparams.dmc_firmware_path) {
+		/* Bypass version check for firmware override. */
+		required_version = csr->version;
+	} else if (IS_CANNONLAKE(dev_priv)) {
 		required_version = CNL_CSR_VERSION_REQUIRED;
 	} else if (IS_GEMINILAKE(dev_priv)) {
 		required_version = GLK_CSR_VERSION_REQUIRED;
@@ -453,7 +456,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	if (IS_CANNONLAKE(dev_priv))
+	if (i915_modparams.dmc_firmware_path)
+		csr->fw_path = i915_modparams.dmc_firmware_path;
+	else if (IS_CANNONLAKE(dev_priv))
 		csr->fw_path = I915_CSR_CNL;
 	else if (IS_GEMINILAKE(dev_priv))
 		csr->fw_path = I915_CSR_GLK;

From 77cbe925bf77bd3159f49c4db0ea89a2045d9071 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 17 Apr 2018 18:06:38 +0100
Subject: [PATCH 0562/1286] drm/i915/selftests: Fix error checking for
 wait_var_timeout

The old wait_on_atomic_t used a custom callback to perform the
schedule(), which used my return semantics of reporting an error code on
timeout. wait_var_event_timeout() uses the schedule() return semantics
of reporting the remaining jiffies (1 if it timed out with 0 jiffies
remaining!) and 0 on failure. This semantic mismatch led us to falsely
claim that a timeout occurred.
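
In short, the convention now relied upon is (sketch, matching the hunk
below):

	/*
	 * wait_var_event_timeout() returns the remaining jiffies (at least 1)
	 * if the condition became true, or 0 if it timed out with the
	 * condition still false.
	 */
	if (!wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ))
		err = -ETIMEDOUT;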

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=106085
Fixes: d224985a5e31 ("sched/wait, drivers/drm: Convert wait_on_atomic_t() usage to the new wait_var_event() API")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180417170638.20550-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 46580026c7fc..d6926e7820e5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -412,10 +412,11 @@ static int igt_wakeup(void *arg)
 		 * that they are ready for the next test. We wait until all
 		 * threads are complete and waiting for us (i.e. not a seqno).
 		 */
-		err = wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ);
-		if (err) {
+		if (!wait_var_event_timeout(&done,
+					    !atomic_read(&done), 10 * HZ)) {
 			pr_err("Timed out waiting for %d remaining waiters\n",
 			       atomic_read(&done));
+			err = -ETIMEDOUT;
 			break;
 		}
 

From 304f72e5947d63682159d2f575f56607592df500 Mon Sep 17 00:00:00 2001
From: Colin Ian King <colin.king@canonical.com>
Date: Wed, 2 May 2018 11:10:48 +0100
Subject: [PATCH 0563/1286] gpu: drm: sti: fix spelling mistake: "initialze" ->
 "initialize"

Trivial fix to spelling mistake in DRM_ERROR error message

Signed-off-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502101048.8442-1-colin.king@canonical.com
---
 drivers/gpu/drm/sti/sti_crtc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 21e50d7b1f86..5824e6aca8f4 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -357,7 +357,7 @@ int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
 	res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
 					&sti_crtc_funcs, NULL);
 	if (res) {
-		DRM_ERROR("Can't initialze CRTC\n");
+		DRM_ERROR("Can't initialize CRTC\n");
 		return -EINVAL;
 	}
 

From 3a068721a97320b2ffdbf0fc0685cc300dce5388 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 2 May 2018 11:41:50 +0100
Subject: [PATCH 0564/1286] drm/i915: Show ring->start for the ELSP
 context/request queue

Since the advent of execlists, the HW no longer executes from a single
statically assigned ring, but instead switches to a different ring for
each context (so-called logical ringbuffer contexts). So a good way
to tally the executing context against what we have queued is by
comparing the RING_START register against our requests. Make it so.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502104150.29874-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gpu_error.c  | 5 +++--
 drivers/gpu/drm/i915/i915_gpu_error.h  | 1 +
 drivers/gpu/drm/i915/intel_engine_cs.c | 5 +++--
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 1176d068f88a..944939947d30 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -410,11 +410,11 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 	if (!erq->seqno)
 		return;
 
-	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, head %08x, tail %08x\n",
+	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->ban_score,
 		   erq->context, erq->seqno, erq->sched_attr.priority,
 		   jiffies_to_msecs(erq->jiffies - epoch),
-		   erq->head, erq->tail);
+		   erq->start, erq->head, erq->tail);
 }
 
 static void error_print_context(struct drm_i915_error_state_buf *m,
@@ -1292,6 +1292,7 @@ static void record_request(struct i915_request *request,
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
+	erq->start = i915_ggtt_offset(request->ring->vma);
 	erq->head = request->head;
 	erq->tail = request->tail;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 0accd2ed72d9..dac0f8c4c1cf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -152,6 +152,7 @@ struct i915_gpu_state {
 			u32 context;
 			int ban_score;
 			u32 seqno;
+			u32 start;
 			u32 head;
 			u32 tail;
 			struct i915_sched_attr sched_attr;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 238c8d3da041..9164e6d665f8 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1278,8 +1278,9 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
 				char hdr[80];
 
 				snprintf(hdr, sizeof(hdr),
-					 "\t\tELSP[%d] count=%d, rq: ",
-					 idx, count);
+					 "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
+					 idx, count,
+					 i915_ggtt_offset(rq->ring->vma));
 				print_request(m, rq, hdr);
 			} else {
 				drm_printf(m, "\t\tELSP[%d] idle\n", idx);

From 46e2068081e96472b1d7d2456fa0655aa148b32b Mon Sep 17 00:00:00 2001
From: Matthias Kaehlcke <mka@chromium.org>
Date: Tue, 1 May 2018 11:24:40 -0700
Subject: [PATCH 0565/1286] drm/i915: Disable some extra clang warnings

Commit 39bf4de89ff7 ("drm/i915: Add -Wall -Wextra to our build, set
warnings to full") enabled extra warnings for i915 to spot possible
bugs in new code, and then disabled a subset of these warnings to keep
the current code building without warnings (with gcc). Enabling the
extra warnings also enabled some additional clang-only warnings; as a
result, building i915 with clang is currently extremely noisy. For now,
also disable the clang warnings sign-compare, sometimes-uninitialized,
unneeded-internal-declaration and initializer-overrides. If desired
they can be re-enabled after the code has been fixed.

Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180501182440.70121-1-mka@chromium.org
---
 drivers/gpu/drm/i915/Makefile | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9bee52a949a9..dfe01452c8d1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -18,6 +18,10 @@ subdir-ccflags-y += $(call cc-disable-warning, type-limits)
 subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
 subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
 subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
+# clang warnings
+subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
+subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
+subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
 
 # Fine grained warnings disable

From 0d4b78b3d2c0cb570abe086bce8c17ea3c474095 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 1 May 2018 08:52:03 +0100
Subject: [PATCH 0566/1286] drm/i915/guc: Assert we have the doorbell before
 setting it up
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

As our early doorbell is split between early allocation and a late setup
after we have a channel to the GuC, it may happen, due to a lapse of
programmer judgement, that we try to set up an invalid doorbell. Make use
of our has_doorbell() function to check that the doorbell exists for the
client before we try to tell the GuC about it. In doing so, we prevent
the compiler from warning about the otherwise unused function in some
configurations.

Reported-by: Matthias Kaehlcke <mka@chromium.org>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180501075203.12458-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_guc_submission.c | 22 +++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 6e6ed0f46bd3..c6bb5bebddfc 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -124,9 +124,17 @@ static int reserve_doorbell(struct intel_guc_client *client)
 	return 0;
 }
 
+static bool has_doorbell(struct intel_guc_client *client)
+{
+	if (client->doorbell_id == GUC_DOORBELL_INVALID)
+		return false;
+
+	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+}
+
 static void unreserve_doorbell(struct intel_guc_client *client)
 {
-	GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
+	GEM_BUG_ON(!has_doorbell(client));
 
 	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
 	client->doorbell_id = GUC_DOORBELL_INVALID;
@@ -184,14 +192,6 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
 	return client->vaddr + client->doorbell_offset;
 }
 
-static bool has_doorbell(struct intel_guc_client *client)
-{
-	if (client->doorbell_id == GUC_DOORBELL_INVALID)
-		return false;
-
-	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
-}
-
 static void __create_doorbell(struct intel_guc_client *client)
 {
 	struct guc_doorbell_info *doorbell;
@@ -207,7 +207,6 @@ static void __destroy_doorbell(struct intel_guc_client *client)
 	struct guc_doorbell_info *doorbell;
 	u16 db_id = client->doorbell_id;
 
-
 	doorbell = __get_doorbell(client);
 	doorbell->db_status = GUC_DOORBELL_DISABLED;
 	doorbell->cookie = 0;
@@ -224,6 +223,9 @@ static int create_doorbell(struct intel_guc_client *client)
 {
 	int ret;
 
+	if (WARN_ON(!has_doorbell(client)))
+		return -ENODEV; /* internal setup error, should never happen */
+
 	__update_doorbell_desc(client, client->doorbell_id);
 	__create_doorbell(client);
 

From ca454bd42dc24374150febf83a443e8c1d9cf28a Mon Sep 17 00:00:00 2001
From: Linus Walleij <linus.walleij@linaro.org>
Date: Wed, 2 May 2018 15:47:18 +0200
Subject: [PATCH 0567/1286] drm/pl111: Support the Versatile Express

The Versatile Express uses a special configuration controller
deeply embedded in the system motherboard FPGA to multiplex the
two to three (!) display controller instances out to the single
SiI9022 bridge.

Set up an extra file with the logic to probe the FPGA mux
register on the system controller bus, then parse the device
tree to see if there is a CLCD or HDLCD instance on the core
tile (also known as the daughterboard) by looking in the
root of the device tree for compatible nodes.

- If there is a HDLCD on the core tile, and there is a driver
  for it, we exit probe and deactivate the motherboard CLCD.
  We do not touch the DVI mux in this case, to make sure we
  don't break HDLCD.

- If there is a CLCD on both the motherboard and the core tile
  (only the CA9 has this), the core tile CLCD takes precedence
  and gets muxed to the DVI connector.

- Only if there is no working graphics on the core tile is the
  motherboard CLCD probed and muxed to the DVI connector.

Core tile graphics should always take precedence as it can
address all memory and is also faster, however the motherboard
CLCD is good to have around for diagnostics and testing.

It is possible to test the motherboard CLCD by setting the
status = "disabled" property on the core tile CLCD or
HDLCD.
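
For example (sketch only; the actual label or node path depends on the
core tile device tree):

	&clcd {
		status = "disabled";
	};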

Scale down the Versatile Express to 16BPP so we can support a
1024x768 display despite the bus bandwidth restrictions on this
platform. (The motherboard CLCD supports slightly lower
resolution.)

Cc: Liviu Dudau <liviu.dudau@arm.com>
Cc: Pawel Moll <pawel.moll@arm.com>
Acked-by: Eric Anholt <eric@anholt.net>
Tested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502134719.8388-1-linus.walleij@linaro.org
---
 drivers/gpu/drm/pl111/Makefile          |   1 +
 drivers/gpu/drm/pl111/pl111_versatile.c |  49 +++++++++-
 drivers/gpu/drm/pl111/pl111_vexpress.c  | 125 ++++++++++++++++++++++++
 drivers/gpu/drm/pl111/pl111_vexpress.h  |  22 +++++
 4 files changed, 196 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/pl111/pl111_vexpress.c
 create mode 100644 drivers/gpu/drm/pl111/pl111_vexpress.h

diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index 9c5e8dba8ac6..19a8189dc54f 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -3,6 +3,7 @@ pl111_drm-y +=	pl111_display.o \
 		pl111_versatile.o \
 		pl111_drv.o
 
+pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
 pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
 
 obj-$(CONFIG_DRM_PL111) += pl111_drm.o
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 9302f516045e..78ddf8534fd2 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -1,12 +1,14 @@
 #include <linux/amba/clcd-regs.h>
 #include <linux/device.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include "pl111_versatile.h"
+#include "pl111_vexpress.h"
 #include "pl111_drm.h"
 
 static struct regmap *versatile_syscon_map;
@@ -22,6 +24,7 @@ enum versatile_clcd {
 	REALVIEW_CLCD_PB11MP,
 	REALVIEW_CLCD_PBA8,
 	REALVIEW_CLCD_PBX,
+	VEXPRESS_CLCD_V2M,
 };
 
 static const struct of_device_id versatile_clcd_of_match[] = {
@@ -53,6 +56,10 @@ static const struct of_device_id versatile_clcd_of_match[] = {
 		.compatible = "arm,realview-pbx-syscon",
 		.data = (void *)REALVIEW_CLCD_PBX,
 	},
+	{
+		.compatible = "arm,vexpress-muxfpga",
+		.data = (void *)VEXPRESS_CLCD_V2M,
+	},
 	{},
 };
 
@@ -286,12 +293,26 @@ static const struct pl111_variant_data pl111_realview = {
 	.fb_bpp = 16,
 };
 
+/*
+ * Versatile Express PL111 variant, again we just push the maximum
+ * BPP to 16 to be able to get 1024x768 without saturating the memory
+ * bus. The clockdivider also seems broken on the Versatile Express.
+ */
+static const struct pl111_variant_data pl111_vexpress = {
+	.name = "PL111 Versatile Express",
+	.formats = pl111_realview_pixel_formats,
+	.nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
+	.fb_bpp = 16,
+	.broken_clockdivider = true,
+};
+
 int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 {
 	const struct of_device_id *clcd_id;
 	enum versatile_clcd versatile_clcd_type;
 	struct device_node *np;
 	struct regmap *map;
+	int ret;
 
 	np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
 					     &clcd_id);
@@ -301,7 +322,26 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 	}
 	versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
 
-	map = syscon_node_to_regmap(np);
+	/* Versatile Express special handling */
+	if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
+		struct platform_device *pdev;
+
+		/* Call into deep Vexpress configuration API */
+		pdev = of_find_device_by_node(np);
+		if (!pdev) {
+			dev_err(dev, "can't find the sysreg device, deferring\n");
+			return -EPROBE_DEFER;
+		}
+		map = dev_get_drvdata(&pdev->dev);
+		if (!map) {
+			dev_err(dev, "sysreg has not yet probed\n");
+			platform_device_put(pdev);
+			return -EPROBE_DEFER;
+		}
+	} else {
+		map = syscon_node_to_regmap(np);
+	}
+
 	if (IS_ERR(map)) {
 		dev_err(dev, "no Versatile syscon regmap\n");
 		return PTR_ERR(map);
@@ -340,6 +380,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 		priv->variant_display_disable = pl111_realview_clcd_disable;
 		dev_info(dev, "set up callbacks for RealView PL111\n");
 		break;
+	case VEXPRESS_CLCD_V2M:
+		priv->variant = &pl111_vexpress;
+		dev_info(dev, "initializing Versatile Express PL111\n");
+		ret = pl111_vexpress_clcd_init(dev, priv, map);
+		if (ret)
+			return ret;
+		break;
 	default:
 		dev_info(dev, "unknown Versatile system controller\n");
 		break;
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
new file mode 100644
index 000000000000..c9fee625faf1
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Versatile Express PL111 handling
+ * Copyright (C) 2018 Linus Walleij
+ *
+ * This module binds to the "arm,vexpress-muxfpga" device on the
+ * Versatile Express configuration bus and sets up which CLCD instance
+ * gets muxed out on the DVI bridge.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/vexpress.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include "pl111_drm.h"
+#include "pl111_vexpress.h"
+
+#define VEXPRESS_FPGAMUX_MOTHERBOARD		0x00
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1	0x01
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2	0x02
+
+int pl111_vexpress_clcd_init(struct device *dev,
+			     struct pl111_drm_dev_private *priv,
+			     struct regmap *map)
+{
+	struct device_node *root;
+	struct device_node *child;
+	struct device_node *ct_clcd = NULL;
+	bool has_coretile_clcd = false;
+	bool has_coretile_hdlcd = false;
+	bool mux_motherboard = true;
+	u32 val;
+	int ret;
+
+	/*
+	 * Check if we have a CLCD or HDLCD on the core tile by checking if a
+	 * CLCD or HDLCD is available in the root of the device tree.
+	 */
+	root = of_find_node_by_path("/");
+	if (!root)
+		return -EINVAL;
+
+	for_each_available_child_of_node(root, child) {
+		if (of_device_is_compatible(child, "arm,pl111")) {
+			has_coretile_clcd = true;
+			ct_clcd = child;
+			break;
+		}
+		if (of_device_is_compatible(child, "arm,hdlcd")) {
+			has_coretile_hdlcd = true;
+			break;
+		}
+	}
+
+	/*
+	 * If there is a coretile HDLCD and it has a driver,
+	 * do not mux the CLCD on the motherboard to the DVI.
+	 */
+	if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
+		mux_motherboard = false;
+
+	/*
+	 * On the Vexpress CA9 we let the CLCD on the coretile
+	 * take precedence, so also in this case do not mux the
+	 * motherboard to the DVI.
+	 */
+	if (has_coretile_clcd)
+		mux_motherboard = false;
+
+	if (mux_motherboard) {
+		dev_info(dev, "DVI muxed to motherboard CLCD\n");
+		val = VEXPRESS_FPGAMUX_MOTHERBOARD;
+	} else if (ct_clcd == dev->of_node) {
+		dev_info(dev,
+			 "DVI muxed to daughterboard 1 (core tile) CLCD\n");
+		val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
+	} else {
+		dev_info(dev, "core tile graphics present\n");
+		dev_info(dev, "this device will be deactivated\n");
+		return -ENODEV;
+	}
+
+	ret = regmap_write(map, 0, val);
+	if (ret) {
+		dev_err(dev, "error setting DVI muxmode\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * This sets up the regmap pointer that will then be retrieved by
+ * the detection code in pl111_versatile.c and passed in to the
+ * pl111_vexpress_clcd_init() function above.
+ */
+static int vexpress_muxfpga_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct regmap *map;
+
+	map = devm_regmap_init_vexpress_config(&pdev->dev);
+	if (IS_ERR(map))
+		return PTR_ERR(map);
+	dev_set_drvdata(dev, map);
+
+	return 0;
+}
+
+static const struct of_device_id vexpress_muxfpga_match[] = {
+	{ .compatible = "arm,vexpress-muxfpga", }
+};
+
+static struct platform_driver vexpress_muxfpga_driver = {
+	.driver = {
+		.name = "vexpress-muxfpga",
+		.of_match_table = of_match_ptr(vexpress_muxfpga_match),
+	},
+	.probe = vexpress_muxfpga_probe,
+};
+
+builtin_platform_driver(vexpress_muxfpga_driver);
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h
new file mode 100644
index 000000000000..bb54864ca91e
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+struct device;
+struct pl111_drm_dev_private;
+struct regmap;
+
+#ifdef CONFIG_ARCH_VEXPRESS
+
+int pl111_vexpress_clcd_init(struct device *dev,
+			     struct pl111_drm_dev_private *priv,
+			     struct regmap *map);
+
+#else
+
+static inline int pl111_vexpress_clcd_init(struct device *dev,
+					   struct pl111_drm_dev_private *priv,
+					   struct regmap *map)
+{
+	return -ENODEV;
+}
+
+#endif

From 57450671776b37d7c81cd52a89982c14bca46cfc Mon Sep 17 00:00:00 2001
From: Linus Walleij <linus.walleij@linaro.org>
Date: Wed, 2 May 2018 15:47:19 +0200
Subject: [PATCH 0568/1286] drm/pl111: Enable device-specific assigned memory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The Versatile Express has 8 MB of dedicated video RAM (VRAM)
on the motherboard, which is what we should be using for the
PL111 if available. On this platform, the memory backplane
is constructed so that only this memory will work properly
with the CLCD on the motherboard; using any other memory
area just gives random snow on the display.

The CA9 Versatile Express also has a PL111 instance on its
core tile that can address all memory, and this does not
have the restriction.

The memory is assigned to the device using the memory-region
device tree property and a "shared-dma-pool" reserved
memory pool like this:

reserved-memory {
        #address-cells = <1>;
        #size-cells = <1>;
        ranges;

        vram: vram@48000000 {
                compatible = "shared-dma-pool";
                reg = <0x48000000 0x00800000>;
                no-map;
        };
};

clcd@1f000 {
        compatible = "arm,pl111", "arm,primecell";
	(...)
        memory-region = <&vram>;
};

Cc: Liviu Dudau <liviu.dudau@arm.com>
Cc: Mali DP Maintainers <malidp@foss.arm.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Tested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502134719.8388-2-linus.walleij@linaro.org
---
 drivers/gpu/drm/pl111/pl111_drm.h |  1 +
 drivers/gpu/drm/pl111/pl111_drv.c | 34 +++++++++++++++++++++++++++++--
 2 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 8639b2d4ddf7..ce4501d0ab48 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -79,6 +79,7 @@ struct pl111_drm_dev_private {
 	const struct pl111_variant_data *variant;
 	void (*variant_display_enable) (struct drm_device *drm, u32 format);
 	void (*variant_display_disable) (struct drm_device *drm);
+	bool use_device_memory;
 };
 
 int pl111_display_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 4621259d5387..454ff0804642 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -60,6 +60,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
@@ -207,6 +208,24 @@ finish:
 	return ret;
 }
 
+static struct drm_gem_object *
+pl111_gem_import_sg_table(struct drm_device *dev,
+			  struct dma_buf_attachment *attach,
+			  struct sg_table *sgt)
+{
+	struct pl111_drm_dev_private *priv = dev->dev_private;
+
+	/*
+	 * When using device-specific reserved memory we can't import
+	 * DMA buffers: those are passed by reference in any global
+	 * memory and we can only handle a specific range of memory.
+	 */
+	if (priv->use_device_memory)
+		return ERR_PTR(-EINVAL);
+
+	return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+}
+
 DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
 
 static struct drm_driver pl111_drm_driver = {
@@ -227,7 +246,7 @@ static struct drm_driver pl111_drm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import = drm_gem_prime_import,
-	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_import_sg_table = pl111_gem_import_sg_table,
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
 
@@ -257,6 +276,12 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
 	drm->dev_private = priv;
 	priv->variant = variant;
 
+	ret = of_reserved_mem_device_init(dev);
+	if (!ret) {
+		dev_info(dev, "using device-specific reserved memory\n");
+		priv->use_device_memory = true;
+	}
+
 	if (of_property_read_u32(dev->of_node, "max-memory-bandwidth",
 				 &priv->memory_bw)) {
 		dev_info(dev, "no max memory bandwidth specified, assume unlimited\n");
@@ -275,7 +300,8 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
 	priv->regs = devm_ioremap_resource(dev, &amba_dev->res);
 	if (IS_ERR(priv->regs)) {
 		dev_err(dev, "%s failed mmio\n", __func__);
-		return PTR_ERR(priv->regs);
+		ret = PTR_ERR(priv->regs);
+		goto dev_unref;
 	}
 
 	/* This may override some variant settings */
@@ -305,11 +331,14 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
 
 dev_unref:
 	drm_dev_unref(drm);
+	of_reserved_mem_device_release(dev);
+
 	return ret;
 }
 
 static int pl111_amba_remove(struct amba_device *amba_dev)
 {
+	struct device *dev = &amba_dev->dev;
 	struct drm_device *drm = amba_get_drvdata(amba_dev);
 	struct pl111_drm_dev_private *priv = drm->dev_private;
 
@@ -319,6 +348,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
 		drm_panel_bridge_remove(priv->bridge);
 	drm_mode_config_cleanup(drm);
 	drm_dev_unref(drm);
+	of_reserved_mem_device_release(dev);
 
 	return 0;
 }

From ec66723197103eebd7f7099df6d5ea23deff679b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 26 Apr 2018 17:16:31 +0300
Subject: [PATCH 0569/1286] drm/rect: Fix drm_rect_rotation_inv() docs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

An overeager sed has corrupted the drm_rect_rotation_inv()
documentation. Fix it up.

Looks like it wasn't entirely correct before the sed fail
either. We were missing _rect_ from the function names, which
also explains why the sed hit these by accident.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426141631.15798-1-ville.syrjala@linux.intel.com
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/drm_rect.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 9817c1445ba9..a3783ecea297 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -373,8 +373,8 @@ EXPORT_SYMBOL(drm_rect_rotate);
  * them when doing a rotatation and its inverse.
  * That is, if you do ::
  *
- *     DRM_MODE_PROP_ROTATE(&r, width, height, rotation);
- *     DRM_MODE_ROTATE_inv(&r, width, height, rotation);
+ *     drm_rect_rotate(&r, width, height, rotation);
+ *     drm_rect_rotate_inv(&r, width, height, rotation);
  *
  * you will always get back the original rectangle.
  */

From 81cf8b74b0e0728589fdaa37cb3ae42a561bfd5c Mon Sep 17 00:00:00 2001
From: Anusha Srivatsa <anusha.srivatsa@intel.com>
Date: Mon, 30 Apr 2018 15:59:28 -0700
Subject: [PATCH 0570/1286] drm/i915/firmware: Correct URL for firmware

Replace 01.org URL with upstream linux-firmware repo URL.
We no longer release firmware to 01.org.
linux-firmware.git is the ultimate place to find
the i915 firmwares.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Anusha Srivatsa <anusha.srivatsa@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525129168-529-1-git-send-email-anusha.srivatsa@intel.com
---
 drivers/gpu/drm/i915/intel_uc_fw.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index dc33b12394de..87910aa83267 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -30,7 +30,7 @@ struct drm_i915_private;
 struct i915_vma;
 
 /* Home of GuC, HuC and DMC firmwares */
-#define INTEL_UC_FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
+#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
 
 enum intel_uc_fw_status {
 	INTEL_UC_FIRMWARE_FAIL = -1,

From 65fcb8064dd0e54d4674e8e2c6bf6ed7264a29e9 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 2 May 2018 17:38:38 +0100
Subject: [PATCH 0571/1286] drm/i915: Move timeline from GTT to ring

In the future, we want to move a request between engines. To achieve
this, we first realise that we have two timelines in effect here. The
first runs through the GTT and is required for ordering vma access; it is
currently tracked per engine. The second is implied by the sequential
execution of commands inside the ringbuffer. This timeline is the one that
maps to userspace's expectations when submitting requests (i.e. given the
same context, batch A is executed before batch B). As the rings'
timelines map to userspace and the GTT timeline is an implementation
detail, move the timeline from the GTT into the ring itself (per-context
in logical-ring-contexts/execlists, or a global per-engine timeline for
the shared ringbuffers in legacy submission).

The two timelines are still assumed to be equivalent at the moment (no
migrating requests between engines yet) and so we can simply move from
one to the other without adding extra ordering.
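
Schematically, the mapping after this patch is (sketch, using the names
from the diff below):

	GTT/execution timeline (ordering vma access, per engine)
		-> i915->gt.execution_timeline (previously gt.global_timeline)
	ring/client timeline (userspace submission order)
		-> ctx->timeline for execlists contexts
		-> i915->gt.legacy_timeline for the shared legacy ringbuffers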

v2: Reinforce that one isn't allowed to mix the engine execution
timeline with the client timeline from userspace (on the ring).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502163839.3248-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.h               | 13 +----
 drivers/gpu/drm/i915/i915_gem.c               |  9 ++--
 drivers/gpu/drm/i915/i915_gem_context.c       | 15 +++++-
 drivers/gpu/drm/i915/i915_gem_context.h       |  2 +
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  3 --
 drivers/gpu/drm/i915/i915_gem_gtt.h           |  1 -
 drivers/gpu/drm/i915/i915_gem_timeline.c      | 54 +++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_timeline.h      |  4 ++
 drivers/gpu/drm/i915/i915_request.c           | 13 +++--
 drivers/gpu/drm/i915/intel_engine_cs.c        |  3 +-
 drivers/gpu/drm/i915/intel_lrc.c              |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c       | 10 +++-
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  5 +-
 .../gpu/drm/i915/selftests/i915_gem_context.c | 12 +++++
 drivers/gpu/drm/i915/selftests/mock_engine.c  |  5 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  4 +-
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |  1 -
 17 files changed, 115 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6268a5103dba..ffa87aef31e5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2059,7 +2059,8 @@ struct drm_i915_private {
 		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
-		struct i915_gem_timeline global_timeline;
+		struct i915_gem_timeline execution_timeline;
+		struct i915_gem_timeline legacy_timeline;
 		struct list_head timelines;
 
 		struct list_head active_rings;
@@ -3235,16 +3236,6 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 	return ctx;
 }
 
-static inline struct intel_timeline *
-i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
-				 struct intel_engine_cs *engine)
-{
-	struct i915_address_space *vm;
-
-	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
-	return &vm->timeline.engine[engine->id];
-}
-
 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file);
 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fa1d94a4eb5f..438a2fc5bba0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3110,10 +3110,10 @@ static void engine_skip_context(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_context *hung_ctx = request->ctx;
-	struct intel_timeline *timeline;
+	struct intel_timeline *timeline = request->timeline;
 	unsigned long flags;
 
-	timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
+	GEM_BUG_ON(timeline == engine->timeline);
 
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 	spin_lock(&timeline->lock);
@@ -3782,7 +3782,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 
 		ret = wait_for_engines(i915);
 	} else {
-		ret = wait_for_timeline(&i915->gt.global_timeline, flags);
+		ret = wait_for_timeline(&i915->gt.execution_timeline, flags);
 	}
 
 	return ret;
@@ -5652,7 +5652,8 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	WARN_ON(dev_priv->mm.object_count);
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
+	i915_gem_timeline_fini(&dev_priv->gt.legacy_timeline);
+	i915_gem_timeline_fini(&dev_priv->gt.execution_timeline);
 	WARN_ON(!list_empty(&dev_priv->gt.timelines));
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 59d4bd4a7b73..1f4987dc6616 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -122,6 +122,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+	i915_gem_timeline_free(ctx->timeline);
 	i915_ppgtt_put(ctx->ppgtt);
 
 	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
@@ -376,6 +377,18 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
 		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
 	}
 
+	if (HAS_EXECLISTS(dev_priv)) {
+		struct i915_gem_timeline *timeline;
+
+		timeline = i915_gem_timeline_create(dev_priv, ctx->name);
+		if (IS_ERR(timeline)) {
+			__destroy_hw_context(ctx, file_priv);
+			return ERR_CAST(timeline);
+		}
+
+		ctx->timeline = timeline;
+	}
+
 	trace_i915_context_create(ctx);
 
 	return ctx;
@@ -584,7 +597,7 @@ static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
 		struct intel_timeline *tl;
 
-		if (timeline == &engine->i915->gt.global_timeline)
+		if (timeline == &engine->i915->gt.execution_timeline)
 			continue;
 
 		tl = &timeline->engine[engine->id];
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ace3b129c189..ec53ba06f836 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -58,6 +58,8 @@ struct i915_gem_context {
 	/** file_priv: owning file descriptor */
 	struct drm_i915_file_private *file_priv;
 
+	struct i915_gem_timeline *timeline;
+
 	/**
 	 * @ppgtt: unique address space (GTT)
 	 *
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 21d72f695adb..e9d828324f67 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2111,8 +2111,6 @@ static void i915_address_space_init(struct i915_address_space *vm,
 				    struct drm_i915_private *dev_priv,
 				    const char *name)
 {
-	i915_gem_timeline_init(dev_priv, &vm->timeline, name);
-
 	drm_mm_init(&vm->mm, 0, vm->total);
 	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
 
@@ -2129,7 +2127,6 @@ static void i915_address_space_fini(struct i915_address_space *vm)
 	if (pagevec_count(&vm->free_pages))
 		vm_free_pages_release(vm, true);
 
-	i915_gem_timeline_fini(&vm->timeline);
 	drm_mm_takedown(&vm->mm);
 	list_del(&vm->global_link);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 6efc017e8bb3..98107925de48 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -257,7 +257,6 @@ struct i915_pml4 {
 
 struct i915_address_space {
 	struct drm_mm mm;
-	struct i915_gem_timeline timeline;
 	struct drm_i915_private *i915;
 	struct device *dma;
 	/* Every address space belongs to a struct file - except for the global
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index e9fd87604067..24f4068cc137 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -95,12 +95,28 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
 
 int i915_gem_timeline_init__global(struct drm_i915_private *i915)
 {
-	static struct lock_class_key class;
+	static struct lock_class_key class1, class2;
+	int err;
 
-	return __i915_gem_timeline_init(i915,
-					&i915->gt.global_timeline,
-					"[execution]",
-					&class, "&global_timeline->lock");
+	err = __i915_gem_timeline_init(i915,
+				       &i915->gt.execution_timeline,
+				       "[execution]", &class1,
+				       "i915_execution_timeline");
+	if (err)
+		return err;
+
+	err = __i915_gem_timeline_init(i915,
+				       &i915->gt.legacy_timeline,
+				       "[global]", &class2,
+				       "i915_global_timeline");
+	if (err)
+		goto err_exec_timeline;
+
+	return 0;
+
+err_exec_timeline:
+	i915_gem_timeline_fini(&i915->gt.execution_timeline);
+	return err;
 }
 
 /**
@@ -148,6 +164,34 @@ void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
 	kfree(timeline->name);
 }
 
+struct i915_gem_timeline *
+i915_gem_timeline_create(struct drm_i915_private *i915, const char *name)
+{
+	struct i915_gem_timeline *timeline;
+	int err;
+
+	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
+	if (!timeline)
+		return ERR_PTR(-ENOMEM);
+
+	err = i915_gem_timeline_init(i915, timeline, name);
+	if (err) {
+		kfree(timeline);
+		return ERR_PTR(err);
+	}
+
+	return timeline;
+}
+
+void i915_gem_timeline_free(struct i915_gem_timeline *timeline)
+{
+	if (!timeline)
+		return;
+
+	i915_gem_timeline_fini(timeline);
+	kfree(timeline);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_timeline.c"
 #include "selftests/i915_gem_timeline.c"
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index 6e82119e2cd8..780ed465c4fc 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -90,6 +90,10 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915);
 void i915_gem_timelines_park(struct drm_i915_private *i915);
 void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
 
+struct i915_gem_timeline *
+i915_gem_timeline_create(struct drm_i915_private *i915, const char *name);
+void i915_gem_timeline_free(struct i915_gem_timeline *timeline);
+
 static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
 					    u64 context, u32 seqno)
 {
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c8fc4b323e62..7bb613c00cc3 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -758,7 +758,12 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		}
 	}
 
-	rq->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+	INIT_LIST_HEAD(&rq->active_list);
+	rq->i915 = i915;
+	rq->engine = engine;
+	rq->ctx = ctx;
+	rq->ring = ring;
+	rq->timeline = ring->timeline;
 	GEM_BUG_ON(rq->timeline == engine->timeline);
 
 	spin_lock_init(&rq->lock);
@@ -774,12 +779,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 
 	i915_sched_node_init(&rq->sched);
 
-	INIT_LIST_HEAD(&rq->active_list);
-	rq->i915 = i915;
-	rq->engine = engine;
-	rq->ctx = ctx;
-	rq->ring = ring;
-
 	/* No zalloc, must clear what we need by hand */
 	rq->global_seqno = 0;
 	rq->signaling.wait.seqno = 0;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 9164e6d665f8..7af5fe85612d 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -453,7 +453,8 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 
 static void intel_engine_init_timeline(struct intel_engine_cs *engine)
 {
-	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
+	engine->timeline =
+		&engine->i915->gt.execution_timeline.engine[engine->id];
 }
 
 static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 57396a2a6ea2..9b2407753ebd 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2624,7 +2624,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine, ctx->ring_size);
+	ring = intel_engine_create_ring(engine, ctx->timeline, ctx->ring_size);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 007449cfa22b..b73e700c3048 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1117,13 +1117,16 @@ err:
 }
 
 struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+intel_engine_create_ring(struct intel_engine_cs *engine,
+			 struct i915_gem_timeline *timeline,
+			 int size)
 {
 	struct intel_ring *ring;
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!is_power_of_2(size));
 	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+	GEM_BUG_ON(&timeline->engine[engine->id] == engine->timeline);
 	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1131,6 +1134,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&ring->request_list);
+	ring->timeline = &timeline->engine[engine->id];
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -1327,7 +1331,9 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	if (err)
 		goto err;
 
-	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
+	ring = intel_engine_create_ring(engine,
+					&engine->i915->gt.legacy_timeline,
+					32 * PAGE_SIZE);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
 		goto err;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fd679cec9ac6..da53aa2973a7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -129,6 +129,7 @@ struct intel_ring {
 	struct i915_vma *vma;
 	void *vaddr;
 
+	struct intel_timeline *timeline;
 	struct list_head request_list;
 	struct list_head active_link;
 
@@ -768,7 +769,9 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 #define CNL_HWS_CSB_WRITE_INDEX		0x2f
 
 struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+intel_engine_create_ring(struct intel_engine_cs *engine,
+			 struct i915_gem_timeline *timeline,
+			 int size);
 int intel_ring_pin(struct intel_ring *ring,
 		   struct drm_i915_private *i915,
 		   unsigned int offset_bias);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7ecaed50d0b9..24ac648dc83a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -355,6 +355,18 @@ static int igt_ctx_exec(void *arg)
 
 		if (first_shared_gtt) {
 			ctx = __create_hw_context(i915, file->driver_priv);
+			if (!IS_ERR(ctx) && HAS_EXECLISTS(i915)) {
+				struct i915_gem_timeline *timeline;
+
+				timeline = i915_gem_timeline_create(i915, ctx->name);
+				if (IS_ERR(timeline)) {
+					__destroy_hw_context(ctx, file->driver_priv);
+					ctx = ERR_CAST(timeline);
+				} else {
+					ctx->timeline = timeline;
+				}
+			}
+
 			first_shared_gtt = false;
 		} else {
 			ctx = i915_gem_create_context(i915, file->driver_priv);
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 19175ddcb45b..6752498e2c73 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -140,6 +140,8 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	if (!ring)
 		return NULL;
 
+	ring->timeline = &engine->i915->gt.legacy_timeline.engine[engine->id];
+
 	ring->size = sz;
 	ring->effective_size = sz;
 	ring->vaddr = (void *)(ring + 1);
@@ -180,8 +182,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.emit_breadcrumb = mock_emit_breadcrumb;
 	engine->base.submit_request = mock_submit_request;
 
-	engine->base.timeline =
-		&i915->gt.global_timeline.engine[engine->base.id];
+	intel_engine_init_timeline(&engine->base);
 
 	intel_engine_init_breadcrumbs(&engine->base);
 	engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index f22a2b35a283..f11c83e8ff32 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -73,7 +73,9 @@ static void mock_device_release(struct drm_device *dev)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_fini_ggtt(i915);
-	i915_gem_timeline_fini(&i915->gt.global_timeline);
+	i915_gem_timeline_fini(&i915->gt.legacy_timeline);
+	i915_gem_timeline_fini(&i915->gt.execution_timeline);
+	WARN_ON(!list_empty(&i915->gt.timelines));
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	destroy_workqueue(i915->wq);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index e96873f96116..36c112088940 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -76,7 +76,6 @@ mock_ppgtt(struct drm_i915_private *i915,
 
 	INIT_LIST_HEAD(&ppgtt->base.global_link);
 	drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-	i915_gem_timeline_init(i915, &ppgtt->base.timeline, name);
 
 	ppgtt->base.clear_range = nop_clear_range;
 	ppgtt->base.insert_page = mock_insert_page;

From a89d1f921c15932b4c9a70861d134290f1a14a10 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 2 May 2018 17:38:39 +0100
Subject: [PATCH 0572/1286] drm/i915: Split i915_gem_timeline into individual
 timelines

We need to move to a more flexible timeline that doesn't assume one
fence context per engine, and so allows a single timeline to be used
across a combination of engines. This means that preallocating a fence
context per engine is now a hindrance, and so we want to introduce the
singular timeline. From the code perspective, this has the notable
advantage of clearing up a lot of murky semantics and some clumsy
pointer chasing.

By splitting the timeline up so that each is a single entity rather
than an array of per-engine timelines, we can realise the goal of the
previous patch of tracking the timeline alongside the ring.
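
As a rough sketch of the data-structure change (simplified,
hypothetical sketch_* names; the fields and the kref-based get/put
mirror the i915_timeline.h hunk below), the per-engine array inside
i915_gem_timeline gives way to a standalone, refcounted timeline:

  #include <linux/kref.h>
  #include <linux/types.h>

  #define SKETCH_NUM_ENGINES 8  /* stand-in for I915_NUM_ENGINES */

  /* Before: one container holding a timeline slot per engine. */
  struct sketch_gem_timeline {
          struct {
                  u64 fence_context;
                  u32 seqno;
          } engine[SKETCH_NUM_ENGINES];
  };

  /* After: a single entity with one fence context and its own
   * lifetime, shared across engines via get/put rather than indexed
   * by engine id.
   */
  struct sketch_i915_timeline {
          u64 fence_context;      /* dma_fence_context_alloc(1) in the patch */
          u32 seqno;
          struct kref kref;       /* i915_timeline_get() / i915_timeline_put() */
  };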

v2: Tweak wait_for_idle to stop the compiler from thinking that ret
may be uninitialised.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502163839.3248-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/Makefile                 |   2 +-
 drivers/gpu/drm/i915/i915_drv.h               |   4 +-
 drivers/gpu/drm/i915/i915_gem.c               | 129 +++++-------
 drivers/gpu/drm/i915/i915_gem_context.c       |  48 ++---
 drivers/gpu/drm/i915/i915_gem_context.h       |   2 -
 drivers/gpu/drm/i915/i915_gem_gtt.h           |   3 +-
 drivers/gpu/drm/i915/i915_gem_timeline.c      | 198 ------------------
 drivers/gpu/drm/i915/i915_gpu_error.c         |   4 +-
 drivers/gpu/drm/i915/i915_perf.c              |  10 +-
 drivers/gpu/drm/i915/i915_request.c           |  68 +++---
 drivers/gpu/drm/i915/i915_request.h           |   3 +-
 drivers/gpu/drm/i915/i915_timeline.c          | 105 ++++++++++
 .../{i915_gem_timeline.h => i915_timeline.h}  |  67 +++---
 drivers/gpu/drm/i915/intel_engine_cs.c        |  27 ++-
 drivers/gpu/drm/i915/intel_guc_submission.c   |   4 +-
 drivers/gpu/drm/i915/intel_lrc.c              |  48 +++--
 drivers/gpu/drm/i915/intel_ringbuffer.c       |  25 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  11 +-
 .../gpu/drm/i915/selftests/i915_gem_context.c |  12 --
 .../{i915_gem_timeline.c => i915_timeline.c}  |  94 +++------
 drivers/gpu/drm/i915/selftests/mock_engine.c  |  32 ++-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  10 +-
 .../gpu/drm/i915/selftests/mock_timeline.c    |  45 ++--
 .../gpu/drm/i915/selftests/mock_timeline.h    |  28 +--
 24 files changed, 397 insertions(+), 582 deletions(-)
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_timeline.c
 create mode 100644 drivers/gpu/drm/i915/i915_timeline.c
 rename drivers/gpu/drm/i915/{i915_gem_timeline.h => i915_timeline.h} (68%)
 rename drivers/gpu/drm/i915/selftests/{i915_gem_timeline.c => i915_timeline.c} (70%)

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index dfe01452c8d1..00c13382b008 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -71,11 +71,11 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
-	  i915_gem_timeline.o \
 	  i915_gem_userptr.o \
 	  i915_gemfs.o \
 	  i915_query.o \
 	  i915_request.o \
+	  i915_timeline.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
 	  intel_breadcrumbs.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ffa87aef31e5..11ff84eef52a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,10 +72,10 @@
 #include "i915_gem_fence_reg.h"
 #include "i915_gem_object.h"
 #include "i915_gem_gtt.h"
-#include "i915_gem_timeline.h"
 #include "i915_gpu_error.h"
 #include "i915_request.h"
 #include "i915_scheduler.h"
+#include "i915_timeline.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -2059,8 +2059,6 @@ struct drm_i915_private {
 		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
-		struct i915_gem_timeline execution_timeline;
-		struct i915_gem_timeline legacy_timeline;
 		struct list_head timelines;
 
 		struct list_head active_rings;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 438a2fc5bba0..484354f25f98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -162,7 +162,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	synchronize_irq(i915->drm.irq);
 
 	intel_engines_park(i915);
-	i915_gem_timelines_park(i915);
+	i915_timelines_park(i915);
 
 	i915_pmu_gt_parked(i915);
 
@@ -2977,8 +2977,8 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	 * extra delay for a recent interrupt is pointless. Hence, we do
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
 	 */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
-	list_for_each_entry(request, &engine->timeline->requests, link) {
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+	list_for_each_entry(request, &engine->timeline.requests, link) {
 		if (__i915_request_completed(request, request->global_seqno))
 			continue;
 
@@ -2989,7 +2989,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 		active = request;
 		break;
 	}
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
 	return active;
 }
@@ -3110,15 +3110,15 @@ static void engine_skip_context(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_context *hung_ctx = request->ctx;
-	struct intel_timeline *timeline = request->timeline;
+	struct i915_timeline *timeline = request->timeline;
 	unsigned long flags;
 
-	GEM_BUG_ON(timeline == engine->timeline);
+	GEM_BUG_ON(timeline == &engine->timeline);
 
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 	spin_lock(&timeline->lock);
 
-	list_for_each_entry_continue(request, &engine->timeline->requests, link)
+	list_for_each_entry_continue(request, &engine->timeline.requests, link)
 		if (request->ctx == hung_ctx)
 			skip_request(request);
 
@@ -3126,7 +3126,7 @@ static void engine_skip_context(struct i915_request *request)
 		skip_request(request);
 
 	spin_unlock(&timeline->lock);
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 /* Returns the request if it was guilty of the hang */
@@ -3183,11 +3183,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 			dma_fence_set_error(&request->fence, -EAGAIN);
 
 			/* Rewind the engine to replay the incomplete rq */
-			spin_lock_irq(&engine->timeline->lock);
+			spin_lock_irq(&engine->timeline.lock);
 			request = list_prev_entry(request, link);
-			if (&request->link == &engine->timeline->requests)
+			if (&request->link == &engine->timeline.requests)
 				request = NULL;
-			spin_unlock_irq(&engine->timeline->lock);
+			spin_unlock_irq(&engine->timeline.lock);
 		}
 	}
 
@@ -3300,10 +3300,10 @@ static void nop_complete_submit_request(struct i915_request *request)
 		  request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);
 
-	spin_lock_irqsave(&request->engine->timeline->lock, flags);
+	spin_lock_irqsave(&request->engine->timeline.lock, flags);
 	__i915_request_submit(request);
 	intel_engine_init_global_seqno(request->engine, request->global_seqno);
-	spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
 }
 
 void i915_gem_set_wedged(struct drm_i915_private *i915)
@@ -3372,10 +3372,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		 * (lockless) lookup doesn't try and wait upon the request as we
 		 * reset it.
 		 */
-		spin_lock_irqsave(&engine->timeline->lock, flags);
+		spin_lock_irqsave(&engine->timeline.lock, flags);
 		intel_engine_init_global_seqno(engine,
 					       intel_engine_last_submit(engine));
-		spin_unlock_irqrestore(&engine->timeline->lock, flags);
+		spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
 		i915_gem_reset_finish_engine(engine);
 	}
@@ -3387,8 +3387,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 
 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
-	struct i915_gem_timeline *tl;
-	int i;
+	struct i915_timeline *tl;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
@@ -3407,29 +3406,27 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	 * No more can be submitted until we reset the wedged bit.
 	 */
 	list_for_each_entry(tl, &i915->gt.timelines, link) {
-		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
-			struct i915_request *rq;
+		struct i915_request *rq;
 
-			rq = i915_gem_active_peek(&tl->engine[i].last_request,
-						  &i915->drm.struct_mutex);
-			if (!rq)
-				continue;
+		rq = i915_gem_active_peek(&tl->last_request,
+					  &i915->drm.struct_mutex);
+		if (!rq)
+			continue;
 
-			/*
-			 * We can't use our normal waiter as we want to
-			 * avoid recursively trying to handle the current
-			 * reset. The basic dma_fence_default_wait() installs
-			 * a callback for dma_fence_signal(), which is
-			 * triggered by our nop handler (indirectly, the
-			 * callback enables the signaler thread which is
-			 * woken by the nop_submit_request() advancing the seqno
-			 * and when the seqno passes the fence, the signaler
-			 * then signals the fence waking us up).
-			 */
-			if (dma_fence_default_wait(&rq->fence, true,
-						   MAX_SCHEDULE_TIMEOUT) < 0)
-				return false;
-		}
+		/*
+		 * We can't use our normal waiter as we want to
+		 * avoid recursively trying to handle the current
+		 * reset. The basic dma_fence_default_wait() installs
+		 * a callback for dma_fence_signal(), which is
+		 * triggered by our nop handler (indirectly, the
+		 * callback enables the signaler thread which is
+		 * woken by the nop_submit_request() advancing the seqno
+		 * and when the seqno passes the fence, the signaler
+		 * then signals the fence waking us up).
+		 */
+		if (dma_fence_default_wait(&rq->fence, true,
+					   MAX_SCHEDULE_TIMEOUT) < 0)
+			return false;
 	}
 	i915_retire_requests(i915);
 	GEM_BUG_ON(i915->gt.active_requests);
@@ -3734,17 +3731,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	return ret;
 }
 
-static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
+static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
 {
-	int ret, i;
-
-	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
-		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
+	return i915_gem_active_wait(&tl->last_request, flags);
 }
 
 static int wait_for_engines(struct drm_i915_private *i915)
@@ -3762,30 +3751,37 @@ static int wait_for_engines(struct drm_i915_private *i915)
 
 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
-	int ret;
-
 	/* If the device is asleep, we have no requests outstanding */
 	if (!READ_ONCE(i915->gt.awake))
 		return 0;
 
 	if (flags & I915_WAIT_LOCKED) {
-		struct i915_gem_timeline *tl;
+		struct i915_timeline *tl;
+		int err;
 
 		lockdep_assert_held(&i915->drm.struct_mutex);
 
 		list_for_each_entry(tl, &i915->gt.timelines, link) {
-			ret = wait_for_timeline(tl, flags);
-			if (ret)
-				return ret;
+			err = wait_for_timeline(tl, flags);
+			if (err)
+				return err;
 		}
 		i915_retire_requests(i915);
 
-		ret = wait_for_engines(i915);
+		return wait_for_engines(i915);
 	} else {
-		ret = wait_for_timeline(&i915->gt.execution_timeline, flags);
-	}
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
+		int err;
 
-	return ret;
+		for_each_engine(engine, i915, id) {
+			err = wait_for_timeline(&engine->timeline, flags);
+			if (err)
+				return err;
+		}
+
+		return 0;
+	}
 }
 
 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
@@ -4954,7 +4950,7 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 	enum intel_engine_id id;
 
 	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
+		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
 		GEM_BUG_ON(engine->last_retired_context != kernel_context);
 	}
 }
@@ -5603,12 +5599,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	err = i915_gem_timeline_init__global(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	if (err)
-		goto err_priorities;
-
 	i915_gem_init__mm(dev_priv);
 
 	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
@@ -5628,8 +5618,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 
 	return 0;
 
-err_priorities:
-	kmem_cache_destroy(dev_priv->priorities);
 err_dependencies:
 	kmem_cache_destroy(dev_priv->dependencies);
 err_requests:
@@ -5650,12 +5638,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
 	WARN_ON(dev_priv->mm.object_count);
-
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_gem_timeline_fini(&dev_priv->gt.legacy_timeline);
-	i915_gem_timeline_fini(&dev_priv->gt.execution_timeline);
 	WARN_ON(!list_empty(&dev_priv->gt.timelines));
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	kmem_cache_destroy(dev_priv->priorities);
 	kmem_cache_destroy(dev_priv->dependencies);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 1f4987dc6616..33f8a4b3c981 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -122,7 +122,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
-	i915_gem_timeline_free(ctx->timeline);
 	i915_ppgtt_put(ctx->ppgtt);
 
 	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
@@ -377,18 +376,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
 		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
 	}
 
-	if (HAS_EXECLISTS(dev_priv)) {
-		struct i915_gem_timeline *timeline;
-
-		timeline = i915_gem_timeline_create(dev_priv, ctx->name);
-		if (IS_ERR(timeline)) {
-			__destroy_hw_context(ctx, file_priv);
-			return ERR_CAST(timeline);
-		}
-
-		ctx->timeline = timeline;
-	}
-
 	trace_i915_context_create(ctx);
 
 	return ctx;
@@ -590,19 +577,29 @@ void i915_gem_context_close(struct drm_file *file)
 	idr_destroy(&file_priv->context_idr);
 }
 
+static struct i915_request *
+last_request_on_engine(struct i915_timeline *timeline,
+		       struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	if (timeline == &engine->timeline)
+		return NULL;
+
+	rq = i915_gem_active_raw(&timeline->last_request,
+				 &engine->i915->drm.struct_mutex);
+	if (rq && rq->engine == engine)
+		return rq;
+
+	return NULL;
+}
+
 static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 {
-	struct i915_gem_timeline *timeline;
+	struct i915_timeline *timeline;
 
 	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-		struct intel_timeline *tl;
-
-		if (timeline == &engine->i915->gt.execution_timeline)
-			continue;
-
-		tl = &timeline->engine[engine->id];
-		if (i915_gem_active_peek(&tl->last_request,
-					 &engine->i915->drm.struct_mutex))
+		if (last_request_on_engine(timeline, engine))
 			return false;
 	}
 
@@ -612,7 +609,7 @@ static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
-	struct i915_gem_timeline *timeline;
+	struct i915_timeline *timeline;
 	enum intel_engine_id id;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -632,11 +629,8 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 		/* Queue this switch after all other activity */
 		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
 			struct i915_request *prev;
-			struct intel_timeline *tl;
 
-			tl = &timeline->engine[engine->id];
-			prev = i915_gem_active_raw(&tl->last_request,
-						   &dev_priv->drm.struct_mutex);
+			prev = last_request_on_engine(timeline, engine);
 			if (prev)
 				i915_sw_fence_await_sw_fence_gfp(&rq->submit,
 								 &prev->submit,
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ec53ba06f836..ace3b129c189 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -58,8 +58,6 @@ struct i915_gem_context {
 	/** file_priv: owning file descriptor */
 	struct drm_i915_file_private *file_priv;
 
-	struct i915_gem_timeline *timeline;
-
 	/**
 	 * @ppgtt: unique address space (GTT)
 	 *
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 98107925de48..1db0dedb4059 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -38,10 +38,9 @@
 #include <linux/mm.h>
 #include <linux/pagevec.h>
 
-#include "i915_gem_timeline.h"
-
 #include "i915_request.h"
 #include "i915_selftest.h"
+#include "i915_timeline.h"
 
 #define I915_GTT_PAGE_SIZE_4K BIT(12)
 #define I915_GTT_PAGE_SIZE_64K BIT(16)
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
deleted file mode 100644
index 24f4068cc137..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "i915_syncmap.h"
-
-static void __intel_timeline_init(struct intel_timeline *tl,
-				  struct i915_gem_timeline *parent,
-				  u64 context,
-				  struct lock_class_key *lockclass,
-				  const char *lockname)
-{
-	tl->fence_context = context;
-	tl->common = parent;
-	spin_lock_init(&tl->lock);
-	lockdep_set_class_and_name(&tl->lock, lockclass, lockname);
-	init_request_active(&tl->last_request, NULL);
-	INIT_LIST_HEAD(&tl->requests);
-	i915_syncmap_init(&tl->sync);
-}
-
-static void __intel_timeline_fini(struct intel_timeline *tl)
-{
-	GEM_BUG_ON(!list_empty(&tl->requests));
-
-	i915_syncmap_free(&tl->sync);
-}
-
-static int __i915_gem_timeline_init(struct drm_i915_private *i915,
-				    struct i915_gem_timeline *timeline,
-				    const char *name,
-				    struct lock_class_key *lockclass,
-				    const char *lockname)
-{
-	unsigned int i;
-	u64 fences;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	/*
-	 * Ideally we want a set of engines on a single leaf as we expect
-	 * to mostly be tracking synchronisation between engines. It is not
-	 * a huge issue if this is not the case, but we may want to mitigate
-	 * any page crossing penalties if they become an issue.
-	 */
-	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
-
-	timeline->i915 = i915;
-	timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
-	if (!timeline->name)
-		return -ENOMEM;
-
-	list_add(&timeline->link, &i915->gt.timelines);
-
-	/* Called during early_init before we know how many engines there are */
-	fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
-	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
-		__intel_timeline_init(&timeline->engine[i],
-				      timeline, fences++,
-				      lockclass, lockname);
-
-	return 0;
-}
-
-int i915_gem_timeline_init(struct drm_i915_private *i915,
-			   struct i915_gem_timeline *timeline,
-			   const char *name)
-{
-	static struct lock_class_key class;
-
-	return __i915_gem_timeline_init(i915, timeline, name,
-					&class, "&timeline->lock");
-}
-
-int i915_gem_timeline_init__global(struct drm_i915_private *i915)
-{
-	static struct lock_class_key class1, class2;
-	int err;
-
-	err = __i915_gem_timeline_init(i915,
-				       &i915->gt.execution_timeline,
-				       "[execution]", &class1,
-				       "i915_execution_timeline");
-	if (err)
-		return err;
-
-	err = __i915_gem_timeline_init(i915,
-				       &i915->gt.legacy_timeline,
-				       "[global]", &class2,
-				       "i915_global_timeline");
-	if (err)
-		goto err_exec_timeline;
-
-	return 0;
-
-err_exec_timeline:
-	i915_gem_timeline_fini(&i915->gt.execution_timeline);
-	return err;
-}
-
-/**
- * i915_gem_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void i915_gem_timelines_park(struct drm_i915_private *i915)
-{
-	struct i915_gem_timeline *timeline;
-	int i;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	list_for_each_entry(timeline, &i915->gt.timelines, link) {
-		for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
-			struct intel_timeline *tl = &timeline->engine[i];
-
-			/*
-			 * All known fences are completed so we can scrap
-			 * the current sync point tracking and start afresh,
-			 * any attempt to wait upon a previous sync point
-			 * will be skipped as the fence was signaled.
-			 */
-			i915_syncmap_free(&tl->sync);
-		}
-	}
-}
-
-void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
-{
-	int i;
-
-	lockdep_assert_held(&timeline->i915->drm.struct_mutex);
-
-	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
-		__intel_timeline_fini(&timeline->engine[i]);
-
-	list_del(&timeline->link);
-	kfree(timeline->name);
-}
-
-struct i915_gem_timeline *
-i915_gem_timeline_create(struct drm_i915_private *i915, const char *name)
-{
-	struct i915_gem_timeline *timeline;
-	int err;
-
-	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
-	if (!timeline)
-		return ERR_PTR(-ENOMEM);
-
-	err = i915_gem_timeline_init(i915, timeline, name);
-	if (err) {
-		kfree(timeline);
-		return ERR_PTR(err);
-	}
-
-	return timeline;
-}
-
-void i915_gem_timeline_free(struct i915_gem_timeline *timeline)
-{
-	if (!timeline)
-		return;
-
-	i915_gem_timeline_fini(timeline);
-	kfree(timeline);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_timeline.c"
-#include "selftests/i915_gem_timeline.c"
-#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 944939947d30..df234dc23274 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1310,7 +1310,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->timeline->requests, link)
+	list_for_each_entry_from(request, &engine->timeline.requests, link)
 		count++;
 	if (!count)
 		return;
@@ -1323,7 +1323,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->timeline->requests, link) {
+	list_for_each_entry_from(request, &engine->timeline.requests, link) {
 		if (count >= ee->num_requests) {
 			/*
 			 * If the ring request list was changed in
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 4b1da01168ae..d9341415df40 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1695,7 +1695,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
 						 const struct i915_oa_config *oa_config)
 {
 	struct intel_engine_cs *engine = dev_priv->engine[RCS];
-	struct i915_gem_timeline *timeline;
+	struct i915_timeline *timeline;
 	struct i915_request *rq;
 	int ret;
 
@@ -1716,15 +1716,11 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
 	/* Queue this switch after all other activity */
 	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
 		struct i915_request *prev;
-		struct intel_timeline *tl;
 
-		tl = &timeline->engine[engine->id];
-		prev = i915_gem_active_raw(&tl->last_request,
+		prev = i915_gem_active_raw(&timeline->last_request,
 					   &dev_priv->drm.struct_mutex);
 		if (prev)
-			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
-							 &prev->submit,
-							 GFP_KERNEL);
+			i915_request_await_dma_fence(rq, &prev->fence);
 	}
 
 	i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7bb613c00cc3..5acf869f3ca3 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -49,7 +49,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return "signaled";
 
-	return to_request(fence)->timeline->common->name;
+	return to_request(fence)->timeline->name;
 }
 
 static bool i915_fence_signaled(struct dma_fence *fence)
@@ -199,6 +199,7 @@ i915_sched_node_init(struct i915_sched_node *node)
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
 	struct intel_engine_cs *engine;
+	struct i915_timeline *timeline;
 	enum intel_engine_id id;
 	int ret;
 
@@ -213,16 +214,13 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 
 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
 	for_each_engine(engine, i915, id) {
-		struct i915_gem_timeline *timeline;
-		struct intel_timeline *tl = engine->timeline;
-
 		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
 			  engine->name,
-			  tl->seqno,
+			  engine->timeline.seqno,
 			  intel_engine_get_seqno(engine),
 			  seqno);
 
-		if (!i915_seqno_passed(seqno, tl->seqno)) {
+		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
 			/* Flush any waiters before we reuse the seqno */
 			intel_engine_disarm_breadcrumbs(engine);
 			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
@@ -230,18 +228,18 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 
 		/* Check we are idle before we fiddle with hw state! */
 		GEM_BUG_ON(!intel_engine_is_idle(engine));
-		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
 
 		/* Finally reset hw state */
 		intel_engine_init_global_seqno(engine, seqno);
-		tl->seqno = seqno;
-
-		list_for_each_entry(timeline, &i915->gt.timelines, link)
-			memset(timeline->engine[id].global_sync, 0,
-			       sizeof(timeline->engine[id].global_sync));
+		engine->timeline.seqno = seqno;
 	}
 
+	list_for_each_entry(timeline, &i915->gt.timelines, link)
+		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
+
 	i915->gt.request_serial = seqno;
+
 	return 0;
 }
 
@@ -357,10 +355,10 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
 
 	local_irq_disable();
 
-	spin_lock(&engine->timeline->lock);
-	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline->requests));
+	spin_lock(&engine->timeline.lock);
+	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
 	list_del_init(&rq->link);
-	spin_unlock(&engine->timeline->lock);
+	spin_unlock(&engine->timeline.lock);
 
 	spin_lock(&rq->lock);
 	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -397,7 +395,7 @@ static void __retire_engine_upto(struct intel_engine_cs *engine,
 		return;
 
 	do {
-		tmp = list_first_entry(&engine->timeline->requests,
+		tmp = list_first_entry(&engine->timeline.requests,
 				       typeof(*tmp), link);
 
 		GEM_BUG_ON(tmp->engine != engine);
@@ -492,16 +490,16 @@ void i915_request_retire_upto(struct i915_request *rq)
 	} while (tmp != rq);
 }
 
-static u32 timeline_get_seqno(struct intel_timeline *tl)
+static u32 timeline_get_seqno(struct i915_timeline *tl)
 {
 	return ++tl->seqno;
 }
 
 static void move_to_timeline(struct i915_request *request,
-			     struct intel_timeline *timeline)
+			     struct i915_timeline *timeline)
 {
-	GEM_BUG_ON(request->timeline == request->engine->timeline);
-	lockdep_assert_held(&request->engine->timeline->lock);
+	GEM_BUG_ON(request->timeline == &request->engine->timeline);
+	lockdep_assert_held(&request->engine->timeline.lock);
 
 	spin_lock(&request->timeline->lock);
 	list_move_tail(&request->link, &timeline->requests);
@@ -516,15 +514,15 @@ void __i915_request_submit(struct i915_request *request)
 	GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
-		  engine->timeline->seqno + 1,
+		  engine->timeline.seqno + 1,
 		  intel_engine_get_seqno(engine));
 
 	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->timeline->lock);
+	lockdep_assert_held(&engine->timeline.lock);
 
 	GEM_BUG_ON(request->global_seqno);
 
-	seqno = timeline_get_seqno(engine->timeline);
+	seqno = timeline_get_seqno(&engine->timeline);
 	GEM_BUG_ON(!seqno);
 	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
 
@@ -539,7 +537,7 @@ void __i915_request_submit(struct i915_request *request)
 				request->ring->vaddr + request->postfix);
 
 	/* Transfer from per-context onto the global per-engine timeline */
-	move_to_timeline(request, engine->timeline);
+	move_to_timeline(request, &engine->timeline);
 
 	trace_i915_request_execute(request);
 
@@ -552,11 +550,11 @@ void i915_request_submit(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	__i915_request_submit(request);
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 void __i915_request_unsubmit(struct i915_request *request)
@@ -570,17 +568,17 @@ void __i915_request_unsubmit(struct i915_request *request)
 		  intel_engine_get_seqno(engine));
 
 	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->timeline->lock);
+	lockdep_assert_held(&engine->timeline.lock);
 
 	/*
 	 * Only unwind in reverse order, required so that the per-context list
 	 * is kept in seqno/ring order.
 	 */
 	GEM_BUG_ON(!request->global_seqno);
-	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
 	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
 				     request->global_seqno));
-	engine->timeline->seqno--;
+	engine->timeline.seqno--;
 
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -607,11 +605,11 @@ void i915_request_unsubmit(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	__i915_request_unsubmit(request);
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static int __i915_sw_fence_call
@@ -764,7 +762,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	rq->ctx = ctx;
 	rq->ring = ring;
 	rq->timeline = ring->timeline;
-	GEM_BUG_ON(rq->timeline == engine->timeline);
+	GEM_BUG_ON(rq->timeline == &engine->timeline);
 
 	spin_lock_init(&rq->lock);
 	dma_fence_init(&rq->fence,
@@ -929,7 +927,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 
 		/* Squash repeated waits to the same timelines */
 		if (fence->context != rq->i915->mm.unordered_timeline &&
-		    intel_timeline_sync_is_later(rq->timeline, fence))
+		    i915_timeline_sync_is_later(rq->timeline, fence))
 			continue;
 
 		if (dma_fence_is_i915(fence))
@@ -943,7 +941,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 
 		/* Record the latest fence used against each timeline */
 		if (fence->context != rq->i915->mm.unordered_timeline)
-			intel_timeline_sync_set(rq->timeline, fence);
+			i915_timeline_sync_set(rq->timeline, fence);
 	} while (--nchild);
 
 	return 0;
@@ -1020,7 +1018,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_ring *ring = request->ring;
-	struct intel_timeline *timeline = request->timeline;
+	struct i915_timeline *timeline = request->timeline;
 	struct i915_request *prev;
 	u32 *cs;
 	int err;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 8f31ca8272f8..eddbd4245cb3 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -37,6 +37,7 @@
 struct drm_file;
 struct drm_i915_gem_object;
 struct i915_request;
+struct i915_timeline;
 
 struct intel_wait {
 	struct rb_node node;
@@ -95,7 +96,7 @@ struct i915_request {
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
 	struct intel_ring *ring;
-	struct intel_timeline *timeline;
+	struct i915_timeline *timeline;
 	struct intel_signal_node signaling;
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
new file mode 100644
index 000000000000..4667cc08c416
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -0,0 +1,105 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016-2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "i915_timeline.h"
+#include "i915_syncmap.h"
+
+void i915_timeline_init(struct drm_i915_private *i915,
+			struct i915_timeline *timeline,
+			const char *name)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	/*
+	 * Ideally we want a set of engines on a single leaf as we expect
+	 * to mostly be tracking synchronisation between engines. It is not
+	 * a huge issue if this is not the case, but we may want to mitigate
+	 * any page crossing penalties if they become an issue.
+	 */
+	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+
+	timeline->name = name;
+
+	list_add(&timeline->link, &i915->gt.timelines);
+
+	/* Called during early_init before we know how many engines there are */
+
+	timeline->fence_context = dma_fence_context_alloc(1);
+
+	spin_lock_init(&timeline->lock);
+
+	init_request_active(&timeline->last_request, NULL);
+	INIT_LIST_HEAD(&timeline->requests);
+
+	i915_syncmap_init(&timeline->sync);
+}
+
+/**
+ * i915_timelines_park - called when the driver idles
+ * @i915: the drm_i915_private device
+ *
+ * When the driver is completely idle, we know that all of our sync points
+ * have been signaled and our tracking is then entirely redundant. Any request
+ * to wait upon an older sync point will be completed instantly as we know
+ * the fence is signaled and therefore we will not even look them up in the
+ * sync point map.
+ */
+void i915_timelines_park(struct drm_i915_private *i915)
+{
+	struct i915_timeline *timeline;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	list_for_each_entry(timeline, &i915->gt.timelines, link) {
+		/*
+		 * All known fences are completed so we can scrap
+		 * the current sync point tracking and start afresh,
+		 * any attempt to wait upon a previous sync point
+		 * will be skipped as the fence was signaled.
+		 */
+		i915_syncmap_free(&timeline->sync);
+	}
+}
+
+void i915_timeline_fini(struct i915_timeline *timeline)
+{
+	GEM_BUG_ON(!list_empty(&timeline->requests));
+
+	i915_syncmap_free(&timeline->sync);
+
+	list_del(&timeline->link);
+}
+
+struct i915_timeline *
+i915_timeline_create(struct drm_i915_private *i915, const char *name)
+{
+	struct i915_timeline *timeline;
+
+	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
+	if (!timeline)
+		return ERR_PTR(-ENOMEM);
+
+	i915_timeline_init(i915, timeline, name);
+	kref_init(&timeline->kref);
+
+	return timeline;
+}
+
+void __i915_timeline_free(struct kref *kref)
+{
+	struct i915_timeline *timeline =
+		container_of(kref, typeof(*timeline), kref);
+
+	i915_timeline_fini(timeline);
+	kfree(timeline);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_timeline.c"
+#include "selftests/i915_timeline.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
similarity index 68%
rename from drivers/gpu/drm/i915/i915_gem_timeline.h
rename to drivers/gpu/drm/i915/i915_timeline.h
index 780ed465c4fc..dc2a4632faa7 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -22,18 +22,17 @@
  *
  */
 
-#ifndef I915_GEM_TIMELINE_H
-#define I915_GEM_TIMELINE_H
+#ifndef I915_TIMELINE_H
+#define I915_TIMELINE_H
 
 #include <linux/list.h>
+#include <linux/kref.h>
 
 #include "i915_request.h"
 #include "i915_syncmap.h"
 #include "i915_utils.h"
 
-struct i915_gem_timeline;
-
-struct intel_timeline {
+struct i915_timeline {
 	u64 fence_context;
 	u32 seqno;
 
@@ -71,51 +70,57 @@ struct intel_timeline {
 	 */
 	u32 global_sync[I915_NUM_ENGINES];
 
-	struct i915_gem_timeline *common;
-};
-
-struct i915_gem_timeline {
 	struct list_head link;
-
-	struct drm_i915_private *i915;
 	const char *name;
 
-	struct intel_timeline engine[I915_NUM_ENGINES];
+	struct kref kref;
 };
 
-int i915_gem_timeline_init(struct drm_i915_private *i915,
-			   struct i915_gem_timeline *tl,
-			   const char *name);
-int i915_gem_timeline_init__global(struct drm_i915_private *i915);
-void i915_gem_timelines_park(struct drm_i915_private *i915);
-void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+void i915_timeline_init(struct drm_i915_private *i915,
+			struct i915_timeline *tl,
+			const char *name);
+void i915_timeline_fini(struct i915_timeline *tl);
 
-struct i915_gem_timeline *
-i915_gem_timeline_create(struct drm_i915_private *i915, const char *name);
-void i915_gem_timeline_free(struct i915_gem_timeline *timeline);
+struct i915_timeline *
+i915_timeline_create(struct drm_i915_private *i915, const char *name);
 
-static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
-					    u64 context, u32 seqno)
+static inline struct i915_timeline *
+i915_timeline_get(struct i915_timeline *timeline)
+{
+	kref_get(&timeline->kref);
+	return timeline;
+}
+
+void __i915_timeline_free(struct kref *kref);
+static inline void i915_timeline_put(struct i915_timeline *timeline)
+{
+	kref_put(&timeline->kref, __i915_timeline_free);
+}
+
+static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
+					   u64 context, u32 seqno)
 {
 	return i915_syncmap_set(&tl->sync, context, seqno);
 }
 
-static inline int intel_timeline_sync_set(struct intel_timeline *tl,
-					  const struct dma_fence *fence)
+static inline int i915_timeline_sync_set(struct i915_timeline *tl,
+					 const struct dma_fence *fence)
 {
-	return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+	return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
 }
 
-static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
-						  u64 context, u32 seqno)
+static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
+						 u64 context, u32 seqno)
 {
 	return i915_syncmap_is_later(&tl->sync, context, seqno);
 }
 
-static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
-						const struct dma_fence *fence)
+static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
+					       const struct dma_fence *fence)
 {
-	return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+	return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
 }
 
+void i915_timelines_park(struct drm_i915_private *i915);
+
 #endif
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 7af5fe85612d..a90769b9954e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -451,12 +451,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
 }
 
-static void intel_engine_init_timeline(struct intel_engine_cs *engine)
-{
-	engine->timeline =
-		&engine->i915->gt.execution_timeline.engine[engine->id];
-}
-
 static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
 {
 	i915_gem_batch_pool_init(&engine->batch_pool, engine);
@@ -508,8 +502,9 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
+	i915_timeline_init(engine->i915, &engine->timeline, engine->name);
+
 	intel_engine_init_execlist(engine);
-	intel_engine_init_timeline(engine);
 	intel_engine_init_hangcheck(engine);
 	intel_engine_init_batch_pool(engine);
 	intel_engine_init_cmd_parser(engine);
@@ -751,6 +746,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->i915->preempt_context)
 		intel_context_unpin(engine->i915->preempt_context, engine);
 	intel_context_unpin(engine->i915->kernel_context, engine);
+
+	i915_timeline_fini(&engine->timeline);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
@@ -1003,7 +1000,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 	 * the last request that remains in the timeline. When idle, it is
 	 * the last executed context as tracked by retirement.
 	 */
-	rq = __i915_gem_active_peek(&engine->timeline->last_request);
+	rq = __i915_gem_active_peek(&engine->timeline.last_request);
 	if (rq)
 		return rq->ctx == kernel_context;
 	else
@@ -1335,14 +1332,14 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 	drm_printf(m, "\tRequests:\n");
 
-	rq = list_first_entry(&engine->timeline->requests,
+	rq = list_first_entry(&engine->timeline.requests,
 			      struct i915_request, link);
-	if (&rq->link != &engine->timeline->requests)
+	if (&rq->link != &engine->timeline.requests)
 		print_request(m, rq, "\t\tfirst  ");
 
-	rq = list_last_entry(&engine->timeline->requests,
+	rq = list_last_entry(&engine->timeline.requests,
 			     struct i915_request, link);
-	if (&rq->link != &engine->timeline->requests)
+	if (&rq->link != &engine->timeline.requests)
 		print_request(m, rq, "\t\tlast   ");
 
 	rq = i915_gem_find_active_request(engine);
@@ -1374,11 +1371,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
 	}
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 
 	last = NULL;
 	count = 0;
-	list_for_each_entry(rq, &engine->timeline->requests, link) {
+	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
 			print_request(m, rq, "\t\tE ");
 		else
@@ -1416,7 +1413,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		print_request(m, last, "\t\tQ ");
 	}
 
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 
 	spin_lock_irq(&b->rb_lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index c6bb5bebddfc..62828e39ee26 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -679,7 +679,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 	bool submit = false;
 	struct rb_node *rb;
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	rb = execlists->first;
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
@@ -750,7 +750,7 @@ done:
 	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
 
 unlock:
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static void guc_submission_tasklet(unsigned long data)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b2407753ebd..e04798e98db2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -331,10 +331,10 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	struct i915_priolist *uninitialized_var(p);
 	int last_prio = I915_PRIORITY_INVALID;
 
-	lockdep_assert_held(&engine->timeline->lock);
+	lockdep_assert_held(&engine->timeline.lock);
 
 	list_for_each_entry_safe_reverse(rq, rn,
-					 &engine->timeline->requests,
+					 &engine->timeline.requests,
 					 link) {
 		if (i915_request_completed(rq))
 			return;
@@ -358,9 +358,9 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 	struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	__unwind_incomplete_requests(engine);
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static inline void
@@ -584,7 +584,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	rb = execlists->first;
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
@@ -744,7 +744,7 @@ done:
 	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
 
 unlock:
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 
 	if (submit) {
 		execlists_user_begin(execlists, execlists->port);
@@ -894,10 +894,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	execlists_cancel_port_requests(execlists);
 	reset_irq(engine);
 
-	spin_lock(&engine->timeline->lock);
+	spin_lock(&engine->timeline.lock);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->timeline->requests, link) {
+	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
 		if (!i915_request_completed(rq))
 			dma_fence_set_error(&rq->fence, -EIO);
@@ -929,7 +929,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	execlists->first = NULL;
 	GEM_BUG_ON(port_isset(execlists->port));
 
-	spin_unlock(&engine->timeline->lock);
+	spin_unlock(&engine->timeline.lock);
 
 	local_irq_restore(flags);
 }
@@ -1167,7 +1167,7 @@ static void execlists_submit_request(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	queue_request(engine, &request->sched, rq_prio(request));
 	submit_queue(engine, rq_prio(request));
@@ -1175,7 +1175,7 @@ static void execlists_submit_request(struct i915_request *request)
 	GEM_BUG_ON(!engine->execlists.first);
 	GEM_BUG_ON(list_empty(&request->sched.link));
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static struct i915_request *sched_to_request(struct i915_sched_node *node)
@@ -1191,8 +1191,8 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 	GEM_BUG_ON(!locked);
 
 	if (engine != locked) {
-		spin_unlock(&locked->timeline->lock);
-		spin_lock(&engine->timeline->lock);
+		spin_unlock(&locked->timeline.lock);
+		spin_lock(&engine->timeline.lock);
 	}
 
 	return engine;
@@ -1275,7 +1275,7 @@ static void execlists_schedule(struct i915_request *request,
 	}
 
 	engine = request->engine;
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
@@ -1299,7 +1299,7 @@ static void execlists_schedule(struct i915_request *request,
 			__submit_queue(engine, prio);
 	}
 
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
@@ -1828,9 +1828,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	reset_irq(engine);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	spin_lock(&engine->timeline->lock);
+	spin_lock(&engine->timeline.lock);
 	__unwind_incomplete_requests(engine);
-	spin_unlock(&engine->timeline->lock);
+	spin_unlock(&engine->timeline.lock);
 
 	local_irq_restore(flags);
 
@@ -2599,6 +2599,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct i915_vma *vma;
 	uint32_t context_size;
 	struct intel_ring *ring;
+	struct i915_timeline *timeline;
 	int ret;
 
 	if (ce->state)
@@ -2614,8 +2615,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
 	if (IS_ERR(ctx_obj)) {
-		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
-		return PTR_ERR(ctx_obj);
+		ret = PTR_ERR(ctx_obj);
+		goto error_deref_obj;
 	}
 
 	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
@@ -2624,7 +2625,14 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine, ctx->timeline, ctx->ring_size);
+	timeline = i915_timeline_create(ctx->i915, ctx->name);
+	if (IS_ERR(timeline)) {
+		ret = PTR_ERR(timeline);
+		goto error_deref_obj;
+	}
+
+	ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b73e700c3048..8f19349a6055 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -697,17 +697,17 @@ static void cancel_requests(struct intel_engine_cs *engine)
 	struct i915_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->timeline->requests, link) {
+	list_for_each_entry(request, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!request->global_seqno);
 		if (!i915_request_completed(request))
 			dma_fence_set_error(&request->fence, -EIO);
 	}
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static void i9xx_submit_request(struct i915_request *request)
@@ -1118,7 +1118,7 @@ err:
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct i915_gem_timeline *timeline,
+			 struct i915_timeline *timeline,
 			 int size)
 {
 	struct intel_ring *ring;
@@ -1126,7 +1126,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 
 	GEM_BUG_ON(!is_power_of_2(size));
 	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-	GEM_BUG_ON(&timeline->engine[engine->id] == engine->timeline);
+	GEM_BUG_ON(timeline == &engine->timeline);
 	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1134,7 +1134,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->timeline = &timeline->engine[engine->id];
+	ring->timeline = i915_timeline_get(timeline);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -1165,6 +1165,7 @@ intel_ring_free(struct intel_ring *ring)
 	i915_vma_close(ring->vma);
 	__i915_gem_object_release_unless_active(obj);
 
+	i915_timeline_put(ring->timeline);
 	kfree(ring);
 }
 
@@ -1323,6 +1324,7 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct intel_ring *ring;
+	struct i915_timeline *timeline;
 	int err;
 
 	intel_engine_setup_common(engine);
@@ -1331,9 +1333,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	if (err)
 		goto err;
 
-	ring = intel_engine_create_ring(engine,
-					&engine->i915->gt.legacy_timeline,
-					32 * PAGE_SIZE);
+	timeline = i915_timeline_create(engine->i915, engine->name);
+	if (IS_ERR(timeline)) {
+		err = PTR_ERR(timeline);
+		goto err;
+	}
+
+	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
+	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
 		goto err;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index da53aa2973a7..010750e8ee44 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -6,12 +6,12 @@
 #include <linux/seqlock.h>
 
 #include "i915_gem_batch_pool.h"
-#include "i915_gem_timeline.h"
 
 #include "i915_reg.h"
 #include "i915_pmu.h"
 #include "i915_request.h"
 #include "i915_selftest.h"
+#include "i915_timeline.h"
 #include "intel_gpu_commands.h"
 
 struct drm_printer;
@@ -129,7 +129,7 @@ struct intel_ring {
 	struct i915_vma *vma;
 	void *vaddr;
 
-	struct intel_timeline *timeline;
+	struct i915_timeline *timeline;
 	struct list_head request_list;
 	struct list_head active_link;
 
@@ -338,7 +338,8 @@ struct intel_engine_cs {
 	u32 mmio_base;
 
 	struct intel_ring *buffer;
-	struct intel_timeline *timeline;
+
+	struct i915_timeline timeline;
 
 	struct drm_i915_gem_object *default_state;
 
@@ -770,7 +771,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct i915_gem_timeline *timeline,
+			 struct i915_timeline *timeline,
 			 int size);
 int intel_ring_pin(struct intel_ring *ring,
 		   struct drm_i915_private *i915,
@@ -889,7 +890,7 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 	 * wtih serialising this hint with anything, so document it as
 	 * a hint and nothing more.
 	 */
-	return READ_ONCE(engine->timeline->seqno);
+	return READ_ONCE(engine->timeline.seqno);
 }
 
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 24ac648dc83a..7ecaed50d0b9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -355,18 +355,6 @@ static int igt_ctx_exec(void *arg)
 
 		if (first_shared_gtt) {
 			ctx = __create_hw_context(i915, file->driver_priv);
-			if (!IS_ERR(ctx) && HAS_EXECLISTS(i915)) {
-				struct i915_gem_timeline *timeline;
-
-				timeline = i915_gem_timeline_create(i915, ctx->name);
-				if (IS_ERR(timeline)) {
-					__destroy_hw_context(ctx, file->driver_priv);
-					ctx = ERR_CAST(timeline);
-				} else {
-					ctx->timeline = timeline;
-				}
-			}
-
 			first_shared_gtt = false;
 		} else {
 			ctx = i915_gem_create_context(i915, file->driver_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
similarity index 70%
rename from drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
rename to drivers/gpu/drm/i915/selftests/i915_timeline.c
index 3000e6a7d82d..19f1c6a5c8fb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -1,25 +1,7 @@
 /*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2017-2018 Intel Corporation
  */
 
 #include "../i915_selftest.h"
@@ -35,21 +17,21 @@ struct __igt_sync {
 	bool set;
 };
 
-static int __igt_sync(struct intel_timeline *tl,
+static int __igt_sync(struct i915_timeline *tl,
 		      u64 ctx,
 		      const struct __igt_sync *p,
 		      const char *name)
 {
 	int ret;
 
-	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+	if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
 		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
 		       name, p->name, ctx, p->seqno, yesno(p->expected));
 		return -EINVAL;
 	}
 
 	if (p->set) {
-		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
+		ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
 		if (ret)
 			return ret;
 	}
@@ -77,37 +59,31 @@ static int igt_sync(void *arg)
 		{ "unwrap", UINT_MAX, true, false },
 		{},
 	}, *p;
-	struct intel_timeline *tl;
+	struct i915_timeline tl;
 	int order, offset;
 	int ret = -ENODEV;
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
-
+	mock_timeline_init(&tl, 0);
 	for (p = pass; p->name; p++) {
 		for (order = 1; order < 64; order++) {
 			for (offset = -1; offset <= (order > 1); offset++) {
 				u64 ctx = BIT_ULL(order) + offset;
 
-				ret = __igt_sync(tl, ctx, p, "1");
+				ret = __igt_sync(&tl, ctx, p, "1");
 				if (ret)
 					goto out;
 			}
 		}
 	}
-	mock_timeline_destroy(tl);
-
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	mock_timeline_fini(&tl);
 
+	mock_timeline_init(&tl, 0);
 	for (order = 1; order < 64; order++) {
 		for (offset = -1; offset <= (order > 1); offset++) {
 			u64 ctx = BIT_ULL(order) + offset;
 
 			for (p = pass; p->name; p++) {
-				ret = __igt_sync(tl, ctx, p, "2");
+				ret = __igt_sync(&tl, ctx, p, "2");
 				if (ret)
 					goto out;
 			}
@@ -115,7 +91,7 @@ static int igt_sync(void *arg)
 	}
 
 out:
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 	return ret;
 }
 
@@ -127,15 +103,13 @@ static unsigned int random_engine(struct rnd_state *rnd)
 static int bench_sync(void *arg)
 {
 	struct rnd_state prng;
-	struct intel_timeline *tl;
+	struct i915_timeline tl;
 	unsigned long end_time, count;
 	u64 prng32_1M;
 	ktime_t kt;
 	int order, last_order;
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	mock_timeline_init(&tl, 0);
 
 	/* Lookups from cache are very fast and so the random number generation
 	 * and the loop itself becomes a significant factor in the per-iteration
@@ -167,7 +141,7 @@ static int bench_sync(void *arg)
 	do {
 		u64 id = i915_prandom_u64_state(&prng);
 
-		__intel_timeline_sync_set(tl, id, 0);
+		__i915_timeline_sync_set(&tl, id, 0);
 		count++;
 	} while (!time_after(jiffies, end_time));
 	kt = ktime_sub(ktime_get(), kt);
@@ -182,8 +156,8 @@ static int bench_sync(void *arg)
 	while (end_time--) {
 		u64 id = i915_prandom_u64_state(&prng);
 
-		if (!__intel_timeline_sync_is_later(tl, id, 0)) {
-			mock_timeline_destroy(tl);
+		if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
+			mock_timeline_fini(&tl);
 			pr_err("Lookup of %llu failed\n", id);
 			return -EINVAL;
 		}
@@ -193,19 +167,17 @@ static int bench_sync(void *arg)
 	pr_info("%s: %lu random lookups, %lluns/lookup\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
 
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 	cond_resched();
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	mock_timeline_init(&tl, 0);
 
 	/* Benchmark setting the first N (in order) contexts */
 	count = 0;
 	kt = ktime_get();
 	end_time = jiffies + HZ/10;
 	do {
-		__intel_timeline_sync_set(tl, count++, 0);
+		__i915_timeline_sync_set(&tl, count++, 0);
 	} while (!time_after(jiffies, end_time));
 	kt = ktime_sub(ktime_get(), kt);
 	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
@@ -215,9 +187,9 @@ static int bench_sync(void *arg)
 	end_time = count;
 	kt = ktime_get();
 	while (end_time--) {
-		if (!__intel_timeline_sync_is_later(tl, end_time, 0)) {
+		if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
 			pr_err("Lookup of %lu failed\n", end_time);
-			mock_timeline_destroy(tl);
+			mock_timeline_fini(&tl);
 			return -EINVAL;
 		}
 	}
@@ -225,12 +197,10 @@ static int bench_sync(void *arg)
 	pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
 
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 	cond_resched();
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	mock_timeline_init(&tl, 0);
 
 	/* Benchmark searching for a random context id and maybe changing it */
 	prandom_seed_state(&prng, i915_selftest.random_seed);
@@ -241,8 +211,8 @@ static int bench_sync(void *arg)
 		u32 id = random_engine(&prng);
 		u32 seqno = prandom_u32_state(&prng);
 
-		if (!__intel_timeline_sync_is_later(tl, id, seqno))
-			__intel_timeline_sync_set(tl, id, seqno);
+		if (!__i915_timeline_sync_is_later(&tl, id, seqno))
+			__i915_timeline_sync_set(&tl, id, seqno);
 
 		count++;
 	} while (!time_after(jiffies, end_time));
@@ -250,7 +220,7 @@ static int bench_sync(void *arg)
 	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
 	pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 	cond_resched();
 
 	/* Benchmark searching for a known context id and changing the seqno */
@@ -258,9 +228,7 @@ static int bench_sync(void *arg)
 	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
 		unsigned int mask = BIT(order) - 1;
 
-		tl = mock_timeline(0);
-		if (!tl)
-			return -ENOMEM;
+		mock_timeline_init(&tl, 0);
 
 		count = 0;
 		kt = ktime_get();
@@ -272,8 +240,8 @@ static int bench_sync(void *arg)
 			 */
 			u64 id = (u64)(count & mask) << order;
 
-			__intel_timeline_sync_is_later(tl, id, 0);
-			__intel_timeline_sync_set(tl, id, 0);
+			__i915_timeline_sync_is_later(&tl, id, 0);
+			__i915_timeline_sync_set(&tl, id, 0);
 
 			count++;
 		} while (!time_after(jiffies, end_time));
@@ -281,7 +249,7 @@ static int bench_sync(void *arg)
 		pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
 			__func__, count, order,
 			(long long)div64_ul(ktime_to_ns(kt), count));
-		mock_timeline_destroy(tl);
+		mock_timeline_fini(&tl);
 		cond_resched();
 	}
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 6752498e2c73..26bf29d97007 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -25,6 +25,11 @@
 #include "mock_engine.h"
 #include "mock_request.h"
 
+struct mock_ring {
+	struct intel_ring base;
+	struct i915_timeline timeline;
+};
+
 static struct mock_request *first_request(struct mock_engine *engine)
 {
 	return list_first_entry_or_null(&engine->hw_queue,
@@ -132,7 +137,7 @@ static void mock_submit_request(struct i915_request *request)
 static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 {
 	const unsigned long sz = PAGE_SIZE / 2;
-	struct intel_ring *ring;
+	struct mock_ring *ring;
 
 	BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
 
@@ -140,20 +145,24 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	if (!ring)
 		return NULL;
 
-	ring->timeline = &engine->i915->gt.legacy_timeline.engine[engine->id];
+	i915_timeline_init(engine->i915, &ring->timeline, engine->name);
 
-	ring->size = sz;
-	ring->effective_size = sz;
-	ring->vaddr = (void *)(ring + 1);
+	ring->base.size = sz;
+	ring->base.effective_size = sz;
+	ring->base.vaddr = (void *)(ring + 1);
+	ring->base.timeline = &ring->timeline;
 
-	INIT_LIST_HEAD(&ring->request_list);
-	intel_ring_update_space(ring);
+	INIT_LIST_HEAD(&ring->base.request_list);
+	intel_ring_update_space(&ring->base);
 
-	return ring;
+	return &ring->base;
 }
 
-static void mock_ring_free(struct intel_ring *ring)
+static void mock_ring_free(struct intel_ring *base)
 {
+	struct mock_ring *ring = container_of(base, typeof(*ring), base);
+
+	i915_timeline_fini(&ring->timeline);
 	kfree(ring);
 }
 
@@ -182,8 +191,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.emit_breadcrumb = mock_emit_breadcrumb;
 	engine->base.submit_request = mock_submit_request;
 
-	intel_engine_init_timeline(&engine->base);
-
+	i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
 	intel_engine_init_breadcrumbs(&engine->base);
 	engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
 
@@ -200,6 +208,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 
 err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(&engine->base);
+	i915_timeline_fini(&engine->base.timeline);
 	kfree(engine);
 	return NULL;
 }
@@ -238,6 +247,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
 	mock_ring_free(engine->buffer);
 
 	intel_engine_fini_breadcrumbs(engine);
+	i915_timeline_fini(&engine->timeline);
 
 	kfree(engine);
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index f11c83e8ff32..a662c0450e77 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -73,10 +73,8 @@ static void mock_device_release(struct drm_device *dev)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_fini_ggtt(i915);
-	i915_gem_timeline_fini(&i915->gt.legacy_timeline);
-	i915_gem_timeline_fini(&i915->gt.execution_timeline);
-	WARN_ON(!list_empty(&i915->gt.timelines));
 	mutex_unlock(&i915->drm.struct_mutex);
+	WARN_ON(!list_empty(&i915->gt.timelines));
 
 	destroy_workqueue(i915->wq);
 
@@ -230,12 +228,6 @@ struct drm_i915_private *mock_gem_device(void)
 	INIT_LIST_HEAD(&i915->gt.active_rings);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	err = i915_gem_timeline_init__global(i915);
-	if (err) {
-		mutex_unlock(&i915->drm.struct_mutex);
-		goto err_priorities;
-	}
-
 	mock_init_ggtt(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index 47b1f47c5812..dcf3b16f5a07 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -1,45 +1,28 @@
 /*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2017-2018 Intel Corporation
  */
 
+#include "../i915_timeline.h"
+
 #include "mock_timeline.h"
 
-struct intel_timeline *mock_timeline(u64 context)
+void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 {
-	static struct lock_class_key class;
-	struct intel_timeline *tl;
+	timeline->fence_context = context;
 
-	tl = kzalloc(sizeof(*tl), GFP_KERNEL);
-	if (!tl)
-		return NULL;
+	spin_lock_init(&timeline->lock);
 
-	__intel_timeline_init(tl, NULL, context, &class, "mock");
+	init_request_active(&timeline->last_request, NULL);
+	INIT_LIST_HEAD(&timeline->requests);
 
-	return tl;
+	i915_syncmap_init(&timeline->sync);
+
+	INIT_LIST_HEAD(&timeline->link);
 }
 
-void mock_timeline_destroy(struct intel_timeline *tl)
+void mock_timeline_fini(struct i915_timeline *timeline)
 {
-	__intel_timeline_fini(tl);
-	kfree(tl);
+	i915_timeline_fini(timeline);
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h
index c27ff4639b8b..b6deaa61110d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.h
@@ -1,33 +1,15 @@
 /*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2017-2018 Intel Corporation
  */
 
 #ifndef __MOCK_TIMELINE__
 #define __MOCK_TIMELINE__
 
-#include "../i915_gem_timeline.h"
+struct i915_timeline;
 
-struct intel_timeline *mock_timeline(u64 context);
-void mock_timeline_destroy(struct intel_timeline *tl);
+void mock_timeline_init(struct i915_timeline *timeline, u64 context);
+void mock_timeline_fini(struct i915_timeline *timeline);
 
 #endif /* !__MOCK_TIMELINE__ */

From b9b7742687085fc0fc0135d9104e0a8f30bea2e1 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 3 May 2018 00:02:02 +0100
Subject: [PATCH 0573/1286] drm/i915/execlists: Emit i915_trace_request_out for
 preemption

Move the tracepoint into the common execlists_context_schedule_out() and
call it from preemption completion as well. A small bit of refactoring
should help when tracing; otherwise requests appear to mysteriously
disappear, and some appear to be emitted to HW multiple times.

Reported-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502230202.6848-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e04798e98db2..3d747d1c3d4d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -398,10 +398,11 @@ execlists_context_schedule_in(struct i915_request *rq)
 }
 
 static inline void
-execlists_context_schedule_out(struct i915_request *rq)
+execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
 {
 	intel_engine_context_out(rq->engine);
-	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+	execlists_context_status_change(rq, status);
+	trace_i915_request_out(rq);
 }
 
 static void
@@ -772,12 +773,10 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 			  intel_engine_get_seqno(rq->engine));
 
 		GEM_BUG_ON(!execlists->active);
-		intel_engine_context_out(rq->engine);
-
-		execlists_context_status_change(rq,
-						i915_request_completed(rq) ?
-						INTEL_CONTEXT_SCHEDULE_OUT :
-						INTEL_CONTEXT_SCHEDULE_PREEMPTED);
+		execlists_context_schedule_out(rq,
+					       i915_request_completed(rq) ?
+					       INTEL_CONTEXT_SCHEDULE_OUT :
+					       INTEL_CONTEXT_SCHEDULE_PREEMPTED);
 
 		i915_request_put(rq);
 
@@ -1105,8 +1104,8 @@ static void execlists_submission_tasklet(unsigned long data)
 				 */
 				GEM_BUG_ON(!i915_request_completed(rq));
 
-				execlists_context_schedule_out(rq);
-				trace_i915_request_out(rq);
+				execlists_context_schedule_out(rq,
+							       INTEL_CONTEXT_SCHEDULE_OUT);
 				i915_request_put(rq);
 
 				GEM_TRACE("%s completed ctx=%d\n",

From e01569ab962145b2fb46f6240bf1f07ec3a6261a Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 9 Apr 2018 10:49:05 +0100
Subject: [PATCH 0574/1286] drm/i915: Silence debugging DRM_ERROR for failing
 to suspend vlv powerwells
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If we try to suspend a wedged device following a GPU reset failure, we
will also fail to turn off the rc6 powerwells (on vlv), leading to an
*ERROR*. This is quite expected in this case, so the best we can do is
shake our heads and reduce the *ERROR* to a debug so CI stops
complaining.

Testcase: igt/gem_eio/in-flight-suspend #vlv
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105583
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180409094905.4516-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b7dbeba72dec..8c2986849236 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2468,10 +2468,13 @@ static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
 	/*
 	 * RC6 transitioning can be delayed up to 2 msec (see
 	 * valleyview_enable_rps), use 3 msec for safety.
+	 *
+	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
+	 * reset and we are trying to force the machine to sleep.
 	 */
 	if (vlv_wait_for_pw_status(dev_priv, mask, val))
-		DRM_ERROR("timeout waiting for GT wells to go %s\n",
-			  onoff(wait_for_on));
+		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
+				 onoff(wait_for_on));
 }
 
 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)

From 5bb562f829bf69223e90e158f654b063d964a291 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 20 Apr 2018 08:51:56 +0200
Subject: [PATCH 0575/1286] drm: Drop DRM_CONTROL_ALLOW from ioctls

We've disabled control nodes in

commit 8a357d10043c75e980e7fcdb60d2b913491564af
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date:   Fri Oct 28 10:10:50 2016 +0200

    drm: Nerf DRM_CONTROL nodes

and there was only a minor uapi break that we've papered over with

commit 6449b088dd51dd5aa6b38455888bbf538d21f2fc
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date:   Fri Dec 9 14:56:56 2016 +0100

    drm: Add fake controlD* symlinks for backwards compat

Since then Keith has also added real control nodes with a
proper and usable uapi in the form of drm leases.

It's time to remove the control node leftovers.

Cc: Keith Packard <keithp@keithp.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: David Airlie <airlied@linux.ie>
Link: https://patchwork.freedesktop.org/patch/msgid/20180420065159.4531-1-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/drm_ioc32.c |  4 +--
 drivers/gpu/drm/drm_ioctl.c | 68 ++++++++++++++++++-------------------
 2 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index f8e96e648acf..67b1fca39aa6 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -105,7 +105,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
 		.desc = compat_ptr(v32.desc),
 	};
 	err = drm_ioctl_kernel(file, drm_version, &v,
-			DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW);
+			       DRM_UNLOCKED|DRM_RENDER_ALLOW);
 	if (err)
 		return err;
 
@@ -885,7 +885,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64,
-				DRM_CONTROL_ALLOW|DRM_UNLOCKED);
+			       DRM_UNLOCKED);
 	if (err)
 		return err;
 
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index af782911c505..43f7e2e81294 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -539,7 +539,7 @@ EXPORT_SYMBOL(drm_ioctl_permit);
 /* Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
-		      DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
@@ -613,41 +613,41 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
@@ -665,10 +665,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )

From 0cd54b039537767cc12c4d7b6a62a98d01d99403 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 20 Apr 2018 08:51:57 +0200
Subject: [PATCH 0576/1286] drm/i915: Drop DRM_CONTROL_ALLOW

Control nodes are no more!

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: intel-gfx@lists.freedesktop.org
Link: https://patchwork.freedesktop.org/patch/msgid/20180420065159.4531-2-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/i915/i915_drv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 07c07d55398b..154414832d86 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2816,10 +2816,10 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),

From 190c462d5be19ba622a82f5fd0625087c870a1e6 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 20 Apr 2018 08:51:58 +0200
Subject: [PATCH 0577/1286] drm/vmwgfx: Drop DRM_CONTROL_ALLOW

Control nodes are no more!

Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
Cc: Sinclair Yeh <syeh@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180420065159.4531-3-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 70e1a8820a7c..97f37c3c16f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -159,14 +159,14 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 		      vmw_kms_cursor_bypass_ioctl,
-		      DRM_MASTER | DRM_CONTROL_ALLOW),
+		      DRM_MASTER),
 
 	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
-		      DRM_MASTER | DRM_CONTROL_ALLOW),
+		      DRM_MASTER),
 	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
-		      DRM_MASTER | DRM_CONTROL_ALLOW),
+		      DRM_MASTER),
 	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
-		      DRM_MASTER | DRM_CONTROL_ALLOW),
+		      DRM_MASTER),
 
 	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),

From ea491b23b2ffba069537a8216060d4d3400931a7 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 2 May 2018 23:03:12 +0100
Subject: [PATCH 0578/1286] drm/i915: Reset the hangcheck timestamp before
 repeating a seqno

In the unusual circumstance where we reuse a seqno (for example, in
igt), make sure that we reset the hangcheck timestamp before it sees the
same seqno again.
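
For illustration only, a minimal userspace sketch of that pattern (all
names here, such as struct watchdog and seqno_reuse_prepare(), are
made up and not the i915 API): before a seqno value is handed out
again, the watchdog's reference timestamp is reset so that progress
recorded for the old use of that seqno is not held against the new
work.

#include <stdint.h>
#include <time.h>

struct watchdog {
	uint32_t last_seqno;	/* last seqno observed making progress */
	time_t last_progress;	/* when that progress was observed */
};

static void watchdog_reset(struct watchdog *wd)
{
	wd->last_seqno = 0;
	wd->last_progress = time(NULL);	/* start the clock afresh */
}

static void seqno_reuse_prepare(struct watchdog *wd)
{
	/* flush stale state before the same seqno values reappear */
	watchdog_reset(wd);
}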

References: https://bugs.freedesktop.org/show_bug.cgi?id=106215
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502220313.6459-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.c    | 1 +
 drivers/gpu/drm/i915/intel_hangcheck.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5acf869f3ca3..63bb61089be5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -223,6 +223,7 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
 			/* Flush any waiters before we reuse the seqno */
 			intel_engine_disarm_breadcrumbs(engine);
+			intel_engine_init_hangcheck(engine);
 			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
 		}
 
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 309e38b00e95..d47e346bd49e 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -452,6 +452,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 {
 	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+	engine->hangcheck.action_timestamp = jiffies;
 }
 
 void intel_hangcheck_init(struct drm_i915_private *i915)

From e21b141376f9b654e782757e156886c34bdf12ae Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 2 May 2018 23:03:13 +0100
Subject: [PATCH 0579/1286] drm/i915: Mark the hangcheck as idle when unparking
 the engines

As we unpark the engines and are about to begin a new cycle of activity,
mark the current status of the hangcheck as idle so that we avoid
carrying over a stale timestamp/action into the next cycle.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502220313.6459-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a90769b9954e..70325e0824e3 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1073,6 +1073,8 @@ void intel_engines_unpark(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id) {
 		if (engine->unpark)
 			engine->unpark(engine);
+
+		intel_engine_init_hangcheck(engine);
 	}
 }
 

From aee1a37d0f1a904a1443c327211f4bcd645681f1 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Wed, 2 May 2018 10:23:59 +0200
Subject: [PATCH 0580/1286] dma-fence: remove fill_driver_data callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Noticed while I was typing docs. Entirely unused.

v2: Remove the reference in @timeline_value_str too. While at it,
clarify why timeline_value_str has a fence parameter: we don't have an
explicit timeline structure, unfortunately.
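
As an aside, a minimal userspace sketch of the lookup described above
(my_timeline, my_fence and my_timeline_value_str are hypothetical
names, not the dma-fence API): the fence argument is used only to
reach the timeline it belongs to, so which particular fence gets
passed in does not matter.

#include <stdio.h>

struct my_timeline {
	unsigned int current_seqno;	/* last signalled point */
};

struct my_fence {
	unsigned int seqno;
	struct my_timeline *timeline;	/* back-pointer used for the lookup */
};

static void my_timeline_value_str(const struct my_fence *fence,
				  char *str, int size)
{
	/* any fence on the timeline yields the same answer */
	snprintf(str, size, "%u", fence->timeline->current_seqno);
}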

Cc: Eric Anholt <eric@anholt.net>
Reviewed-by: Christian König <christian.koenig@amd.com> (v1)
Reviewed-by: Eric Anholt <eric@anholt.net>
Cc: Christian König <christian.koenig@amd.com> (v1)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502082359.30345-1-daniel.vetter@ffwll.ch
---
 include/linux/dma-fence.h | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index eb9b05aa5aea..111aefe1c956 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -217,17 +217,6 @@ struct dma_fence_ops {
 	 */
 	void (*release)(struct dma_fence *fence);
 
-	/**
-	 * @fill_driver_data:
-	 *
-	 * Callback to fill in free-form debug info.
-	 *
-	 * Returns amount of bytes filled, or negative error on failure.
-	 *
-	 * This callback is optional.
-	 */
-	int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
-
 	/**
 	 * @fence_value_str:
 	 *
@@ -242,8 +231,9 @@ struct dma_fence_ops {
 	 * @timeline_value_str:
 	 *
 	 * Fills in the current value of the timeline as a string, like the
-	 * sequence number. This should match what @fill_driver_data prints for
-	 * the most recently signalled fence (assuming no delayed signalling).
+	 * sequence number. Note that the specific fence passed to this function
+	 * should not matter, drivers should only use it to look up the
+	 * corresponding timeline structures.
 	 */
 	void (*timeline_value_str)(struct dma_fence *fence,
 				   char *str, int size);

From 95ed01ea97b3d76380a817bc41ceeefffa6a99f1 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 27 Apr 2018 08:17:10 +0200
Subject: [PATCH 0581/1286] dma-fence: Make ->enable_signaling optional
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Many drivers have a trivial implementation for ->enable_signaling.
Let's make it optional by assuming that signaling is already
available when the callback isn't present.
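
A minimal userspace sketch of the fallback, with made-up names
(my_fence, my_fence_ops, my_fence_init; this is not the dma-fence
API): when no enable_signaling hook is supplied, the fence is
initialised as if signaling had already been enabled, so callers never
need to invoke the missing hook.

#include <stdbool.h>

struct my_fence;

struct my_fence_ops {
	/* optional: absence means signaling is always available */
	bool (*enable_signaling)(struct my_fence *fence);
};

struct my_fence {
	const struct my_fence_ops *ops;
	bool signaling_enabled;
};

static void my_fence_init(struct my_fence *fence,
			  const struct my_fence_ops *ops)
{
	fence->ops = ops;
	/* no hook provided: treat signaling as already enabled */
	fence->signaling_enabled = !ops->enable_signaling;
}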

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180427061724.28497-4-daniel.vetter@ffwll.ch
---
 drivers/dma-buf/dma-fence.c | 13 ++++++++++++-
 include/linux/dma-fence.h   |  3 ++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 4edb9fd3cf47..7b5b40d6b70e 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -181,6 +181,13 @@ void dma_fence_release(struct kref *kref)
 }
 EXPORT_SYMBOL(dma_fence_release);
 
+/**
+ * dma_fence_free - default release function for &dma_fence.
+ * @fence: fence to release
+ *
+ * This is the default implementation for &dma_fence_ops.release. It calls
+ * kfree_rcu() on @fence.
+ */
 void dma_fence_free(struct dma_fence *fence)
 {
 	kfree_rcu(fence, rcu);
@@ -560,7 +567,7 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
 	       spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
-	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
+	BUG_ON(!ops || !ops->wait ||
 	       !ops->get_driver_name || !ops->get_timeline_name);
 
 	kref_init(&fence->refcount);
@@ -572,6 +579,10 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
 	fence->flags = 0UL;
 	fence->error = 0;
 
+	if (!ops->enable_signaling)
+		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			&fence->flags);
+
 	trace_dma_fence_init(fence);
 }
 EXPORT_SYMBOL(dma_fence_init);
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 111aefe1c956..c053d19e1e24 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -166,7 +166,8 @@ struct dma_fence_ops {
 	 * released when the fence is signalled (through e.g. the interrupt
 	 * handler).
 	 *
-	 * This callback is mandatory.
+	 * This callback is optional. If this callback is not present, then the
+	 * driver must always have signaling enabled.
 	 */
 	bool (*enable_signaling)(struct dma_fence *fence);
 

From 90e9965524af18819e634bdd9c5637331ea645f1 Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 27 Apr 2018 08:17:11 +0200
Subject: [PATCH 0582/1286] dma-fence: Allow wait_any_timeout for all fences
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When this was introduced in

commit a519435a96597d8cd96123246fea4ae5a6c90b02
Author: Christian König <christian.koenig@amd.com>
Date:   Tue Oct 20 16:34:16 2015 +0200

    dma-buf/fence: add fence_wait_any_timeout function v2

there was a restriction added that this only works if the dma-fence
uses the dma_fence_default_wait hook. That works for amdgpu, which is
the only caller. Well, until you share some buffers with e.g. i915;
then you get an -EINVAL.

But there's really no reason for this, because all drivers must
support callbacks. The special ->wait hook is only an optimization:
if the driver needs to create a worker thread for an active callback,
it can avoid doing so when it knows that a process context is already
available. In other words, ->wait is just an optimization; the logic in
dma_fence_default_wait() should work for all drivers.

Let's remove this restriction.
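
A hedged usage sketch (the helper name is invented for illustration):
after this change a caller can wait on fences coming from different
drivers without caring which ->wait hook each driver installed:

#include <linux/dma-fence.h>
#include <linux/jiffies.h>

static signed long wait_for_any_fence(struct dma_fence **fences,
				      uint32_t count)
{
	/* Interruptible, 1s timeout; we don't care which fence signaled
	 * first, so the index pointer is left NULL. */
	return dma_fence_wait_any_timeout(fences, count, true,
					  msecs_to_jiffies(1000), NULL);
}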

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180427061724.28497-5-daniel.vetter@ffwll.ch
---
 drivers/dma-buf/dma-fence.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 7b5b40d6b70e..59049375bd19 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -503,11 +503,6 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
 	for (i = 0; i < count; ++i) {
 		struct dma_fence *fence = fences[i];
 
-		if (fence->ops->wait != dma_fence_default_wait) {
-			ret = -EINVAL;
-			goto fence_rm_cb;
-		}
-
 		cb[i].task = current;
 		if (dma_fence_add_callback(fence, &cb[i].base,
 					   dma_fence_default_wait_cb)) {

From 49a53d493e603c594e39dfbc7171917effcaf01e Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 27 Apr 2018 08:17:12 +0200
Subject: [PATCH 0583/1286] dma-fence: Make ->wait callback optional
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Almost everyone uses dma_fence_default_wait.
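
What this is meant to allow, as a hedged sketch (the "bar" names are
illustrative): an ops table with only the two name callbacks. Note the
revert a couple of patches below; the BUG_ON(!ops->wait) in
dma_fence_init() was not removed here, so such a fence would still trip
it at init time until that is fixed.

#include <linux/dma-fence.h>

static const char *bar_fence_get_driver_name(struct dma_fence *f)
{
	return "bar";
}

static const char *bar_fence_get_timeline_name(struct dma_fence *f)
{
	return "bar-timeline";
}

static const struct dma_fence_ops bar_fence_ops = {
	.get_driver_name = bar_fence_get_driver_name,
	.get_timeline_name = bar_fence_get_timeline_name,
	/* no .enable_signaling and no .wait: dma_fence_wait_timeout()
	 * falls back to dma_fence_default_wait() */
};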

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Link: https://patchwork.freedesktop.org/patch/msgid/20180427061724.28497-6-daniel.vetter@ffwll.ch
---
 drivers/dma-buf/dma-fence-array.c |  1 -
 drivers/dma-buf/dma-fence.c       |  5 ++++-
 drivers/dma-buf/sw_sync.c         |  1 -
 include/linux/dma-fence.h         | 13 ++++++++-----
 4 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index dd1edfb27b61..a8c254497251 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -104,7 +104,6 @@ const struct dma_fence_ops dma_fence_array_ops = {
 	.get_timeline_name = dma_fence_array_get_timeline_name,
 	.enable_signaling = dma_fence_array_enable_signaling,
 	.signaled = dma_fence_array_signaled,
-	.wait = dma_fence_default_wait,
 	.release = dma_fence_array_release,
 };
 EXPORT_SYMBOL(dma_fence_array_ops);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 59049375bd19..30fcbe415ff4 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -158,7 +158,10 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
 		return -EINVAL;
 
 	trace_dma_fence_wait_start(fence);
-	ret = fence->ops->wait(fence, intr, timeout);
+	if (fence->ops->wait)
+		ret = fence->ops->wait(fence, intr, timeout);
+	else
+		ret = dma_fence_default_wait(fence, intr, timeout);
 	trace_dma_fence_wait_end(fence);
 	return ret;
 }
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 3d78ca89a605..53c1d6d36a64 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -188,7 +188,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
 	.get_timeline_name = timeline_fence_get_timeline_name,
 	.enable_signaling = timeline_fence_enable_signaling,
 	.signaled = timeline_fence_signaled,
-	.wait = dma_fence_default_wait,
 	.release = timeline_fence_release,
 	.fence_value_str = timeline_fence_value_str,
 	.timeline_value_str = timeline_fence_timeline_value_str,
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index c053d19e1e24..02dba8cd033d 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -191,11 +191,14 @@ struct dma_fence_ops {
 	/**
 	 * @wait:
 	 *
-	 * Custom wait implementation, or dma_fence_default_wait.
+	 * Custom wait implementation, defaults to dma_fence_default_wait() if
+	 * not set.
 	 *
-	 * Must not be NULL, set to dma_fence_default_wait for default implementation.
-	 * the dma_fence_default_wait implementation should work for any fence, as long
-	 * as enable_signaling works correctly.
+	 * The dma_fence_default_wait implementation should work for any fence, as long
+	 * as @enable_signaling works correctly. This hook allows drivers to
+	 * have an optimized version for the case where a process context is
+	 * already available, e.g. if @enable_signaling for the general case
+	 * needs to set up a worker thread.
 	 *
 	 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
 	 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
@@ -203,7 +206,7 @@ struct dma_fence_ops {
 	 * which should be treated as if the fence is signaled. For example a hardware
 	 * lockup could be reported like that.
 	 *
-	 * This callback is mandatory.
+	 * This callback is optional.
 	 */
 	signed long (*wait)(struct dma_fence *fence,
 			    bool intr, signed long timeout);

From 95aee35fe10dc6c86498c5dd5f06bd5fb5af723a Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 27 Apr 2018 08:17:14 +0200
Subject: [PATCH 0584/1286] drm: Remove unnecessary dma_fence_ops

dma_fence_default_wait is the default now, same for the trivial
enable_signaling implementation.

Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: David Airlie <airlied@linux.ie>
Link: https://patchwork.freedesktop.org/patch/msgid/20180427061724.28497-8-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/drm_crtc.c              |  7 -------
 drivers/gpu/drm/drm_syncobj.c           |  1 -
 drivers/gpu/drm/scheduler/sched_fence.c | 11 -----------
 3 files changed, 19 deletions(-)

diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index a231dd5dce16..e4d3285f4191 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -225,16 +225,9 @@ static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
 	return crtc->timeline_name;
 }
 
-static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
-{
-	return true;
-}
-
 static const struct dma_fence_ops drm_crtc_fence_ops = {
 	.get_driver_name = drm_crtc_fence_get_driver_name,
 	.get_timeline_name = drm_crtc_fence_get_timeline_name,
-	.enable_signaling = drm_crtc_fence_enable_signaling,
-	.wait = dma_fence_default_wait,
 };
 
 struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index d4f4ce484529..adb3cb27d31e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -207,7 +207,6 @@ static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
 	.get_driver_name = drm_syncobj_null_fence_get_name,
 	.get_timeline_name = drm_syncobj_null_fence_get_name,
 	.enable_signaling = drm_syncobj_null_fence_enable_signaling,
-	.wait = dma_fence_default_wait,
 	.release = NULL,
 };
 
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 69aab086b913..4843289cc8f0 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -81,11 +81,6 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
 	return (const char *)fence->sched->name;
 }
 
-static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
-{
-	return true;
-}
-
 /**
  * amd_sched_fence_free - free up the fence memory
  *
@@ -134,18 +129,12 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
 const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
 	.get_driver_name = drm_sched_fence_get_driver_name,
 	.get_timeline_name = drm_sched_fence_get_timeline_name,
-	.enable_signaling = drm_sched_fence_enable_signaling,
-	.signaled = NULL,
-	.wait = dma_fence_default_wait,
 	.release = drm_sched_fence_release_scheduled,
 };
 
 const struct dma_fence_ops drm_sched_fence_ops_finished = {
 	.get_driver_name = drm_sched_fence_get_driver_name,
 	.get_timeline_name = drm_sched_fence_get_timeline_name,
-	.enable_signaling = drm_sched_fence_enable_signaling,
-	.signaled = NULL,
-	.wait = dma_fence_default_wait,
 	.release = drm_sched_fence_release_finished,
 };
 

From bf3012ada1b2222e770de5c35c1bb16f73b3a01d Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Wed, 2 May 2018 10:23:25 +0200
Subject: [PATCH 0585/1286] drm/qxl: Remove unnecessary dma_fence_ops

The trivial enable_signaling implementation matches the default code.

v2: Fix up commit message to match patch better (Eric).

Cc: Eric Anholt <eric@anholt.net>
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: virtualization@lists.linux-foundation.org
Link: https://patchwork.freedesktop.org/patch/msgid/20180502082325.30264-1-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/qxl/qxl_release.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 5d84a66fed36..04f3605ac42a 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -50,12 +50,6 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence)
 	return "release";
 }
 
-static bool qxl_nop_signaling(struct dma_fence *fence)
-{
-	/* fences are always automatically signaled, so just pretend we did this.. */
-	return true;
-}
-
 static long qxl_fence_wait(struct dma_fence *fence, bool intr,
 			   signed long timeout)
 {
@@ -119,7 +113,6 @@ signaled:
 static const struct dma_fence_ops qxl_fence_ops = {
 	.get_driver_name = qxl_get_driver_name,
 	.get_timeline_name = qxl_get_timeline_name,
-	.enable_signaling = qxl_nop_signaling,
 	.wait = qxl_fence_wait,
 };
 

From 51f170a544bdb06d93316d8ff0814a52daa24a6c Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 3 May 2018 12:31:38 +0200
Subject: [PATCH 0586/1286] Revert
 "190c462d5be19ba622a82f5fd0625087c870a1e6..bf3012ada1b2222e770de5c35c1bb16f73b3a01d"

I shouldn't have pushed this; CI was right - I failed to remove the
BUG_ON(!ops->wait);

Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 drivers/dma-buf/dma-fence-array.c       |  1 +
 drivers/dma-buf/dma-fence.c             | 23 ++++++------------
 drivers/dma-buf/sw_sync.c               |  1 +
 drivers/gpu/drm/drm_crtc.c              |  7 ++++++
 drivers/gpu/drm/drm_syncobj.c           |  1 +
 drivers/gpu/drm/qxl/qxl_release.c       |  7 ++++++
 drivers/gpu/drm/scheduler/sched_fence.c | 11 +++++++++
 include/linux/dma-fence.h               | 32 +++++++++++++++----------
 8 files changed, 54 insertions(+), 29 deletions(-)

diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index a8c254497251..dd1edfb27b61 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -104,6 +104,7 @@ const struct dma_fence_ops dma_fence_array_ops = {
 	.get_timeline_name = dma_fence_array_get_timeline_name,
 	.enable_signaling = dma_fence_array_enable_signaling,
 	.signaled = dma_fence_array_signaled,
+	.wait = dma_fence_default_wait,
 	.release = dma_fence_array_release,
 };
 EXPORT_SYMBOL(dma_fence_array_ops);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 30fcbe415ff4..4edb9fd3cf47 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -158,10 +158,7 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
 		return -EINVAL;
 
 	trace_dma_fence_wait_start(fence);
-	if (fence->ops->wait)
-		ret = fence->ops->wait(fence, intr, timeout);
-	else
-		ret = dma_fence_default_wait(fence, intr, timeout);
+	ret = fence->ops->wait(fence, intr, timeout);
 	trace_dma_fence_wait_end(fence);
 	return ret;
 }
@@ -184,13 +181,6 @@ void dma_fence_release(struct kref *kref)
 }
 EXPORT_SYMBOL(dma_fence_release);
 
-/**
- * dma_fence_free - default release function for &dma_fence.
- * @fence: fence to release
- *
- * This is the default implementation for &dma_fence_ops.release. It calls
- * kfree_rcu() on @fence.
- */
 void dma_fence_free(struct dma_fence *fence)
 {
 	kfree_rcu(fence, rcu);
@@ -506,6 +496,11 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
 	for (i = 0; i < count; ++i) {
 		struct dma_fence *fence = fences[i];
 
+		if (fence->ops->wait != dma_fence_default_wait) {
+			ret = -EINVAL;
+			goto fence_rm_cb;
+		}
+
 		cb[i].task = current;
 		if (dma_fence_add_callback(fence, &cb[i].base,
 					   dma_fence_default_wait_cb)) {
@@ -565,7 +560,7 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
 	       spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
-	BUG_ON(!ops || !ops->wait ||
+	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
 	       !ops->get_driver_name || !ops->get_timeline_name);
 
 	kref_init(&fence->refcount);
@@ -577,10 +572,6 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
 	fence->flags = 0UL;
 	fence->error = 0;
 
-	if (!ops->enable_signaling)
-		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-			&fence->flags);
-
 	trace_dma_fence_init(fence);
 }
 EXPORT_SYMBOL(dma_fence_init);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 53c1d6d36a64..3d78ca89a605 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -188,6 +188,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
 	.get_timeline_name = timeline_fence_get_timeline_name,
 	.enable_signaling = timeline_fence_enable_signaling,
 	.signaled = timeline_fence_signaled,
+	.wait = dma_fence_default_wait,
 	.release = timeline_fence_release,
 	.fence_value_str = timeline_fence_value_str,
 	.timeline_value_str = timeline_fence_timeline_value_str,
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e4d3285f4191..a231dd5dce16 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -225,9 +225,16 @@ static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
 	return crtc->timeline_name;
 }
 
+static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
 static const struct dma_fence_ops drm_crtc_fence_ops = {
 	.get_driver_name = drm_crtc_fence_get_driver_name,
 	.get_timeline_name = drm_crtc_fence_get_timeline_name,
+	.enable_signaling = drm_crtc_fence_enable_signaling,
+	.wait = dma_fence_default_wait,
 };
 
 struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index adb3cb27d31e..d4f4ce484529 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -207,6 +207,7 @@ static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
 	.get_driver_name = drm_syncobj_null_fence_get_name,
 	.get_timeline_name = drm_syncobj_null_fence_get_name,
 	.enable_signaling = drm_syncobj_null_fence_enable_signaling,
+	.wait = dma_fence_default_wait,
 	.release = NULL,
 };
 
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 04f3605ac42a..5d84a66fed36 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -50,6 +50,12 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence)
 	return "release";
 }
 
+static bool qxl_nop_signaling(struct dma_fence *fence)
+{
+	/* fences are always automatically signaled, so just pretend we did this.. */
+	return true;
+}
+
 static long qxl_fence_wait(struct dma_fence *fence, bool intr,
 			   signed long timeout)
 {
@@ -113,6 +119,7 @@ signaled:
 static const struct dma_fence_ops qxl_fence_ops = {
 	.get_driver_name = qxl_get_driver_name,
 	.get_timeline_name = qxl_get_timeline_name,
+	.enable_signaling = qxl_nop_signaling,
 	.wait = qxl_fence_wait,
 };
 
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 4843289cc8f0..69aab086b913 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -81,6 +81,11 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
 	return (const char *)fence->sched->name;
 }
 
+static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
+{
+	return true;
+}
+
 /**
  * amd_sched_fence_free - free up the fence memory
  *
@@ -129,12 +134,18 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
 const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
 	.get_driver_name = drm_sched_fence_get_driver_name,
 	.get_timeline_name = drm_sched_fence_get_timeline_name,
+	.enable_signaling = drm_sched_fence_enable_signaling,
+	.signaled = NULL,
+	.wait = dma_fence_default_wait,
 	.release = drm_sched_fence_release_scheduled,
 };
 
 const struct dma_fence_ops drm_sched_fence_ops_finished = {
 	.get_driver_name = drm_sched_fence_get_driver_name,
 	.get_timeline_name = drm_sched_fence_get_timeline_name,
+	.enable_signaling = drm_sched_fence_enable_signaling,
+	.signaled = NULL,
+	.wait = dma_fence_default_wait,
 	.release = drm_sched_fence_release_finished,
 };
 
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 02dba8cd033d..eb9b05aa5aea 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -166,8 +166,7 @@ struct dma_fence_ops {
 	 * released when the fence is signalled (through e.g. the interrupt
 	 * handler).
 	 *
-	 * This callback is optional. If this callback is not present, then the
-	 * driver must always have signaling enabled.
+	 * This callback is mandatory.
 	 */
 	bool (*enable_signaling)(struct dma_fence *fence);
 
@@ -191,14 +190,11 @@ struct dma_fence_ops {
 	/**
 	 * @wait:
 	 *
-	 * Custom wait implementation, defaults to dma_fence_default_wait() if
-	 * not set.
+	 * Custom wait implementation, or dma_fence_default_wait.
 	 *
-	 * The dma_fence_default_wait implementation should work for any fence, as long
-	 * as @enable_signaling works correctly. This hook allows drivers to
-	 * have an optimized version for the case where a process context is
-	 * already available, e.g. if @enable_signaling for the general case
-	 * needs to set up a worker thread.
+	 * Must not be NULL, set to dma_fence_default_wait for default implementation.
+	 * the dma_fence_default_wait implementation should work for any fence, as long
+	 * as enable_signaling works correctly.
 	 *
 	 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
 	 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
@@ -206,7 +202,7 @@ struct dma_fence_ops {
 	 * which should be treated as if the fence is signaled. For example a hardware
 	 * lockup could be reported like that.
 	 *
-	 * This callback is optional.
+	 * This callback is mandatory.
 	 */
 	signed long (*wait)(struct dma_fence *fence,
 			    bool intr, signed long timeout);
@@ -221,6 +217,17 @@ struct dma_fence_ops {
 	 */
 	void (*release)(struct dma_fence *fence);
 
+	/**
+	 * @fill_driver_data:
+	 *
+	 * Callback to fill in free-form debug info.
+	 *
+	 * Returns amount of bytes filled, or negative error on failure.
+	 *
+	 * This callback is optional.
+	 */
+	int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+
 	/**
 	 * @fence_value_str:
 	 *
@@ -235,9 +242,8 @@ struct dma_fence_ops {
 	 * @timeline_value_str:
 	 *
 	 * Fills in the current value of the timeline as a string, like the
-	 * sequence number. Note that the specific fence passed to this function
-	 * should not matter, drivers should only use it to look up the
-	 * corresponding timeline structures.
+	 * sequence number. This should match what @fill_driver_data prints for
+	 * the most recently signalled fence (assuming no delayed signalling).
 	 */
 	void (*timeline_value_str)(struct dma_fence *fence,
 				   char *str, int size);

From 3297234a05ab1e90091b0574db4c397ef0e90d5f Mon Sep 17 00:00:00 2001
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Date: Wed, 2 May 2018 10:52:55 -0700
Subject: [PATCH 0587/1286] drm/i915: Adjust eDP's logical vco in a reliable
 place.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In intel_dp_compute_config() we were calculating the needed vco
for eDP on gen9 and stashing it in
intel_atomic_state.cdclk.logical.vco.

However, a few moments later in intel_modeset_checks() we replace
the entire intel_atomic_state.cdclk.logical with
dev_priv->cdclk.logical, overwriting the desired logical
vco for eDP on gen9.

So, with the wrong VCO value we end up with the wrong desired cdclk,
and it also raises a lot of WARNs: on gen9, when we read
CDCLK_CTL to verify that we configured the desired frequency
properly, the CD Frequency Select bits [27:26] == 10b can mean
337.5 or 308.57 MHz depending on the VCO. So if we have the wrong
VCO value stashed we will believe the frequency selection didn't
stick and start to raise WARNs about a cdclk mismatch.

[   42.857519] [drm:intel_dump_cdclk_state [i915]] Changing CDCLK to 308571 kHz, VCO 8640000 kHz, ref 24000 kHz, bypass 24000 kHz, voltage level 0
[   42.897269] cdclk state doesn't match!
[   42.901052] WARNING: CPU: 5 PID: 1116 at drivers/gpu/drm/i915/intel_cdclk.c:2084 intel_set_cdclk+0x5d/0x110 [i915]
[   42.938004] RIP: 0010:intel_set_cdclk+0x5d/0x110 [i915]
[   43.155253] WARNING: CPU: 5 PID: 1116 at drivers/gpu/drm/i915/intel_cdclk.c:2084 intel_set_cdclk+0x5d/0x110 [i915]
[   43.170277] [drm:intel_dump_cdclk_state [i915]] [hw state] 337500 kHz, VCO 8100000 kHz, ref 24000 kHz, bypass 24000 kHz, voltage level 0
[   43.182566] [drm:intel_dump_cdclk_state [i915]] [sw state] 308571 kHz, VCO 8640000 kHz, ref 24000 kHz, bypass 24000 kHz, voltage level 0
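
To make the failure mode concrete, a hedged sketch (the helper is
invented; the numbers are taken from the message and logs above, not
from i915 code) of how the same CD Frequency Select encoding decodes to
two different frequencies:

static int cdclk_khz_from_sel_10b(int vco_khz)
{
	/* CDCLK_CTL[27:26] == 10b means a different cdclk per DPLL0 VCO */
	return vco_khz == 8640000 ? 308571 : 337500;
}

So a stale logical VCO of 8640000 kHz makes us expect 308571 kHz while
the hardware, actually running on the 8100000 kHz VCO, reads back as
337500 kHz, hence the "cdclk state doesn't match!" warning.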

v2: Move the entire eDP's vco logical adjustment to inside
    the skl_modeset_calc_cdclk as suggested by Ville.

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Fixes: bb0f4aab0e76 ("drm/i915: Track full cdclk state for the logical and actual cdclk frequencies")
Cc: <stable@vger.kernel.org> # v4.12+
Link: https://patchwork.freedesktop.org/patch/msgid/20180502175255.5344-1-rodrigo.vivi@intel.com
---
 drivers/gpu/drm/i915/intel_cdclk.c | 41 +++++++++++++++++++++++++++---
 drivers/gpu/drm/i915/intel_dp.c    | 20 ---------------
 2 files changed, 37 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 32d24c69da3c..704ddb4d3ca7 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2302,9 +2302,44 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
 	return 0;
 }
 
+static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *crtc_state;
+	int vco, i;
+
+	vco = intel_state->cdclk.logical.vco;
+	if (!vco)
+		vco = dev_priv->skl_preferred_vco_freq;
+
+	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+		if (!crtc_state->base.enable)
+			continue;
+
+		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+			continue;
+
+		/*
+		 * DPLL0 VCO may need to be adjusted to get the correct
+		 * clock for eDP. This will affect cdclk as well.
+		 */
+		switch (crtc_state->port_clock / 2) {
+		case 108000:
+		case 216000:
+			vco = 8640000;
+			break;
+		default:
+			vco = 8100000;
+			break;
+		}
+	}
+
+	return vco;
+}
+
 static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	int min_cdclk, cdclk, vco;
 
@@ -2312,9 +2347,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
 	if (min_cdclk < 0)
 		return min_cdclk;
 
-	vco = intel_state->cdclk.logical.vco;
-	if (!vco)
-		vco = dev_priv->skl_preferred_vco_freq;
+	vco = skl_dpll0_vco(intel_state);
 
 	/*
 	 * FIXME should also account for plane ratio
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 83da50b13d81..dde92e4af5d3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1929,26 +1929,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 					       reduce_m_n);
 	}
 
-	/*
-	 * DPLL0 VCO may need to be adjusted to get the correct
-	 * clock for eDP. This will affect cdclk as well.
-	 */
-	if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
-		int vco;
-
-		switch (pipe_config->port_clock / 2) {
-		case 108000:
-		case 216000:
-			vco = 8640000;
-			break;
-		default:
-			vco = 8100000;
-			break;
-		}
-
-		to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
-	}
-
 	if (!HAS_DDI(dev_priv))
 		intel_dp_set_clock(encoder, pipe_config);
 

From fc2a69f3903dfd97cd47f593e642b47918c949df Mon Sep 17 00:00:00 2001
From: Satendra Singh Thakur <satendra.t@samsung.com>
Date: Thu, 3 May 2018 11:19:32 +0530
Subject: [PATCH 0588/1286] drm/atomic: Handling the case when setting old crtc
 for plane

In drm_atomic_set_crtc_for_plane(), with the current code, if the
crtc of the plane_state and the crtc passed as an argument to the
function are the same, the entire function is executed in vain:
it gets the state of the crtc and clears and sets the bits in
plane_mask, none of which is needed for the same old crtc.
Ideally we should do nothing in this case, so this patch handles
that and makes the function return early in that scenario.

Signed-off-by: Satendra Singh Thakur <satendra.t@samsung.com>
Cc: Madhur Verma <madhur.verma@samsung.com>
Cc: Hemanshu Srivastava <hemanshu.s@samsung.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/1525326572-25854-1-git-send-email-satendra.t@samsung.com
---
 drivers/gpu/drm/drm_atomic.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9bdd67781917..dc850b4b6e21 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1425,7 +1425,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
 {
 	struct drm_plane *plane = plane_state->plane;
 	struct drm_crtc_state *crtc_state;
-
+	/* Nothing to do for same crtc*/
+	if (plane_state->crtc == crtc)
+		return 0;
 	if (plane_state->crtc) {
 		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
 						       plane_state->crtc);

From 9ba59b79dc699af38624e1dc337f07d99b376c27 Mon Sep 17 00:00:00 2001
From: Tarun <tarun.vyas@intel.com>
Date: Wed, 2 May 2018 16:33:00 -0700
Subject: [PATCH 0589/1286] drm/i915: Remove redundant check for negative
 timeout while doing an atomic pipe update
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

No functional changes, just a minor nit. Stumbled across the kernel doc for
schedule_timeout(), which states "In all cases the return value is guaranteed
to be non-negative". Also, schedule_timeout() already clamps negative
values itself ("return timeout < 0 ? 0 : timeout;") and returns 0
in such cases. Furthermore, msecs_to_jiffies() returns an unsigned long
value. So, let's do away with the redundant check for an atomic
pipe update.

v2: Commit message changes (Manasi).

Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Tarun Vyas <tarun.vyas@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502233300.81220-1-tarun.vyas@intel.com
---
 drivers/gpu/drm/i915/intel_sprite.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index aa1dfaa692b9..9cd4be020840 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -131,7 +131,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 		if (scanline < min || scanline > max)
 			break;
 
-		if (timeout <= 0) {
+		if (!timeout) {
 			DRM_ERROR("Potential atomic update failure on pipe %c\n",
 				  pipe_name(crtc->pipe));
 			break;

From bd4cd03c81010dcd4e6f0e02e4c15f44aefe12d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Thu, 26 Apr 2018 19:30:15 +0300
Subject: [PATCH 0590/1286] drm/i915: Correctly populate user mode h/vdisplay
 with pipe src size during readout
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

During state readout we first read out the pipe src size and store
that information in the user mode h/vdisplay, but later on we overwrite
it with the actual crtc timings. That makes our read-out crtc state
inconsistent with itself when the BIOS has enabled the panel fitter to
scale the pipe contents. Let's preserve the pipe src size based
information in the user mode to make things consistent again.

This fixes a problem introduced by commit a2936e3d9a9c ("drm/i915:
Use drm_mode_get_hv_timing() to populate plane clip rectangle")
where the inconsistent state now leads the plane clipping code
to report a failure on account of the plane dst coordinates not matching
the user mode size. Previously we did the plane clipping based on
the pipe src size instead and thus never noticed the inconsistency.

The failure manifests as a WARN:
[    0.762117] [drm:intel_dump_pipe_config [i915]] requested mode:
[    0.762142] [drm:drm_mode_debug_printmodeline [drm]] Modeline 0:"1366x768" 60 72143 1366 1414 1446 1526 768 771 777 784 0x40 0xa
...
[    0.762327] [drm:intel_dump_pipe_config [i915]] port clock: 72143, pipe src size: 1024x768, pixel rate 72143
...
[    0.764666] [drm:drm_atomic_helper_check_plane_state [drm_kms_helper]] Plane must cover entire CRTC
[    0.764690] [drm:drm_rect_debug_print [drm]] dst: 1024x768+0+0
[    0.764711] [drm:drm_rect_debug_print [drm]] clip: 1366x768+0+0
[    0.764713] ------------[ cut here ]------------
[    0.764714] Could not determine valid watermarks for inherited state
[    0.764792] WARNING: CPU: 4 PID: 159 at drivers/gpu/drm/i915/intel_display.c:14584 intel_modeset_init+0x3ce/0x19d0 [i915]
...

Cc: FadeMind <fademind@gmail.com>
Cc: Dave Jones <davej@codemonkey.org.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reported-by: FadeMind <fademind@gmail.com>
Reported-by: Dave Jones <davej@codemonkey.org.uk>
Tested-by: Dave Jones <davej@codemonkey.org.uk>
References: https://lists.freedesktop.org/archives/intel-gfx/2018-April/163186.html
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105992
Fixes: a2936e3d9a9c ("drm/i915: Use drm_mode_get_hv_timing() to populate plane clip rectangle")
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426163015.14232-1-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Tested-by: FadeMind <fademind@gmail.com>
---
 drivers/gpu/drm/i915/intel_display.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1087358f6364..3fd249c05e4e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15307,6 +15307,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
 		if (crtc_state->base.active) {
 			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
+			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
+			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
 			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
 			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
 

From 9e1de9002190b712a264a21f31ee9692f6d0bc2e Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu, 3 May 2018 11:31:07 +0200
Subject: [PATCH 0591/1286] drm/msm: Don't setup control node debugfs files

It's going away.

v2: Try harder to find them all.

Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Jordan Crouse <jcrouse@codeaurora.org>
Cc: Nicolas Dechesne <nicolas.dechesne@linaro.org>
Cc: Archit Taneja <architt@codeaurora.org>
Cc: Bjorn Andersson <bjorn.andersson@linaro.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503093107.25955-1-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/msm/adreno/adreno_device.c | 1 -
 drivers/gpu/drm/msm/msm_debugfs.c          | 3 ---
 2 files changed, 4 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 8e0cb161754b..0ae5ace65462 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -168,7 +168,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 	if (gpu->funcs->debugfs_init) {
 		gpu->funcs->debugfs_init(gpu, dev->primary);
 		gpu->funcs->debugfs_init(gpu, dev->render);
-		gpu->funcs->debugfs_init(gpu, dev->control);
 	}
 #endif
 
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index ba74cb4f94df..1ff3fda245d1 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -140,9 +140,6 @@ int msm_debugfs_late_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 	ret = late_init_minor(dev->render);
-	if (ret)
-		return ret;
-	ret = late_init_minor(dev->control);
 	return ret;
 }
 

From 0d49f303e8a7006e0af3b58ed3809e1cad0900fb Mon Sep 17 00:00:00 2001
From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 20 Apr 2018 08:51:59 +0200
Subject: [PATCH 0592/1286] drm: remove all control node code

With the ioctl and driver prep done, we can remove everything else.

Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: David Airlie <airlied@linux.ie>
Link: https://patchwork.freedesktop.org/patch/msgid/20180420065159.4531-4-daniel.vetter@ffwll.ch
---
 drivers/gpu/drm/drm_drv.c         | 10 ----------
 drivers/gpu/drm/drm_framebuffer.c |  3 +--
 drivers/gpu/drm/drm_ioctl.c       |  8 +-------
 drivers/gpu/drm/drm_sysfs.c       |  4 +---
 include/drm/drm_device.h          |  1 -
 include/drm/drm_file.h            | 13 -------------
 include/drm/drm_ioctl.h           |  7 -------
 7 files changed, 3 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 32a83b41ab61..f6910ebe4d0e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -99,8 +99,6 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
 		return &dev->primary;
 	case DRM_MINOR_RENDER:
 		return &dev->render;
-	case DRM_MINOR_CONTROL:
-		return &dev->control;
 	default:
 		BUG();
 	}
@@ -567,7 +565,6 @@ err_ctxbitmap:
 err_minors:
 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
 	drm_minor_free(dev, DRM_MINOR_RENDER);
-	drm_minor_free(dev, DRM_MINOR_CONTROL);
 	drm_fs_inode_free(dev->anon_inode);
 err_free:
 	mutex_destroy(&dev->master_mutex);
@@ -603,7 +600,6 @@ void drm_dev_fini(struct drm_device *dev)
 
 	drm_minor_free(dev, DRM_MINOR_PRIMARY);
 	drm_minor_free(dev, DRM_MINOR_RENDER);
-	drm_minor_free(dev, DRM_MINOR_CONTROL);
 
 	mutex_destroy(&dev->master_mutex);
 	mutex_destroy(&dev->ctxlist_mutex);
@@ -796,10 +792,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 
 	mutex_lock(&drm_global_mutex);
 
-	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
-	if (ret)
-		goto err_minors;
-
 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
 	if (ret)
 		goto err_minors;
@@ -837,7 +829,6 @@ err_minors:
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
-	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 out_unlock:
 	mutex_unlock(&drm_global_mutex);
 	return ret;
@@ -882,7 +873,6 @@ void drm_dev_unregister(struct drm_device *dev)
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
-	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 }
 EXPORT_SYMBOL(drm_dev_unregister);
 
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 8c4d32adcc17..bfedceff87bb 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -484,8 +484,7 @@ int drm_mode_getfb(struct drm_device *dev,
 	 * backwards-compatibility reasons, we cannot make GET_FB() privileged,
 	 * so just return an invalid handle for non-masters.
 	 */
-	if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN) &&
-	    !drm_is_control_client(file_priv)) {
+	if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
 		r->handle = 0;
 		ret = 0;
 		goto out;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 43f7e2e81294..eadeabc393f0 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -510,13 +510,7 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 
 	/* MASTER is only for master or control clients */
 	if (unlikely((flags & DRM_MASTER) &&
-		     !drm_is_current_master(file_priv) &&
-		     !drm_is_control_client(file_priv)))
-		return -EACCES;
-
-	/* Control clients must be explicitly allowed */
-	if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
-		     drm_is_control_client(file_priv)))
+		     !drm_is_current_master(file_priv)))
 		return -EACCES;
 
 	/* Render clients must be explicitly allowed */
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1c5b5ce1fd7f..b3c1daad1169 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -331,9 +331,7 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
 	struct device *kdev;
 	int r;
 
-	if (minor->type == DRM_MINOR_CONTROL)
-		minor_str = "controlD%d";
-	else if (minor->type == DRM_MINOR_RENDER)
+	if (minor->type == DRM_MINOR_RENDER)
 		minor_str = "renderD%d";
 	else
 		minor_str = "card%d";
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 3a0eac2885b7..858ba19a3e29 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -38,7 +38,6 @@ struct drm_device {
 	struct device *dev;		/**< Device structure of bus-device */
 	struct drm_driver *driver;	/**< DRM driver managing the device */
 	void *dev_private;		/**< DRM driver private data */
-	struct drm_minor *control;		/**< Control node */
 	struct drm_minor *primary;		/**< Primary node */
 	struct drm_minor *render;		/**< Render node */
 	bool registered;
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 5176c3797680..99ab50cbab00 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -49,7 +49,6 @@ struct device;
 
 enum drm_minor_type {
 	DRM_MINOR_PRIMARY,
-	DRM_MINOR_CONTROL,
 	DRM_MINOR_RENDER,
 };
 
@@ -348,18 +347,6 @@ static inline bool drm_is_render_client(const struct drm_file *file_priv)
 	return file_priv->minor->type == DRM_MINOR_RENDER;
 }
 
-/**
- * drm_is_control_client - is this an open file of the control node
- * @file_priv: DRM file
- *
- * Control nodes are deprecated and in the process of getting removed from the
- * DRM userspace API. Do not ever use!
- */
-static inline bool drm_is_control_client(const struct drm_file *file_priv)
-{
-	return file_priv->minor->type == DRM_MINOR_CONTROL;
-}
-
 int drm_open(struct inode *inode, struct file *filp);
 ssize_t drm_read(struct file *filp, char __user *buffer,
 		 size_t count, loff_t *offset);
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index add42809642a..fafb6f592c4b 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -108,13 +108,6 @@ enum drm_ioctl_flags {
 	 * This is equivalent to callers with the SYSADMIN capability.
 	 */
 	DRM_ROOT_ONLY		= BIT(2),
-	/**
-	 * @DRM_CONTROL_ALLOW:
-	 *
-	 * Deprecated, do not use. Control nodes are in the process of getting
-	 * removed.
-	 */
-	DRM_CONTROL_ALLOW	= BIT(3),
 	/**
 	 * @DRM_UNLOCKED:
 	 *

From 0a4587a034a43e5076770df10446214cfb3de8f8 Mon Sep 17 00:00:00 2001
From: Linus Walleij <linus.walleij@linaro.org>
Date: Thu, 3 May 2018 16:04:31 +0200
Subject: [PATCH 0593/1286] drm/pl111: Fix module probe bug

Commit a30933c27602 ("drm/pl111: Support the Versatile Express")
added a second initcall via the builtin_platform_driver() call.
This works fine as long as you do not try to build the PL111
driver as a module, because a module can only have one initcall;
a modular build then fails with the following error:

(...) multiple definition of `init_module' (...)
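
A rough sketch of the collision (macro behaviour abbreviated, and the
main pl111 registration line is illustrative; see include/linux/module.h
and include/linux/platform_device.h for the real definitions):

/*
 *   module_amba_driver(pl111_amba_driver);            <- first initcall
 *   builtin_platform_driver(vexpress_muxfpga_driver); <- second initcall
 *
 * Built in, these are two ordinary initcalls and coexist fine. Built as
 * a module, device_initcall() (which builtin_platform_driver() uses) is
 * remapped to module_init(), and every module_init() in a module
 * defines init_module(), so the two definitions collide at link time.
 * The fix below turns the second registration into a plain exported
 * function called from pl111_versatile_init() instead.
 */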

Reported-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Liviu Dudau <liviu.dudau@arm.com>
Cc: Pawel Moll <pawel.moll@arm.com>
Cc: Eric Anholt <eric@anholt.net>
Cc: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Fixes: a30933c27602 ("drm/pl111: Support the Versatile Express")
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503140431.5798-1-linus.walleij@linaro.org
---
 drivers/gpu/drm/pl111/pl111_versatile.c |  7 +++++++
 drivers/gpu/drm/pl111/pl111_vexpress.c  | 11 ++++++++++-
 drivers/gpu/drm/pl111/pl111_vexpress.h  |  7 +++++++
 3 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 78ddf8534fd2..b9baefdba38a 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -326,6 +326,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 	if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
 		struct platform_device *pdev;
 
+		/* Registers a driver for the muxfpga */
+		ret = vexpress_muxfpga_init();
+		if (ret) {
+			dev_err(dev, "unable to initialize muxfpga driver\n");
+			return ret;
+		}
+
 		/* Call into deep Vexpress configuration API */
 		pdev = of_find_device_by_node(np);
 		if (!pdev) {
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index c9fee625faf1..a534b225e31b 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -122,4 +122,13 @@ static struct platform_driver vexpress_muxfpga_driver = {
 	.probe = vexpress_muxfpga_probe,
 };
 
-builtin_platform_driver(vexpress_muxfpga_driver);
+int vexpress_muxfpga_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&vexpress_muxfpga_driver);
+	/* -EBUSY just means this driver is already registered */
+	if (ret == -EBUSY)
+		ret = 0;
+	return ret;
+}
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h
index bb54864ca91e..5d3681bb4c00 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.h
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.h
@@ -10,6 +10,8 @@ int pl111_vexpress_clcd_init(struct device *dev,
 			     struct pl111_drm_dev_private *priv,
 			     struct regmap *map);
 
+int vexpress_muxfpga_init(void);
+
 #else
 
 static inline int pl111_vexpress_clcd_init(struct device *dev,
@@ -19,4 +21,9 @@ static inline int pl111_vexpress_clcd_init(struct device *dev,
 	return -ENODEV;
 }
 
+static inline int vexpress_muxfpga_init(void)
+{
+	return 0;
+}
+
 #endif

From 4c70ac7639f6af6d7c2d01f0307665a4b9afada7 Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Mon, 30 Apr 2018 16:59:27 -0700
Subject: [PATCH 0594/1286] drm/vc4: Add a pad field to align drm_vc4_submit_cl
 to 64 bits.

I had originally asked Stefan Schake to drop the pad field from the
syncobj changes that just landed, because I couldn't come up with a
reason to align to 64 bits.

Talking with Dave Airlie about the new v3d driver's submit ioctl, we
came up with a reason: sizeof() on 64-bit platforms may align to 64
bits, in which case the userspace will be submitting the aligned size
and the final 32 bits won't be zero-padded by the kernel.  If
userspace doesn't zero-fill, then a future ABI change adding a 32-bit
field at the end could potentially cause the kernel to read undefined
data from old userspace (our userspace happens to use structure
initialization that zero-fills, but as a general rule we try not to
rely on that in the kernel).
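
The concern, as a hedged sketch (struct and field names are invented
and deliberately not the real drm_vc4_submit_cl layout):

#include <linux/types.h>

struct example_submit {
	__u64 bo_handles;	/* 8-byte member: struct alignment becomes 8 */
	__u32 out_sync;
	/*
	 * Without an explicit trailing field, sizeof() is 16 on 64-bit
	 * builds and the last 4 bytes are compiler padding that old
	 * userspace never zeroes; a later ABI extension reusing those
	 * bytes could then see garbage. Adding pad2 and rejecting
	 * non-zero values keeps that extension safe.
	 */
	__u32 pad2;
};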

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430235927.28712-1-eric@anholt.net
Reviewed-by: Stefan Schake <stschake@gmail.com>
---
 drivers/gpu/drm/vc4/vc4_gem.c | 5 +++++
 include/uapi/drm/vc4_drm.h    | 2 ++
 2 files changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index a4c4be3ac6af..7910b9acedd6 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -1132,6 +1132,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (args->pad2 != 0) {
+		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
+		return -EINVAL;
+	}
+
 	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
 	if (!exec) {
 		DRM_ERROR("malloc failure on exec struct\n");
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index 2be4fe3610b8..2cac6277a1d7 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -193,6 +193,8 @@ struct drm_vc4_submit_cl {
 	 * render job. 0 means ignore.
 	 */
 	__u32 out_sync;
+
+	__u32 pad2;
 };
 
 /**

From 4000626f204d00f601dca7e9d9b8a793b07da4ad Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Mon, 30 Apr 2018 11:10:57 -0700
Subject: [PATCH 0595/1286] dt-bindings: Add a new binding for Broadcom V3D 3.x
 and newer GPUs.

These OpenGL ES GPUs are present in the 7268 and 7278 set top box
chips.

v2: no changes
v3: move to gpu/, fix typo

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430181058.30181-2-eric@anholt.net
Reviewed-by: Rob Herring <robh@kernel.org>
---
 .../devicetree/bindings/gpu/brcm,bcm-v3d.txt  | 28 +++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt

diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
new file mode 100644
index 000000000000..c907aa8dd755
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
@@ -0,0 +1,28 @@
+Broadcom V3D GPU
+
+Only the Broadcom V3D 3.x and newer GPUs are covered by this binding.
+For V3D 2.x, see brcm,bcm-vc4.txt.
+
+Required properties:
+- compatible:	Should be "brcm,7268-v3d" or "brcm,7278-v3d"
+- reg:		Physical base addresses and lengths of the register areas
+- reg-names:	Names for the register areas.  The "hub", "bridge", and "core0"
+		  register areas are always required.  The "gca" register area
+		  is required if the GCA cache controller is present.
+- interrupts:	The interrupt numbers.  The first interrupt is for the hub,
+		  while the following interrupts are for the cores.
+		  See bindings/interrupt-controller/interrupts.txt
+
+Optional properties:
+- clocks:	The core clock the unit runs on
+
+v3d {
+	compatible = "brcm,7268-v3d";
+	reg = <0xf1204000 0x100>,
+	      <0xf1200000 0x4000>,
+	      <0xf1208000 0x4000>,
+	      <0xf1204100 0x100>;
+	reg-names = "bridge", "hub", "core0", "gca";
+	interrupts = <0 78 4>,
+		     <0 77 4>;
+};

From 57692c94dcbe99a1e0444409a3da13fb3443562c Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Mon, 30 Apr 2018 11:10:58 -0700
Subject: [PATCH 0596/1286] drm/v3d: Introduce a new DRM driver for Broadcom
 V3D V3.x+

This driver will be used to support Mesa on the Broadcom 7268 and 7278
platforms.

V3D 3.3 introduces an MMU, which means we no longer need CMA or vc4's
complicated CL/shader validation scheme.  This massively changes the
GEM behavior, so I've forked off to a new driver.

v2: Mark SUBMIT_CL as needing DRM_AUTH.  coccinelle fixes from kbuild
    test robot. Drop personal git link from MAINTAINERS.  Don't
    double-map dma-buf imported BOs.  Add kerneldoc about needing MMU
    eviction.  Drop prime vmap/unmap stubs.  Delay mmap offset setup
    to mmap time.  Use drm_dev_init instead of _alloc.  Use
    ktime_get() for wait_bo timeouts.  Drop drm_can_sleep() usage,
    since we don't modeset.  Switch page tables back to WC (debug
    change to coherent had slipped in).  Switch
    drm_gem_object_unreference_unlocked() to
    drm_gem_object_put_unlocked().  Simplify overflow mem handling by
    not sharing overflow mem between jobs.
v3: no changes
v4: align submit_cl to 64 bits (review by airlied), check zero flags in
    other ioctls.

Signed-off-by: Eric Anholt <eric@anholt.net>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> (v4)
Acked-by: Dave Airlie <airlied@linux.ie> (v3, requested submit_cl change)
Link: https://patchwork.freedesktop.org/patch/msgid/20180430181058.30181-3-eric@anholt.net
---
 Documentation/gpu/drivers.rst          |   1 +
 MAINTAINERS                            |   8 +
 drivers/gpu/drm/Kconfig                |   2 +
 drivers/gpu/drm/Makefile               |   1 +
 drivers/gpu/drm/v3d/Kconfig            |   9 +
 drivers/gpu/drm/v3d/Makefile           |  18 +
 drivers/gpu/drm/v3d/v3d_bo.c           | 389 ++++++++++++++
 drivers/gpu/drm/v3d/v3d_debugfs.c      | 191 +++++++
 drivers/gpu/drm/v3d/v3d_drv.c          | 371 ++++++++++++++
 drivers/gpu/drm/v3d/v3d_drv.h          | 294 +++++++++++
 drivers/gpu/drm/v3d/v3d_fence.c        |  58 +++
 drivers/gpu/drm/v3d/v3d_gem.c          | 668 +++++++++++++++++++++++++
 drivers/gpu/drm/v3d/v3d_irq.c          | 206 ++++++++
 drivers/gpu/drm/v3d/v3d_mmu.c          | 122 +++++
 drivers/gpu/drm/v3d/v3d_regs.h         | 295 +++++++++++
 drivers/gpu/drm/v3d/v3d_sched.c        | 228 +++++++++
 drivers/gpu/drm/v3d/v3d_trace.h        |  82 +++
 drivers/gpu/drm/v3d/v3d_trace_points.c |   9 +
 include/uapi/drm/v3d_drm.h             | 194 +++++++
 19 files changed, 3146 insertions(+)
 create mode 100644 drivers/gpu/drm/v3d/Kconfig
 create mode 100644 drivers/gpu/drm/v3d/Makefile
 create mode 100644 drivers/gpu/drm/v3d/v3d_bo.c
 create mode 100644 drivers/gpu/drm/v3d/v3d_debugfs.c
 create mode 100644 drivers/gpu/drm/v3d/v3d_drv.c
 create mode 100644 drivers/gpu/drm/v3d/v3d_drv.h
 create mode 100644 drivers/gpu/drm/v3d/v3d_fence.c
 create mode 100644 drivers/gpu/drm/v3d/v3d_gem.c
 create mode 100644 drivers/gpu/drm/v3d/v3d_irq.c
 create mode 100644 drivers/gpu/drm/v3d/v3d_mmu.c
 create mode 100644 drivers/gpu/drm/v3d/v3d_regs.h
 create mode 100644 drivers/gpu/drm/v3d/v3d_sched.c
 create mode 100644 drivers/gpu/drm/v3d/v3d_trace.h
 create mode 100644 drivers/gpu/drm/v3d/v3d_trace_points.c
 create mode 100644 include/uapi/drm/v3d_drm.h

diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index d3ab6abae838..f982558fc25d 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -10,6 +10,7 @@ GPU Driver Documentation
    tegra
    tinydrm
    tve200
+   v3d
    vc4
    bridge/dw-hdmi
    xen-front
diff --git a/MAINTAINERS b/MAINTAINERS
index 4af7f6119530..631a16f7fa19 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4795,6 +4795,14 @@ S:	Maintained
 F:	drivers/gpu/drm/omapdrm/
 F:	Documentation/devicetree/bindings/display/ti/
 
+DRM DRIVERS FOR V3D
+M:	Eric Anholt <eric@anholt.net>
+S:	Supported
+F:	drivers/gpu/drm/v3d/
+F:	include/uapi/drm/v3d_drm.h
+F:	Documentation/devicetree/bindings/display/brcm,bcm-v3d.txt
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+
 DRM DRIVERS FOR VC4
 M:	Eric Anholt <eric@anholt.net>
 T:	git git://github.com/anholt/linux
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 757825ac60df..1c73a455fdb1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -267,6 +267,8 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig"
 
 source "drivers/gpu/drm/imx/Kconfig"
 
+source "drivers/gpu/drm/v3d/Kconfig"
+
 source "drivers/gpu/drm/vc4/Kconfig"
 
 source "drivers/gpu/drm/etnaviv/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9d66657ea117..7a401edd8761 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_DRM_MGA)	+= mga/
 obj-$(CONFIG_DRM_I810)	+= i810/
 obj-$(CONFIG_DRM_I915)	+= i915/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_V3D)  += v3d/
 obj-$(CONFIG_DRM_VC4)  += vc4/
 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
 obj-$(CONFIG_DRM_SIS)   += sis/
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
new file mode 100644
index 000000000000..a0c0259355bd
--- /dev/null
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -0,0 +1,9 @@
+config DRM_V3D
+	tristate "Broadcom V3D 3.x and newer"
+	depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST
+	depends on DRM
+	depends on COMMON_CLK
+	select DRM_SCHED
+	help
+	  Choose this option if you have a system that has a Broadcom
+	  V3D 3.x or newer GPU, such as BCM7268.
diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile
new file mode 100644
index 000000000000..34446e1de64f
--- /dev/null
+++ b/drivers/gpu/drm/v3d/Makefile
@@ -0,0 +1,18 @@
+# Please keep these build lists sorted!
+
+# core driver code
+v3d-y := \
+	v3d_bo.o \
+	v3d_drv.o \
+	v3d_fence.o \
+	v3d_gem.o \
+	v3d_irq.o \
+	v3d_mmu.o \
+	v3d_trace_points.o \
+	v3d_sched.o
+
+v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o
+
+obj-$(CONFIG_DRM_V3D)  += v3d.o
+
+CFLAGS_v3d_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
new file mode 100644
index 000000000000..7b1e2a549a71
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2015-2018 Broadcom */
+
+/**
+ * DOC: V3D GEM BO management support
+ *
+ * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
+ * GPU and the bus, allowing us to use shmem objects for our storage
+ * instead of CMA.
+ *
+ * Physically contiguous objects may still be imported to V3D, but the
+ * driver doesn't allocate physically contiguous objects on its own.
+ * Display engines requiring physically contiguous allocations should
+ * look into Mesa's "renderonly" support (as used by the Mesa pl111
+ * driver) for an example of how to integrate with V3D.
+ *
+ * Long term, we should support evicting pages from the MMU when under
+ * memory pressure (thus the v3d_bo_get_pages() refcounting), but
+ * that's not a high priority since our systems tend to not have swap.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/pfn_t.h>
+
+#include "v3d_drv.h"
+#include "uapi/drm/v3d_drm.h"
+
+/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
+ * it for DMA.
+ */
+static int
+v3d_bo_get_pages(struct v3d_bo *bo)
+{
+	struct drm_gem_object *obj = &bo->base;
+	struct drm_device *dev = obj->dev;
+	int npages = obj->size >> PAGE_SHIFT;
+	int ret = 0;
+
+	mutex_lock(&bo->lock);
+	if (bo->pages_refcount++ != 0)
+		goto unlock;
+
+	if (!obj->import_attach) {
+		bo->pages = drm_gem_get_pages(obj);
+		if (IS_ERR(bo->pages)) {
+			ret = PTR_ERR(bo->pages);
+			goto unlock;
+		}
+
+		bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
+		if (IS_ERR(bo->sgt)) {
+			ret = PTR_ERR(bo->sgt);
+			goto put_pages;
+		}
+
+		/* Map the pages for use by the GPU. */
+		dma_map_sg(dev->dev, bo->sgt->sgl,
+			   bo->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
+		if (!bo->pages) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+
+		drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
+						 NULL, npages);
+
+		/* Note that dma-bufs come in mapped. */
+	}
+
+	mutex_unlock(&bo->lock);
+
+	return 0;
+
+put_pages:
+	drm_gem_put_pages(obj, bo->pages, true, true);
+	bo->pages = NULL;
+unlock:
+	bo->pages_refcount--;
+	mutex_unlock(&bo->lock);
+	return ret;
+}
+
+static void
+v3d_bo_put_pages(struct v3d_bo *bo)
+{
+	struct drm_gem_object *obj = &bo->base;
+
+	mutex_lock(&bo->lock);
+	if (--bo->pages_refcount == 0) {
+		if (!obj->import_attach) {
+			dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
+				     bo->sgt->nents, DMA_BIDIRECTIONAL);
+			sg_free_table(bo->sgt);
+			kfree(bo->sgt);
+			drm_gem_put_pages(obj, bo->pages, true, true);
+		} else {
+			kfree(bo->pages);
+		}
+	}
+	mutex_unlock(&bo->lock);
+}
+
+static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
+					   size_t unaligned_size)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct drm_gem_object *obj;
+	struct v3d_bo *bo;
+	size_t size = roundup(unaligned_size, PAGE_SIZE);
+	int ret;
+
+	if (size == 0)
+		return ERR_PTR(-EINVAL);
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+	obj = &bo->base;
+
+	INIT_LIST_HEAD(&bo->vmas);
+	INIT_LIST_HEAD(&bo->unref_head);
+	mutex_init(&bo->lock);
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret)
+		goto free_bo;
+
+	spin_lock(&v3d->mm_lock);
+	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
+					 obj->size >> PAGE_SHIFT,
+					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+	spin_unlock(&v3d->mm_lock);
+	if (ret)
+		goto free_obj;
+
+	return bo;
+
+free_obj:
+	drm_gem_object_release(obj);
+free_bo:
+	kfree(bo);
+	return ERR_PTR(ret);
+}
+
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+			     size_t unaligned_size)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct drm_gem_object *obj;
+	struct v3d_bo *bo;
+	int ret;
+
+	bo = v3d_bo_create_struct(dev, unaligned_size);
+	if (IS_ERR(bo))
+		return bo;
+	obj = &bo->base;
+
+	bo->resv = &bo->_resv;
+	reservation_object_init(bo->resv);
+
+	ret = v3d_bo_get_pages(bo);
+	if (ret)
+		goto free_mm;
+
+	v3d_mmu_insert_ptes(bo);
+
+	mutex_lock(&v3d->bo_lock);
+	v3d->bo_stats.num_allocated++;
+	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+	mutex_unlock(&v3d->bo_lock);
+
+	return bo;
+
+free_mm:
+	spin_lock(&v3d->mm_lock);
+	drm_mm_remove_node(&bo->node);
+	spin_unlock(&v3d->mm_lock);
+
+	drm_gem_object_release(obj);
+	kfree(bo);
+	return ERR_PTR(ret);
+}
+
+/* Called by the DRM core on the last userspace/kernel unreference of
+ * the BO.
+ */
+void v3d_free_object(struct drm_gem_object *obj)
+{
+	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
+	struct v3d_bo *bo = to_v3d_bo(obj);
+
+	mutex_lock(&v3d->bo_lock);
+	v3d->bo_stats.num_allocated--;
+	v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
+	mutex_unlock(&v3d->bo_lock);
+
+	reservation_object_fini(&bo->_resv);
+
+	v3d_bo_put_pages(bo);
+
+	if (obj->import_attach)
+		drm_prime_gem_destroy(obj, bo->sgt);
+
+	v3d_mmu_remove_ptes(bo);
+	spin_lock(&v3d->mm_lock);
+	drm_mm_remove_node(&bo->node);
+	spin_unlock(&v3d->mm_lock);
+
+	mutex_destroy(&bo->lock);
+
+	drm_gem_object_release(obj);
+	kfree(bo);
+}
+
+struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct v3d_bo *bo = to_v3d_bo(obj);
+
+	return bo->resv;
+}
+
+static void
+v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
+{
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+}
+
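+/* Fault handler for mmaps of a V3D BO: pages are inserted into the
+ * userspace mapping as pfns at fault time (vm_insert_mixed() below),
+ * which is why v3d_set_mmap_vma_flags() switches the VMA from
+ * VM_PFNMAP to VM_MIXEDMAP and makes it write-combined.
+ */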
+int v3d_gem_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct v3d_bo *bo = to_v3d_bo(obj);
+	unsigned long pfn;
+	pgoff_t pgoff;
+	int ret;
+
+	/* We don't use vmf->pgoff since that has the fake offset: */
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+	pfn = page_to_pfn(bo->pages[pgoff]);
+
+	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+
+	switch (ret) {
+	case -EAGAIN:
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	v3d_set_mmap_vma_flags(vma);
+
+	return ret;
+}
+
+int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap_obj(obj, obj->size, vma);
+	if (ret < 0)
+		return ret;
+
+	v3d_set_mmap_vma_flags(vma);
+
+	return 0;
+}
+
+struct sg_table *
+v3d_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct v3d_bo *bo = to_v3d_bo(obj);
+	int npages = obj->size >> PAGE_SHIFT;
+
+	return drm_prime_pages_to_sg(bo->pages, npages);
+}
+
+struct drm_gem_object *
+v3d_prime_import_sg_table(struct drm_device *dev,
+			  struct dma_buf_attachment *attach,
+			  struct sg_table *sgt)
+{
+	struct drm_gem_object *obj;
+	struct v3d_bo *bo;
+
+	bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
+	if (IS_ERR(bo))
+		return ERR_CAST(bo);
+	obj = &bo->base;
+
+	bo->resv = attach->dmabuf->resv;
+
+	bo->sgt = sgt;
+	v3d_bo_get_pages(bo);
+
+	v3d_mmu_insert_ptes(bo);
+
+	return obj;
+}
+
+int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_v3d_create_bo *args = data;
+	struct v3d_bo *bo = NULL;
+	int ret;
+
+	if (args->flags != 0) {
+		DRM_INFO("unknown create_bo flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	args->offset = bo->node.start << PAGE_SHIFT;
+
+	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
+	drm_gem_object_put_unlocked(&bo->base);
+
+	return ret;
+}
+
+int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_v3d_mmap_bo *args = data;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	if (args->flags != 0) {
+		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (!gem_obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+		return -ENOENT;
+	}
+
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret == 0)
+		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+	drm_gem_object_put_unlocked(gem_obj);
+
+	return ret;
+}
+
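+/* Illustrative userspace flow for the two ioctls above (a sketch only;
+ * it assumes the DRM_IOCTL_V3D_* numbers and struct layouts from the
+ * uapi header added by this patch, and plain ioctl() rather than a
+ * libdrm wrapper):
+ *
+ *   struct drm_v3d_create_bo create = { .size = 4096 };
+ *   ioctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);
+ *
+ *   struct drm_v3d_mmap_bo map = { .handle = create.handle };
+ *   ioctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);
+ *
+ *   void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                    fd, map.offset);
+ */
+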
+int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_v3d_get_bo_offset *args = data;
+	struct drm_gem_object *gem_obj;
+	struct v3d_bo *bo;
+
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (!gem_obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+		return -ENOENT;
+	}
+	bo = to_v3d_bo(gem_obj);
+
+	args->offset = bo->node.start << PAGE_SHIFT;
+
+	drm_gem_object_put_unlocked(gem_obj);
+	return 0;
+}
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
new file mode 100644
index 000000000000..4db62c545748
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2014-2018 Broadcom */
+
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+
+#define REGDEF(reg) { reg, #reg }
+struct v3d_reg_def {
+	u32 reg;
+	const char *name;
+};
+
+static const struct v3d_reg_def v3d_hub_reg_defs[] = {
+	REGDEF(V3D_HUB_AXICFG),
+	REGDEF(V3D_HUB_UIFCFG),
+	REGDEF(V3D_HUB_IDENT0),
+	REGDEF(V3D_HUB_IDENT1),
+	REGDEF(V3D_HUB_IDENT2),
+	REGDEF(V3D_HUB_IDENT3),
+	REGDEF(V3D_HUB_INT_STS),
+	REGDEF(V3D_HUB_INT_MSK_STS),
+};
+
+static const struct v3d_reg_def v3d_gca_reg_defs[] = {
+	REGDEF(V3D_GCA_SAFE_SHUTDOWN),
+	REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK),
+};
+
+static const struct v3d_reg_def v3d_core_reg_defs[] = {
+	REGDEF(V3D_CTL_IDENT0),
+	REGDEF(V3D_CTL_IDENT1),
+	REGDEF(V3D_CTL_IDENT2),
+	REGDEF(V3D_CTL_MISCCFG),
+	REGDEF(V3D_CTL_INT_STS),
+	REGDEF(V3D_CTL_INT_MSK_STS),
+	REGDEF(V3D_CLE_CT0CS),
+	REGDEF(V3D_CLE_CT0CA),
+	REGDEF(V3D_CLE_CT0EA),
+	REGDEF(V3D_CLE_CT1CS),
+	REGDEF(V3D_CLE_CT1CA),
+	REGDEF(V3D_CLE_CT1EA),
+
+	REGDEF(V3D_PTB_BPCA),
+	REGDEF(V3D_PTB_BPCS),
+
+	REGDEF(V3D_MMU_CTL),
+	REGDEF(V3D_MMU_VIO_ADDR),
+
+	REGDEF(V3D_GMP_STATUS),
+	REGDEF(V3D_GMP_CFG),
+	REGDEF(V3D_GMP_VIO_ADDR),
+};
+
+static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	int i, core;
+
+	for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) {
+		seq_printf(m, "%s (0x%04x): 0x%08x\n",
+			   v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg,
+			   V3D_READ(v3d_hub_reg_defs[i].reg));
+	}
+
+	for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+		seq_printf(m, "%s (0x%04x): 0x%08x\n",
+			   v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
+			   V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+	}
+
+	for (core = 0; core < v3d->cores; core++) {
+		for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) {
+			seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
+				   core,
+				   v3d_core_reg_defs[i].name,
+				   v3d_core_reg_defs[i].reg,
+				   V3D_CORE_READ(core,
+						 v3d_core_reg_defs[i].reg));
+		}
+	}
+
+	return 0;
+}
+
+static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	u32 ident0, ident1, ident2, ident3, cores;
+	int ret, core;
+
+	ret = pm_runtime_get_sync(v3d->dev);
+	if (ret < 0)
+		return ret;
+
+	ident0 = V3D_READ(V3D_HUB_IDENT0);
+	ident1 = V3D_READ(V3D_HUB_IDENT1);
+	ident2 = V3D_READ(V3D_HUB_IDENT2);
+	ident3 = V3D_READ(V3D_HUB_IDENT3);
+	cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
+
+	seq_printf(m, "Revision:   %d.%d.%d.%d\n",
+		   V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER),
+		   V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV),
+		   V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV),
+		   V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX));
+	seq_printf(m, "MMU:        %s\n",
+		   (ident2 & V3D_HUB_IDENT2_WITH_MMU) ? "yes" : "no");
+	seq_printf(m, "TFU:        %s\n",
+		   (ident1 & V3D_HUB_IDENT1_WITH_TFU) ? "yes" : "no");
+	seq_printf(m, "TSY:        %s\n",
+		   (ident1 & V3D_HUB_IDENT1_WITH_TSY) ? "yes" : "no");
+	seq_printf(m, "MSO:        %s\n",
+		   (ident1 & V3D_HUB_IDENT1_WITH_MSO) ? "yes" : "no");
+	seq_printf(m, "L3C:        %s (%dkb)\n",
+		   (ident1 & V3D_HUB_IDENT1_WITH_L3C) ? "yes" : "no",
+		   V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB));
+
+	for (core = 0; core < cores; core++) {
+		u32 misccfg;
+		u32 nslc, ntmu, qups;
+
+		ident0 = V3D_CORE_READ(core, V3D_CTL_IDENT0);
+		ident1 = V3D_CORE_READ(core, V3D_CTL_IDENT1);
+		ident2 = V3D_CORE_READ(core, V3D_CTL_IDENT2);
+		misccfg = V3D_CORE_READ(core, V3D_CTL_MISCCFG);
+
+		nslc = V3D_GET_FIELD(ident1, V3D_IDENT1_NSLC);
+		ntmu = V3D_GET_FIELD(ident1, V3D_IDENT1_NTMU);
+		qups = V3D_GET_FIELD(ident1, V3D_IDENT1_QUPS);
+
+		seq_printf(m, "Core %d:\n", core);
+		seq_printf(m, "  Revision:     %d.%d\n",
+			   V3D_GET_FIELD(ident0, V3D_IDENT0_VER),
+			   V3D_GET_FIELD(ident1, V3D_IDENT1_REV));
+		seq_printf(m, "  Slices:       %d\n", nslc);
+		seq_printf(m, "  TMUs:         %d\n", nslc * ntmu);
+		seq_printf(m, "  QPUs:         %d\n", nslc * qups);
+		seq_printf(m, "  Semaphores:   %d\n",
+			   V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+		seq_printf(m, "  BCG int:      %d\n",
+			   (ident2 & V3D_IDENT2_BCG_INT) != 0);
+		seq_printf(m, "  Override TMU: %d\n",
+			   (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
+	}
+
+	pm_runtime_mark_last_busy(v3d->dev);
+	pm_runtime_put_autosuspend(v3d->dev);
+
+	return 0;
+}
+
+static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+
+	mutex_lock(&v3d->bo_lock);
+	seq_printf(m, "allocated bos:          %d\n",
+		   v3d->bo_stats.num_allocated);
+	seq_printf(m, "allocated bo size (kb): %ld\n",
+		   (long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10));
+	mutex_unlock(&v3d->bo_lock);
+
+	return 0;
+}
+
+static const struct drm_info_list v3d_debugfs_list[] = {
+	{"v3d_ident", v3d_v3d_debugfs_ident, 0},
+	{"v3d_regs", v3d_v3d_debugfs_regs, 0},
+	{"bo_stats", v3d_debugfs_bo_stats, 0},
+};
+
+int
+v3d_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(v3d_debugfs_list,
+					ARRAY_SIZE(v3d_debugfs_list),
+					minor->debugfs_root, minor);
+}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
new file mode 100644
index 000000000000..38e8041b5f0c
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2014-2018 Broadcom */
+
+/**
+ * DOC: Broadcom V3D Graphics Driver
+ *
+ * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
+ * For V3D 2.x support, see the VC4 driver.
+ *
+ * Currently only single-core rendering using the binner and renderer
+ * is supported.  The TFU (texture formatting unit) and V3D 4.x's CSD
+ * (compute shader dispatch) are not yet supported.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include "uapi/drm/v3d_drm.h"
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+
+#define DRIVER_NAME "v3d"
+#define DRIVER_DESC "Broadcom V3D graphics"
+#define DRIVER_DATE "20180419"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#ifdef CONFIG_PM
+static int v3d_runtime_suspend(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct v3d_dev *v3d = to_v3d_dev(drm);
+
+	v3d_irq_disable(v3d);
+
+	clk_disable_unprepare(v3d->clk);
+
+	return 0;
+}
+
+static int v3d_runtime_resume(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct v3d_dev *v3d = to_v3d_dev(drm);
+	int ret;
+
+	ret = clk_prepare_enable(v3d->clk);
+	if (ret != 0)
+		return ret;
+
+	/* XXX: VPM base */
+
+	v3d_mmu_set_page_table(v3d);
+	v3d_irq_enable(v3d);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops v3d_v3d_pm_ops = {
+	SET_RUNTIME_PM_OPS(v3d_runtime_suspend, v3d_runtime_resume, NULL)
+};
+
+static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct drm_v3d_get_param *args = data;
+	int ret;
+	static const u32 reg_map[] = {
+		[DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_UIFCFG,
+		[DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_IDENT1,
+		[DRM_V3D_PARAM_V3D_HUB_IDENT2] = V3D_HUB_IDENT2,
+		[DRM_V3D_PARAM_V3D_HUB_IDENT3] = V3D_HUB_IDENT3,
+		[DRM_V3D_PARAM_V3D_CORE0_IDENT0] = V3D_CTL_IDENT0,
+		[DRM_V3D_PARAM_V3D_CORE0_IDENT1] = V3D_CTL_IDENT1,
+		[DRM_V3D_PARAM_V3D_CORE0_IDENT2] = V3D_CTL_IDENT2,
+	};
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	/* Note that DRM_V3D_PARAM_V3D_CORE0_IDENT0 is 0, so we need
+	 * to explicitly allow it in the "register in our
+	 * parameter map" check.
+	 */
+	if (args->param < ARRAY_SIZE(reg_map) &&
+	    (reg_map[args->param] ||
+	     args->param == DRM_V3D_PARAM_V3D_CORE0_IDENT0)) {
+		u32 offset = reg_map[args->param];
+
+		if (args->value != 0)
+			return -EINVAL;
+
+		ret = pm_runtime_get_sync(v3d->dev);
+		if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
+		    args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) {
+			args->value = V3D_CORE_READ(0, offset);
+		} else {
+			args->value = V3D_READ(offset);
+		}
+		pm_runtime_mark_last_busy(v3d->dev);
+		pm_runtime_put_autosuspend(v3d->dev);
+		return 0;
+	}
+
+	/* Any params that aren't just register reads would go here. */
+
+	DRM_DEBUG("Unknown parameter %d\n", args->param);
+	return -EINVAL;
+}
+
+static int
+v3d_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv;
+	int i;
+
+	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
+	if (!v3d_priv)
+		return -ENOMEM;
+
+	v3d_priv->v3d = v3d;
+
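+	/* Give each client (fd) its own scheduler entity per queue, so
+	 * submissions from different clients are arbitrated by the DRM
+	 * GPU scheduler.
+	 */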
+	for (i = 0; i < V3D_MAX_QUEUES; i++) {
+		drm_sched_entity_init(&v3d->queue[i].sched,
+				      &v3d_priv->sched_entity[i],
+				      &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
+				      32, NULL);
+	}
+
+	file->driver_priv = v3d_priv;
+
+	return 0;
+}
+
+static void
+v3d_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file->driver_priv;
+	enum v3d_queue q;
+
+	for (q = 0; q < V3D_MAX_QUEUES; q++) {
+		drm_sched_entity_fini(&v3d->queue[q].sched,
+				      &v3d_priv->sched_entity[q]);
+	}
+
+	kfree(v3d_priv);
+}
+
+static const struct file_operations v3d_drm_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = v3d_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+	.compat_ioctl = drm_compat_ioctl,
+	.llseek = noop_llseek,
+};
+
+/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
+ * protection between clients.  Note that render nodes would be
+ * able to submit CLs that could access BOs from clients authenticated
+ * with the master node.
+ */
+static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(V3D_WAIT_BO, v3d_wait_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(V3D_CREATE_BO, v3d_create_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
+};
+
+static const struct vm_operations_struct v3d_vm_ops = {
+	.fault = v3d_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static struct drm_driver v3d_drm_driver = {
+	.driver_features = (DRIVER_GEM |
+			    DRIVER_RENDER |
+			    DRIVER_PRIME |
+			    DRIVER_SYNCOBJ),
+
+	.open = v3d_open,
+	.postclose = v3d_postclose,
+
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = v3d_debugfs_init,
+#endif
+
+	.gem_free_object_unlocked = v3d_free_object,
+	.gem_vm_ops = &v3d_vm_ops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_res_obj = v3d_prime_res_obj,
+	.gem_prime_get_sg_table	= v3d_prime_get_sg_table,
+	.gem_prime_import_sg_table = v3d_prime_import_sg_table,
+	.gem_prime_mmap = v3d_prime_mmap,
+
+	.ioctls = v3d_drm_ioctls,
+	.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
+	.fops = &v3d_drm_fops,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static const struct of_device_id v3d_of_match[] = {
+	{ .compatible = "brcm,7268-v3d" },
+	{ .compatible = "brcm,7278-v3d" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, v3d_of_match);
+
+static int
+map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
+{
+	struct resource *res =
+		platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name);
+
+	*regs = devm_ioremap_resource(v3d->dev, res);
+	return PTR_ERR_OR_ZERO(*regs);
+}
+
+static int v3d_platform_drm_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct drm_device *drm;
+	struct v3d_dev *v3d;
+	int ret;
+	u32 ident1;
+
+	dev->coherent_dma_mask = DMA_BIT_MASK(36);
+
+	v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
+	if (!v3d)
+		return -ENOMEM;
+	v3d->dev = dev;
+	v3d->pdev = pdev;
+	drm = &v3d->drm;
+
+	ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
+	if (ret)
+		goto dev_free;
+
+	ret = map_regs(v3d, &v3d->hub_regs, "hub");
+	if (ret)
+		goto dev_free;
+
+	ret = map_regs(v3d, &v3d->core_regs[0], "core0");
+	if (ret)
+		goto dev_free;
+
+	ident1 = V3D_READ(V3D_HUB_IDENT1);
+	v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
+		    V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
+	v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
+	WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
+
+	if (v3d->ver < 41) {
+		ret = map_regs(v3d, &v3d->gca_regs, "gca");
+		if (ret)
+			goto dev_free;
+	}
+
+	v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
+					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+	if (!v3d->mmu_scratch) {
+		dev_err(dev, "Failed to allocate MMU scratch page\n");
+		ret = -ENOMEM;
+		goto dev_free;
+	}
+
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, 50);
+	pm_runtime_enable(dev);
+
+	ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev);
+	if (ret)
+		goto dma_free;
+
+	platform_set_drvdata(pdev, drm);
+	drm->dev_private = v3d;
+
+	ret = v3d_gem_init(drm);
+	if (ret)
+		goto dev_destroy;
+
+	v3d_irq_init(v3d);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto gem_destroy;
+
+	return 0;
+
+gem_destroy:
+	v3d_gem_destroy(drm);
+dev_destroy:
+	drm_dev_put(drm);
+dma_free:
+	dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+dev_free:
+	kfree(v3d);
+	return ret;
+}
+
+static int v3d_platform_drm_remove(struct platform_device *pdev)
+{
+	struct drm_device *drm = platform_get_drvdata(pdev);
+	struct v3d_dev *v3d = to_v3d_dev(drm);
+
+	drm_dev_unregister(drm);
+
+	v3d_gem_destroy(drm);
+
+	drm_dev_put(drm);
+
+	dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+
+	return 0;
+}
+
+static struct platform_driver v3d_platform_driver = {
+	.probe		= v3d_platform_drm_probe,
+	.remove		= v3d_platform_drm_remove,
+	.driver		= {
+		.name	= "v3d",
+		.of_match_table = v3d_of_match,
+	},
+};
+
+static int __init v3d_drm_register(void)
+{
+	return platform_driver_register(&v3d_platform_driver);
+}
+
+static void __exit v3d_drm_unregister(void)
+{
+	platform_driver_unregister(&v3d_platform_driver);
+}
+
+module_init(v3d_drm_register);
+module_exit(v3d_drm_unregister);
+
+MODULE_ALIAS("platform:v3d-drm");
+MODULE_DESCRIPTION("Broadcom V3D DRM Driver");
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
new file mode 100644
index 000000000000..a043ac3aae98
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2015-2018 Broadcom */
+
+#include <linux/reservation.h>
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_gem.h>
+#include <drm/gpu_scheduler.h>
+
+#define GMP_GRANULARITY (128 * 1024)
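+/* With 4KB pages this makes BO placements in the GPU address space
+ * 32-page aligned (GMP_GRANULARITY >> PAGE_SHIFT), so a future GMP
+ * implementation can mask protection at 128KB boundaries; the GMP
+ * itself is not programmed yet.
+ */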
+
+/* Enum for each of the V3D queues.  We maintain various queue
+ * tracking as an array because at some point we'll want to support
+ * the TFU (texture formatting unit) as another queue.
+ */
+enum v3d_queue {
+	V3D_BIN,
+	V3D_RENDER,
+};
+
+#define V3D_MAX_QUEUES (V3D_RENDER + 1)
+
+struct v3d_queue_state {
+	struct drm_gpu_scheduler sched;
+
+	u64 fence_context;
+	u64 emit_seqno;
+	u64 finished_seqno;
+};
+
+struct v3d_dev {
+	struct drm_device drm;
+
+	/* Short representation (e.g. 33, 41) of the V3D tech version
+	 * and revision.
+	 */
+	int ver;
+
+	struct device *dev;
+	struct platform_device *pdev;
+	void __iomem *hub_regs;
+	void __iomem *core_regs[3];
+	void __iomem *bridge_regs;
+	void __iomem *gca_regs;
+	struct clk *clk;
+
+	/* Virtual and DMA addresses of the single shared page table. */
+	volatile u32 *pt;
+	dma_addr_t pt_paddr;
+
+	/* Virtual and DMA addresses of the MMU's scratch page.  When
+	 * a read or write is invalid in the MMU, it will be
+	 * redirected here.
+	 */
+	void *mmu_scratch;
+	dma_addr_t mmu_scratch_paddr;
+
+	/* Number of V3D cores. */
+	u32 cores;
+
+	/* Allocator managing the address space.  All units are in
+	 * number of pages.
+	 */
+	struct drm_mm mm;
+	spinlock_t mm_lock;
+
+	struct work_struct overflow_mem_work;
+
+	struct v3d_exec_info *bin_job;
+	struct v3d_exec_info *render_job;
+
+	struct v3d_queue_state queue[V3D_MAX_QUEUES];
+
+	/* Spinlock used to synchronize the overflow memory
+	 * management against bin job submission.
+	 */
+	spinlock_t job_lock;
+
+	/* Protects bo_stats */
+	struct mutex bo_lock;
+
+	/* Lock taken when resetting the GPU, to keep multiple
+	 * processes from trying to park the scheduler threads and
+	 * reset at once.
+	 */
+	struct mutex reset_lock;
+
+	struct {
+		u32 num_allocated;
+		u32 pages_allocated;
+	} bo_stats;
+};
+
+static inline struct v3d_dev *
+to_v3d_dev(struct drm_device *dev)
+{
+	return (struct v3d_dev *)dev->dev_private;
+}
+
+/* The per-fd struct, which tracks the MMU mappings. */
+struct v3d_file_priv {
+	struct v3d_dev *v3d;
+
+	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
+};
+
+/* Tracks a mapping of a BO into a per-fd address space */
+struct v3d_vma {
+	struct v3d_page_table *pt;
+	struct list_head list; /* entry in v3d_bo.vmas */
+};
+
+struct v3d_bo {
+	struct drm_gem_object base;
+
+	struct mutex lock;
+
+	struct drm_mm_node node;
+
+	u32 pages_refcount;
+	struct page **pages;
+	struct sg_table *sgt;
+	void *vaddr;
+
+	struct list_head vmas;    /* list of v3d_vma */
+
+	/* List entry for the BO's position in
+	 * v3d_exec_info->unref_list
+	 */
+	struct list_head unref_head;
+
+	/* normally (resv == &_resv) except for imported bo's */
+	struct reservation_object *resv;
+	struct reservation_object _resv;
+};
+
+static inline struct v3d_bo *
+to_v3d_bo(struct drm_gem_object *bo)
+{
+	return (struct v3d_bo *)bo;
+}
+
+struct v3d_fence {
+	struct dma_fence base;
+	struct drm_device *dev;
+	/* v3d seqno for signaled() test */
+	u64 seqno;
+	enum v3d_queue queue;
+};
+
+static inline struct v3d_fence *
+to_v3d_fence(struct dma_fence *fence)
+{
+	return (struct v3d_fence *)fence;
+}
+
+#define V3D_READ(offset) readl(v3d->hub_regs + offset)
+#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)
+
+#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
+#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)
+
+#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
+#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)
+
+#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
+#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
+
+struct v3d_job {
+	struct drm_sched_job base;
+
+	struct v3d_exec_info *exec;
+
+	/* An optional fence userspace can pass in for the job to depend on. */
+	struct dma_fence *in_fence;
+
+	/* v3d fence to be signaled by IRQ handler when the job is complete. */
+	struct dma_fence *done_fence;
+
+	/* GPU virtual addresses of the start/end of the CL job. */
+	u32 start, end;
+};
+
+struct v3d_exec_info {
+	struct v3d_dev *v3d;
+
+	struct v3d_job bin, render;
+
+	/* Fence for when the scheduler considers the binner to be
+	 * done, for render to depend on.
+	 */
+	struct dma_fence *bin_done_fence;
+
+	struct kref refcount;
+
+	/* This is the array of BOs that were looked up at the start of exec. */
+	struct v3d_bo **bo;
+	u32 bo_count;
+
+	/* List of overflow BOs used in the job that need to be
+	 * released once the job is complete.
+	 */
+	struct list_head unref_list;
+
+	/* Submitted tile memory allocation start/size, tile state. */
+	u32 qma, qms, qts;
+};
+
+/**
+ * _wait_for - magic (register) wait macro
+ *
+ * Does the right thing for modeset paths when run under kdgb or similar atomic
+ * contexts. Note that it's important that we check the condition again after
+ * having timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define wait_for(COND, MS) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
+	int ret__ = 0;							\
+	while (!(COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			if (!(COND))					\
+				ret__ = -ETIMEDOUT;			\
+			break;						\
+		}							\
+		msleep(1);					\
+	}								\
+	ret__;								\
+})
+
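+/* Typical use, as in the register-poll helpers in v3d_gem.c and
+ * v3d_mmu.c:
+ *
+ *   if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
+ *                  V3D_L2TCACTL_L2TFLS), 100))
+ *           DRM_ERROR("Timeout waiting for L2T flush\n");
+ */
+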
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+	/* nsecs_to_jiffies64() does not guard against overflow */
+	if (NSEC_PER_SEC % HZ &&
+	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+		return MAX_JIFFY_OFFSET;
+
+	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
+
+/* v3d_bo.c */
+void v3d_free_object(struct drm_gem_object *gem_obj);
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+			     size_t size);
+int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int v3d_gem_fault(struct vm_fault *vmf);
+int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
+struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
+int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
+						 struct dma_buf_attachment *attach,
+						 struct sg_table *sgt);
+
+/* v3d_debugfs.c */
+int v3d_debugfs_init(struct drm_minor *minor);
+
+/* v3d_fence.c */
+extern const struct dma_fence_ops v3d_fence_ops;
+struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
+
+/* v3d_gem.c */
+int v3d_gem_init(struct drm_device *dev);
+void v3d_gem_destroy(struct drm_device *dev);
+int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+void v3d_exec_put(struct v3d_exec_info *exec);
+void v3d_reset(struct v3d_dev *v3d);
+void v3d_invalidate_caches(struct v3d_dev *v3d);
+void v3d_flush_caches(struct v3d_dev *v3d);
+
+/* v3d_irq.c */
+void v3d_irq_init(struct v3d_dev *v3d);
+void v3d_irq_enable(struct v3d_dev *v3d);
+void v3d_irq_disable(struct v3d_dev *v3d);
+void v3d_irq_reset(struct v3d_dev *v3d);
+
+/* v3d_mmu.c */
+int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
+		       u32 *offset);
+int v3d_mmu_set_page_table(struct v3d_dev *v3d);
+void v3d_mmu_insert_ptes(struct v3d_bo *bo);
+void v3d_mmu_remove_ptes(struct v3d_bo *bo);
+
+/* v3d_sched.c */
+int v3d_sched_init(struct v3d_dev *v3d);
+void v3d_sched_fini(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
new file mode 100644
index 000000000000..087d49c8cb12
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2017-2018 Broadcom */
+
+#include "v3d_drv.h"
+
+struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
+{
+	struct v3d_fence *fence;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return ERR_PTR(-ENOMEM);
+
+	fence->dev = &v3d->drm;
+	fence->queue = queue;
+	fence->seqno = ++v3d->queue[queue].emit_seqno;
+	dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock,
+		       v3d->queue[queue].fence_context, fence->seqno);
+
+	return &fence->base;
+}
+
+static const char *v3d_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "v3d";
+}
+
+static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
+{
+	struct v3d_fence *f = to_v3d_fence(fence);
+
+	if (f->queue == V3D_BIN)
+		return "v3d-bin";
+	else
+		return "v3d-render";
+}
+
+static bool v3d_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+static bool v3d_fence_signaled(struct dma_fence *fence)
+{
+	struct v3d_fence *f = to_v3d_fence(fence);
+	struct v3d_dev *v3d = to_v3d_dev(f->dev);
+
+	return v3d->queue[f->queue].finished_seqno >= f->seqno;
+}
+
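+/* The flush-done interrupt handlers in v3d_irq.c bump the queue's
+ * finished_seqno and signal the job's done_fence; v3d_fence_create()
+ * assigns monotonically increasing seqnos, so the comparison above is
+ * what eventually reports a fence as signaled.
+ */
+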
+const struct dma_fence_ops v3d_fence_ops = {
+	.get_driver_name = v3d_fence_get_driver_name,
+	.get_timeline_name = v3d_fence_get_timeline_name,
+	.enable_signaling = v3d_fence_enable_signaling,
+	.signaled = v3d_fence_signaled,
+	.wait = dma_fence_default_wait,
+	.release = dma_fence_free,
+};
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
new file mode 100644
index 000000000000..b513f9189caf
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2014-2018 Broadcom */
+
+#include <drm/drmP.h>
+#include <drm/drm_syncobj.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/sched/signal.h>
+
+#include "uapi/drm/v3d_drm.h"
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+#include "v3d_trace.h"
+
+static void
+v3d_init_core(struct v3d_dev *v3d, int core)
+{
+	/* Set OVRTMUOUT, which means that the texture sampler uniform
+	 * configuration's tmu output type field is used, instead of
+	 * using the hardware default behavior based on the texture
+	 * type.  If you want the default behavior, you can still put
+	 * "2" in the indirect texture state's output_type field.
+	 */
+	V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
+
+	/* Whenever we flush the L2T cache, we always want to flush
+	 * the whole thing.
+	 */
+	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
+	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
+}
+
+/* Sets invariant state for the HW. */
+static void
+v3d_init_hw_state(struct v3d_dev *v3d)
+{
+	v3d_init_core(v3d, 0);
+}
+
+static void
+v3d_idle_axi(struct v3d_dev *v3d, int core)
+{
+	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);
+
+	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
+		      (V3D_GMP_STATUS_RD_COUNT_MASK |
+		       V3D_GMP_STATUS_WR_COUNT_MASK |
+		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
+		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
+	}
+}
+
+static void
+v3d_idle_gca(struct v3d_dev *v3d)
+{
+	if (v3d->ver >= 41)
+		return;
+
+	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
+
+	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
+		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
+		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
+		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
+	}
+}
+
+static void
+v3d_reset_v3d(struct v3d_dev *v3d)
+{
+	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);
+
+	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
+		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
+				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
+		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);
+
+		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
+		 * of the unit, so reset it to its power-on value here.
+		 */
+		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
+	} else {
+		WARN_ON_ONCE(V3D_GET_FIELD(version,
+					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
+		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
+				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
+		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
+	}
+
+	v3d_init_hw_state(v3d);
+}
+
+void
+v3d_reset(struct v3d_dev *v3d)
+{
+	struct drm_device *dev = &v3d->drm;
+
+	DRM_ERROR("Resetting GPU.\n");
+	trace_v3d_reset_begin(dev);
+
+	/* XXX: only needed for safe powerdown, not reset. */
+	if (false)
+		v3d_idle_axi(v3d, 0);
+
+	v3d_idle_gca(v3d);
+	v3d_reset_v3d(v3d);
+
+	v3d_mmu_set_page_table(v3d);
+	v3d_irq_reset(v3d);
+
+	trace_v3d_reset_end(dev);
+}
+
+static void
+v3d_flush_l3(struct v3d_dev *v3d)
+{
+	if (v3d->ver < 41) {
+		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
+
+		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
+			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
+
+		if (v3d->ver < 33) {
+			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
+				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
+		}
+	}
+}
+
+/* Invalidates the (read-only) L2 cache. */
+static void
+v3d_invalidate_l2(struct v3d_dev *v3d, int core)
+{
+	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
+		       V3D_L2CACTL_L2CCLR |
+		       V3D_L2CACTL_L2CENA);
+}
+
+static void
+v3d_invalidate_l1td(struct v3d_dev *v3d, int core)
+{
+	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
+	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
+		       V3D_L2TCACTL_L2TFLS), 100)) {
+		DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
+	}
+}
+
+/* Invalidates texture L2 cachelines */
+static void
+v3d_flush_l2t(struct v3d_dev *v3d, int core)
+{
+	v3d_invalidate_l1td(v3d, core);
+
+	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
+		       V3D_L2TCACTL_L2TFLS |
+		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
+	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
+		       V3D_L2TCACTL_L2TFLS), 100)) {
+		DRM_ERROR("Timeout waiting for L2T flush\n");
+	}
+}
+
+/* Invalidates the slice caches.  These are read-only caches. */
+static void
+v3d_invalidate_slices(struct v3d_dev *v3d, int core)
+{
+	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
+		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
+		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
+		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
+		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
+}
+
+/* Invalidates texture L2 cachelines */
+static void
+v3d_invalidate_l2t(struct v3d_dev *v3d, int core)
+{
+	V3D_CORE_WRITE(core,
+		       V3D_CTL_L2TCACTL,
+		       V3D_L2TCACTL_L2TFLS |
+		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM));
+	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
+		       V3D_L2TCACTL_L2TFLS), 100)) {
+		DRM_ERROR("Timeout waiting for L2T invalidate\n");
+	}
+}
+
+void
+v3d_invalidate_caches(struct v3d_dev *v3d)
+{
+	v3d_flush_l3(v3d);
+
+	v3d_invalidate_l2(v3d, 0);
+	v3d_invalidate_slices(v3d, 0);
+	v3d_flush_l2t(v3d, 0);
+}
+
+void
+v3d_flush_caches(struct v3d_dev *v3d)
+{
+	v3d_invalidate_l1td(v3d, 0);
+	v3d_invalidate_l2t(v3d, 0);
+}
+
+static void
+v3d_attach_object_fences(struct v3d_exec_info *exec)
+{
+	struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
+	struct v3d_bo *bo;
+	int i;
+
+	for (i = 0; i < exec->bo_count; i++) {
+		bo = to_v3d_bo(&exec->bo[i]->base);
+
+		/* XXX: Use shared fences for read-only objects. */
+		reservation_object_add_excl_fence(bo->resv, out_fence);
+	}
+}
+
+static void
+v3d_unlock_bo_reservations(struct drm_device *dev,
+			   struct v3d_exec_info *exec,
+			   struct ww_acquire_ctx *acquire_ctx)
+{
+	int i;
+
+	for (i = 0; i < exec->bo_count; i++) {
+		struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);
+
+		ww_mutex_unlock(&bo->resv->lock);
+	}
+
+	ww_acquire_fini(acquire_ctx);
+}
+
+/* Takes the reservation lock on all the BOs being referenced, so that
+ * at queue submit time we can update the reservations.
+ *
+ * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
+ * (all of which are on exec->unref_list).  They're entirely private
+ * to v3d, so we don't attach dma-buf fences to them.
+ */
+static int
+v3d_lock_bo_reservations(struct drm_device *dev,
+			 struct v3d_exec_info *exec,
+			 struct ww_acquire_ctx *acquire_ctx)
+{
+	int contended_lock = -1;
+	int i, ret;
+	struct v3d_bo *bo;
+
+	ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
+retry:
+	if (contended_lock != -1) {
+		bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+						       acquire_ctx);
+		if (ret) {
+			ww_acquire_done(acquire_ctx);
+			return ret;
+		}
+	}
+
+	for (i = 0; i < exec->bo_count; i++) {
+		if (i == contended_lock)
+			continue;
+
+		bo = to_v3d_bo(&exec->bo[i]->base);
+
+		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+		if (ret) {
+			int j;
+
+			for (j = 0; j < i; j++) {
+				bo = to_v3d_bo(&exec->bo[j]->base);
+				ww_mutex_unlock(&bo->resv->lock);
+			}
+
+			if (contended_lock != -1 && contended_lock >= i) {
+				bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+
+				ww_mutex_unlock(&bo->resv->lock);
+			}
+
+			if (ret == -EDEADLK) {
+				contended_lock = i;
+				goto retry;
+			}
+
+			ww_acquire_done(acquire_ctx);
+			return ret;
+		}
+	}
+
+	ww_acquire_done(acquire_ctx);
+
+	/* Reserve space for our shared (read-only) fence references,
+	 * before we commit the CL to the hardware.
+	 */
+	for (i = 0; i < exec->bo_count; i++) {
+		bo = to_v3d_bo(&exec->bo[i]->base);
+
+		ret = reservation_object_reserve_shared(bo->resv);
+		if (ret) {
+			v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @args: IOCTL argument carrying the job's BO handle list
+ * @exec: V3D job being set up
+ *
+ * The command validator needs to reference BOs by their index within
+ * the submitted job's BO list.  This does the validation of the job's
+ * BO list and reference counting for the lifetime of the job.
+ *
+ * Note that this function doesn't need to unreference the BOs on
+ * failure, because that will happen at v3d_exec_cleanup() time.
+ */
+static int
+v3d_cl_lookup_bos(struct drm_device *dev,
+		  struct drm_file *file_priv,
+		  struct drm_v3d_submit_cl *args,
+		  struct v3d_exec_info *exec)
+{
+	u32 *handles;
+	int ret = 0;
+	int i;
+
+	exec->bo_count = args->bo_handle_count;
+
+	if (!exec->bo_count) {
+		/* See comment on bo_index for why we have to check
+		 * this.
+		 */
+		DRM_DEBUG("Rendering requires BOs\n");
+		return -EINVAL;
+	}
+
+	exec->bo = kvmalloc_array(exec->bo_count,
+				  sizeof(*exec->bo),
+				  GFP_KERNEL | __GFP_ZERO);
+	if (!exec->bo) {
+		DRM_DEBUG("Failed to allocate validated BO pointers\n");
+		return -ENOMEM;
+	}
+
+	handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL);
+	if (!handles) {
+		ret = -ENOMEM;
+		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
+		goto fail;
+	}
+
+	if (copy_from_user(handles,
+			   (void __user *)(uintptr_t)args->bo_handles,
+			   exec->bo_count * sizeof(u32))) {
+		ret = -EFAULT;
+		DRM_DEBUG("Failed to copy in GEM handles\n");
+		goto fail;
+	}
+
+	spin_lock(&file_priv->table_lock);
+	for (i = 0; i < exec->bo_count; i++) {
+		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
+						     handles[i]);
+		if (!bo) {
+			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+				  i, handles[i]);
+			ret = -ENOENT;
+			spin_unlock(&file_priv->table_lock);
+			goto fail;
+		}
+		drm_gem_object_get(bo);
+		exec->bo[i] = to_v3d_bo(bo);
+	}
+	spin_unlock(&file_priv->table_lock);
+
+fail:
+	kvfree(handles);
+	return ret;
+}
+
+static void
+v3d_exec_cleanup(struct kref *ref)
+{
+	struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info,
+						  refcount);
+	struct v3d_dev *v3d = exec->v3d;
+	unsigned int i;
+	struct v3d_bo *bo, *save;
+
+	dma_fence_put(exec->bin.in_fence);
+	dma_fence_put(exec->render.in_fence);
+
+	dma_fence_put(exec->bin.done_fence);
+	dma_fence_put(exec->render.done_fence);
+
+	dma_fence_put(exec->bin_done_fence);
+
+	for (i = 0; i < exec->bo_count; i++)
+		drm_gem_object_put_unlocked(&exec->bo[i]->base);
+	kvfree(exec->bo);
+
+	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
+		drm_gem_object_put_unlocked(&bo->base);
+	}
+
+	pm_runtime_mark_last_busy(v3d->dev);
+	pm_runtime_put_autosuspend(v3d->dev);
+
+	kfree(exec);
+}
+
+void v3d_exec_put(struct v3d_exec_info *exec)
+{
+	kref_put(&exec->refcount, v3d_exec_cleanup);
+}
+
+int
+v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	int ret;
+	struct drm_v3d_wait_bo *args = data;
+	struct drm_gem_object *gem_obj;
+	struct v3d_bo *bo;
+	ktime_t start = ktime_get();
+	u64 delta_ns;
+	unsigned long timeout_jiffies =
+		nsecs_to_jiffies_timeout(args->timeout_ns);
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (!gem_obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+	bo = to_v3d_bo(gem_obj);
+
+	ret = reservation_object_wait_timeout_rcu(bo->resv,
+						  true, true,
+						  timeout_jiffies);
+
+	if (ret == 0)
+		ret = -ETIME;
+	else if (ret > 0)
+		ret = 0;
+
+	/* Decrement the user's timeout, in case we got interrupted
+	 * such that the ioctl will be restarted.
+	 */
+	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+	if (delta_ns < args->timeout_ns)
+		args->timeout_ns -= delta_ns;
+	else
+		args->timeout_ns = 0;
+
+	/* Asked to wait beyond the jiffie/scheduler precision? */
+	if (ret == -ETIME && args->timeout_ns)
+		ret = -EAGAIN;
+
+	drm_gem_object_put_unlocked(gem_obj);
+
+	return ret;
+}
+
+/**
+ * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * This is the main entrypoint for userspace to submit a 3D frame to
+ * the GPU.  Userspace provides the binner command list (if
+ * applicable) and the render command list, and the kernel queues both
+ * jobs on the scheduler, with the render job depending on the binner
+ * job's completion.
+ */
+int
+v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_cl *args = data;
+	struct v3d_exec_info *exec;
+	struct ww_acquire_ctx acquire_ctx;
+	struct drm_syncobj *sync_out;
+	int ret = 0;
+
+	if (args->pad != 0) {
+		DRM_INFO("pad must be zero: %d\n", args->pad);
+		return -EINVAL;
+	}
+
+	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
+	if (!exec)
+		return -ENOMEM;
+
+	ret = pm_runtime_get_sync(v3d->dev);
+	if (ret < 0) {
+		kfree(exec);
+		return ret;
+	}
+
+	kref_init(&exec->refcount);
+
+	ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
+				     &exec->bin.in_fence);
+	if (ret == -EINVAL)
+		goto fail;
+
+	ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
+				     &exec->render.in_fence);
+	if (ret == -EINVAL)
+		goto fail;
+
+	exec->qma = args->qma;
+	exec->qms = args->qms;
+	exec->qts = args->qts;
+	exec->bin.exec = exec;
+	exec->bin.start = args->bcl_start;
+	exec->bin.end = args->bcl_end;
+	exec->render.exec = exec;
+	exec->render.start = args->rcl_start;
+	exec->render.end = args->rcl_end;
+	exec->v3d = v3d;
+	INIT_LIST_HEAD(&exec->unref_list);
+
+	ret = v3d_cl_lookup_bos(dev, file_priv, args, exec);
+	if (ret)
+		goto fail;
+
+	ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	if (exec->bin.start != exec->bin.end) {
+		ret = drm_sched_job_init(&exec->bin.base,
+					 &v3d->queue[V3D_BIN].sched,
+					 &v3d_priv->sched_entity[V3D_BIN],
+					 v3d_priv);
+		if (ret)
+			goto fail_unreserve;
+
+		exec->bin_done_fence =
+			dma_fence_get(&exec->bin.base.s_fence->finished);
+
+		kref_get(&exec->refcount); /* put by scheduler job completion */
+		drm_sched_entity_push_job(&exec->bin.base,
+					  &v3d_priv->sched_entity[V3D_BIN]);
+	}
+
+	ret = drm_sched_job_init(&exec->render.base,
+				 &v3d->queue[V3D_RENDER].sched,
+				 &v3d_priv->sched_entity[V3D_RENDER],
+				 v3d_priv);
+	if (ret)
+		goto fail_unreserve;
+
+	kref_get(&exec->refcount); /* put by scheduler job completion */
+	drm_sched_entity_push_job(&exec->render.base,
+				  &v3d_priv->sched_entity[V3D_RENDER]);
+
+	v3d_attach_object_fences(exec);
+
+	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+
+	/* Update the return sync object for the job. */
+	sync_out = drm_syncobj_find(file_priv, args->out_sync);
+	if (sync_out) {
+		drm_syncobj_replace_fence(sync_out,
+					  &exec->render.base.s_fence->finished);
+		drm_syncobj_put(sync_out);
+	}
+
+	v3d_exec_put(exec);
+
+	return 0;
+
+fail_unreserve:
+	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+fail:
+	v3d_exec_put(exec);
+
+	return ret;
+}
+
+int
+v3d_gem_init(struct drm_device *dev)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	u32 pt_size = 4096 * 1024;
+	int ret, i;
+
+	for (i = 0; i < V3D_MAX_QUEUES; i++)
+		v3d->queue[i].fence_context = dma_fence_context_alloc(1);
+
+	spin_lock_init(&v3d->mm_lock);
+	spin_lock_init(&v3d->job_lock);
+	mutex_init(&v3d->bo_lock);
+	mutex_init(&v3d->reset_lock);
+
+	/* Note: We don't allocate address 0.  Various bits of HW
+	 * treat 0 as special, such as the occlusion query counters
+	 * where 0 means "disabled".
+	 */
+	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
+
+	v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
+			       &v3d->pt_paddr,
+			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+	if (!v3d->pt) {
+		drm_mm_takedown(&v3d->mm);
+		dev_err(v3d->dev,
+			"Failed to allocate page tables. "
+			"Please ensure you have CMA enabled.\n");
+		return -ENOMEM;
+	}
+
+	v3d_init_hw_state(v3d);
+	v3d_mmu_set_page_table(v3d);
+
+	ret = v3d_sched_init(v3d);
+	if (ret) {
+		drm_mm_takedown(&v3d->mm);
+		dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
+				  v3d->pt_paddr);
+		return ret;
+	}
+
+	return 0;
+}
+
+void
+v3d_gem_destroy(struct drm_device *dev)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	enum v3d_queue q;
+
+	v3d_sched_fini(v3d);
+
+	/* Waiting for exec to finish would need to be done before
+	 * unregistering V3D.
+	 */
+	for (q = 0; q < V3D_MAX_QUEUES; q++) {
+		WARN_ON(v3d->queue[q].emit_seqno !=
+			v3d->queue[q].finished_seqno);
+	}
+
+	drm_mm_takedown(&v3d->mm);
+
+	dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
+}
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
new file mode 100644
index 000000000000..77e1fa046c10
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2014-2018 Broadcom */
+
+/**
+ * DOC: Interrupt management for the V3D engine
+ *
+ * When we take a binning or rendering flush done interrupt, we need
+ * to signal the fence for that job so that the scheduler can queue up
+ * the next one and unblock any waiters.
+ *
+ * When we take the binner out of memory interrupt, we need to
+ * allocate some new memory and pass it to the binner so that the
+ * current job can make progress.
+ */
+
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+
+#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
+			     V3D_INT_FLDONE |	\
+			     V3D_INT_FRDONE |	\
+			     V3D_INT_GMPV))
+
+#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
+			    V3D_HUB_INT_MMU_PTI |	\
+			    V3D_HUB_INT_MMU_CAP))
+
+static void
+v3d_overflow_mem_work(struct work_struct *work)
+{
+	struct v3d_dev *v3d =
+		container_of(work, struct v3d_dev, overflow_mem_work);
+	struct drm_device *dev = &v3d->drm;
+	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+	unsigned long irqflags;
+
+	if (IS_ERR(bo)) {
+		DRM_ERROR("Couldn't allocate binner overflow mem\n");
+		return;
+	}
+
+	/* We lost a race, and our work task came in after the bin job
+	 * completed and exited.  This can happen because the HW
+	 * signals OOM before it's fully OOM, so the binner might just
+	 * barely complete.
+	 *
+	 * If we lose the race and our work task comes in after a new
+	 * bin job got scheduled, that's fine.  We'll just give them
+	 * some binner pool anyway.
+	 */
+	spin_lock_irqsave(&v3d->job_lock, irqflags);
+	if (!v3d->bin_job) {
+		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+		goto out;
+	}
+
+	drm_gem_object_get(&bo->base);
+	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
+	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+
+	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
+	V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);
+
+out:
+	drm_gem_object_put_unlocked(&bo->base);
+}
+
+static irqreturn_t
+v3d_irq(int irq, void *arg)
+{
+	struct v3d_dev *v3d = arg;
+	u32 intsts;
+	irqreturn_t status = IRQ_NONE;
+
+	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);
+
+	/* Acknowledge the interrupts we're handling here. */
+	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);
+
+	if (intsts & V3D_INT_OUTOMEM) {
+		/* Note that the OOM status is edge signaled, so the
+		 * interrupt won't happen again until we actually
+		 * add more memory.
+		 */
+		schedule_work(&v3d->overflow_mem_work);
+		status = IRQ_HANDLED;
+	}
+
+	if (intsts & V3D_INT_FLDONE) {
+		v3d->queue[V3D_BIN].finished_seqno++;
+		dma_fence_signal(v3d->bin_job->bin.done_fence);
+		status = IRQ_HANDLED;
+	}
+
+	if (intsts & V3D_INT_FRDONE) {
+		v3d->queue[V3D_RENDER].finished_seqno++;
+		dma_fence_signal(v3d->render_job->render.done_fence);
+
+		status = IRQ_HANDLED;
+	}
+
+	/* We shouldn't be triggering these if we have GMP in
+	 * always-allowed mode.
+	 */
+	if (intsts & V3D_INT_GMPV)
+		dev_err(v3d->dev, "GMP violation\n");
+
+	return status;
+}
+
+static irqreturn_t
+v3d_hub_irq(int irq, void *arg)
+{
+	struct v3d_dev *v3d = arg;
+	u32 intsts;
+	irqreturn_t status = IRQ_NONE;
+
+	intsts = V3D_READ(V3D_HUB_INT_STS);
+
+	/* Acknowledge the interrupts we're handling here. */
+	V3D_WRITE(V3D_HUB_INT_CLR, intsts);
+
+	if (intsts & (V3D_HUB_INT_MMU_WRV |
+		      V3D_HUB_INT_MMU_PTI |
+		      V3D_HUB_INT_MMU_CAP)) {
+		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
+		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;
+
+		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
+			axi_id, (long long)vio_addr,
+			((intsts & V3D_HUB_INT_MMU_WRV) ?
+			 ", write violation" : ""),
+			((intsts & V3D_HUB_INT_MMU_PTI) ?
+			 ", pte invalid" : ""),
+			((intsts & V3D_HUB_INT_MMU_CAP) ?
+			 ", cap exceeded" : ""));
+		status = IRQ_HANDLED;
+	}
+
+	return status;
+}
+
+void
+v3d_irq_init(struct v3d_dev *v3d)
+{
+	int ret, core;
+
+	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
+
+	/* Clear any pending interrupts someone might have left around
+	 * for us.
+	 */
+	for (core = 0; core < v3d->cores; core++)
+		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
+	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
+
+	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+			       v3d_hub_irq, IRQF_SHARED,
+			       "v3d_hub", v3d);
+	if (ret)
+		dev_err(v3d->dev, "hub IRQ setup failed: %d\n", ret);
+
+	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
+			       v3d_irq, IRQF_SHARED,
+			       "v3d_core0", v3d);
+	if (ret)
+		dev_err(v3d->dev, "core IRQ setup failed: %d\n", ret);
+
+	v3d_irq_enable(v3d);
+}
+
+void
+v3d_irq_enable(struct v3d_dev *v3d)
+{
+	int core;
+
+	/* Enable our set of interrupts, masking out any others. */
+	for (core = 0; core < v3d->cores; core++) {
+		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
+		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
+	}
+
+	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
+	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
+}
+
+void
+v3d_irq_disable(struct v3d_dev *v3d)
+{
+	int core;
+
+	/* Disable all interrupts. */
+	for (core = 0; core < v3d->cores; core++)
+		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
+	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);
+
+	/* Clear any pending interrupts we might have left. */
+	for (core = 0; core < v3d->cores; core++)
+		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
+	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
+
+	cancel_work_sync(&v3d->overflow_mem_work);
+}
+
+/** Reinitializes interrupt registers when a GPU reset is performed. */
+void v3d_irq_reset(struct v3d_dev *v3d)
+{
+	v3d_irq_enable(v3d);
+}
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
new file mode 100644
index 000000000000..b00f97c31b70
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2017-2018 Broadcom */
+
+/**
+ * DOC: Broadcom V3D MMU
+ *
+ * The V3D 3.x hardware (compared to VC4) now includes an MMU.  It has
+ * a single level of page tables for the V3D's 4GB address space to
+ * map to AXI bus addresses, thus it could need up to 4MB of
+ * physically contiguous memory to store the PTEs.
+ *
+ * Because the 4MB of contiguous memory for page tables is precious,
+ * and switching between them is expensive, we load all BOs into the
+ * same 4GB address space.
+ *
+ * To protect clients from each other, we should use the GMP to
+ * quickly mask out (at 128kb granularity) what pages are available to
+ * each client.  This is not yet implemented.
+ */
+
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+
+#define V3D_MMU_PAGE_SHIFT 12
+
+/* Note: All PTEs for the 1MB superpage must be filled with the
+ * superpage bit set.
+ */
+#define V3D_PTE_SUPERPAGE BIT(31)
+#define V3D_PTE_WRITEABLE BIT(29)
+#define V3D_PTE_VALID BIT(28)
+
+static int v3d_mmu_flush_all(struct v3d_dev *v3d)
+{
+	int ret;
+
+	/* Make sure that another flush isn't already running when we
+	 * start this one.
+	 */
+	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
+			 V3D_MMU_CTL_TLB_CLEARING), 100);
+	if (ret)
+		dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");
+
+	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
+		  V3D_MMU_CTL_TLB_CLEAR);
+
+	V3D_WRITE(V3D_MMUC_CONTROL,
+		  V3D_MMUC_CONTROL_FLUSH |
+		  V3D_MMUC_CONTROL_ENABLE);
+
+	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
+			 V3D_MMU_CTL_TLB_CLEARING), 100);
+	if (ret) {
+		dev_err(v3d->dev, "TLB clear wait idle failed\n");
+		return ret;
+	}
+
+	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
+			 V3D_MMUC_CONTROL_FLUSHING), 100);
+	if (ret)
+		dev_err(v3d->dev, "MMUC flush wait idle failed\n");
+
+	return ret;
+}
+
+int v3d_mmu_set_page_table(struct v3d_dev *v3d)
+{
+	V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
+	V3D_WRITE(V3D_MMU_CTL,
+		  V3D_MMU_CTL_ENABLE |
+		  V3D_MMU_CTL_PT_INVALID |
+		  V3D_MMU_CTL_PT_INVALID_ABORT |
+		  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
+		  V3D_MMU_CTL_CAP_EXCEEDED_ABORT);
+	V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
+		  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
+		  V3D_MMU_ILLEGAL_ADDR_ENABLE);
+	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);
+
+	return v3d_mmu_flush_all(v3d);
+}
+
+void v3d_mmu_insert_ptes(struct v3d_bo *bo)
+{
+	struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
+	u32 page = bo->node.start;
+	u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
+	unsigned int count;
+	struct scatterlist *sgl;
+
+	for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) {
+		u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
+		u32 pte = page_prot | page_address;
+		u32 i;
+
+		BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >=
+		       BIT(24));
+
+		for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++)
+			v3d->pt[page++] = pte + i;
+	}
+
+	WARN_ON_ONCE(page - bo->node.start !=
+		     bo->base.size >> V3D_MMU_PAGE_SHIFT);
+
+	if (v3d_mmu_flush_all(v3d))
+		dev_err(v3d->dev, "MMU flush timeout\n");
+}
+
+void v3d_mmu_remove_ptes(struct v3d_bo *bo)
+{
+	struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
+	u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT;
+	u32 page;
+
+	for (page = bo->node.start; page < bo->node.start + npages; page++)
+		v3d->pt[page] = 0;
+
+	if (v3d_mmu_flush_all(v3d))
+		dev_err(v3d->dev, "MMU flush timeout\n");
+}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
new file mode 100644
index 000000000000..fc13282dfc2f
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2017-2018 Broadcom */
+
+#ifndef V3D_REGS_H
+#define V3D_REGS_H
+
+#include <linux/bitops.h>
+
+#define V3D_MASK(high, low) ((u32)GENMASK(high, low))
+/* Using the GNU statement expression extension */
+#define V3D_SET_FIELD(value, field)					\
+	({								\
+		u32 fieldval = (value) << field##_SHIFT;		\
+		WARN_ON((fieldval & ~field##_MASK) != 0);		\
+		fieldval & field##_MASK;				\
+	 })
+
+#define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >>		\
+				    field##_SHIFT)
+
+/* Hub registers for shared hardware between V3D cores. */
+
+#define V3D_HUB_AXICFG                                 0x00000
+# define V3D_HUB_AXICFG_MAX_LEN_MASK                   V3D_MASK(3, 0)
+# define V3D_HUB_AXICFG_MAX_LEN_SHIFT                  0
+#define V3D_HUB_UIFCFG                                 0x00004
+#define V3D_HUB_IDENT0                                 0x00008
+
+#define V3D_HUB_IDENT1                                 0x0000c
+# define V3D_HUB_IDENT1_WITH_MSO                       BIT(19)
+# define V3D_HUB_IDENT1_WITH_TSY                       BIT(18)
+# define V3D_HUB_IDENT1_WITH_TFU                       BIT(17)
+# define V3D_HUB_IDENT1_WITH_L3C                       BIT(16)
+# define V3D_HUB_IDENT1_NHOSTS_MASK                    V3D_MASK(15, 12)
+# define V3D_HUB_IDENT1_NHOSTS_SHIFT                   12
+# define V3D_HUB_IDENT1_NCORES_MASK                    V3D_MASK(11, 8)
+# define V3D_HUB_IDENT1_NCORES_SHIFT                   8
+# define V3D_HUB_IDENT1_REV_MASK                       V3D_MASK(7, 4)
+# define V3D_HUB_IDENT1_REV_SHIFT                      4
+# define V3D_HUB_IDENT1_TVER_MASK                      V3D_MASK(3, 0)
+# define V3D_HUB_IDENT1_TVER_SHIFT                     0
+
+#define V3D_HUB_IDENT2                                 0x00010
+# define V3D_HUB_IDENT2_WITH_MMU                       BIT(8)
+# define V3D_HUB_IDENT2_L3C_NKB_MASK                   V3D_MASK(7, 0)
+# define V3D_HUB_IDENT2_L3C_NKB_SHIFT                  0
+
+#define V3D_HUB_IDENT3                                 0x00014
+# define V3D_HUB_IDENT3_IPREV_MASK                     V3D_MASK(15, 8)
+# define V3D_HUB_IDENT3_IPREV_SHIFT                    8
+# define V3D_HUB_IDENT3_IPIDX_MASK                     V3D_MASK(7, 0)
+# define V3D_HUB_IDENT3_IPIDX_SHIFT                    0
+
+#define V3D_HUB_INT_STS                                0x00050
+#define V3D_HUB_INT_SET                                0x00054
+#define V3D_HUB_INT_CLR                                0x00058
+#define V3D_HUB_INT_MSK_STS                            0x0005c
+#define V3D_HUB_INT_MSK_SET                            0x00060
+#define V3D_HUB_INT_MSK_CLR                            0x00064
+# define V3D_HUB_INT_MMU_WRV                           BIT(5)
+# define V3D_HUB_INT_MMU_PTI                           BIT(4)
+# define V3D_HUB_INT_MMU_CAP                           BIT(3)
+# define V3D_HUB_INT_MSO                               BIT(2)
+# define V3D_HUB_INT_TFUC                              BIT(1)
+# define V3D_HUB_INT_TFUF                              BIT(0)
+
+#define V3D_GCA_CACHE_CTRL                             0x0000c
+# define V3D_GCA_CACHE_CTRL_FLUSH                      BIT(0)
+
+#define V3D_GCA_SAFE_SHUTDOWN                          0x000b0
+# define V3D_GCA_SAFE_SHUTDOWN_EN                      BIT(0)
+
+#define V3D_GCA_SAFE_SHUTDOWN_ACK                      0x000b4
+# define V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED               3
+
+# define V3D_TOP_GR_BRIDGE_REVISION                    0x00000
+# define V3D_TOP_GR_BRIDGE_MAJOR_MASK                  V3D_MASK(15, 8)
+# define V3D_TOP_GR_BRIDGE_MAJOR_SHIFT                 8
+# define V3D_TOP_GR_BRIDGE_MINOR_MASK                  V3D_MASK(7, 0)
+# define V3D_TOP_GR_BRIDGE_MINOR_SHIFT                 0
+
+/* 7268 reset reg */
+# define V3D_TOP_GR_BRIDGE_SW_INIT_0                   0x00008
+# define V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT BIT(0)
+/* 7278 reset reg */
+# define V3D_TOP_GR_BRIDGE_SW_INIT_1                   0x0000c
+# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)
+
+/* Per-MMU registers. */
+
+#define V3D_MMUC_CONTROL                               0x01000
+# define V3D_MMUC_CONTROL_CLEAR                        BIT(3)
+# define V3D_MMUC_CONTROL_FLUSHING                     BIT(2)
+# define V3D_MMUC_CONTROL_FLUSH                        BIT(1)
+# define V3D_MMUC_CONTROL_ENABLE                       BIT(0)
+
+#define V3D_MMU_CTL                                    0x01200
+# define V3D_MMU_CTL_CAP_EXCEEDED                      BIT(27)
+# define V3D_MMU_CTL_CAP_EXCEEDED_ABORT                BIT(26)
+# define V3D_MMU_CTL_CAP_EXCEEDED_INT                  BIT(25)
+# define V3D_MMU_CTL_CAP_EXCEEDED_EXCEPTION            BIT(24)
+# define V3D_MMU_CTL_PT_INVALID                        BIT(20)
+# define V3D_MMU_CTL_PT_INVALID_ABORT                  BIT(19)
+# define V3D_MMU_CTL_PT_INVALID_INT                    BIT(18)
+# define V3D_MMU_CTL_PT_INVALID_EXCEPTION              BIT(17)
+# define V3D_MMU_CTL_WRITE_VIOLATION                   BIT(16)
+# define V3D_MMU_CTL_WRITE_VIOLATION_ABORT             BIT(11)
+# define V3D_MMU_CTL_WRITE_VIOLATION_INT               BIT(10)
+# define V3D_MMU_CTL_WRITE_VIOLATION_EXCEPTION         BIT(9)
+# define V3D_MMU_CTL_TLB_CLEARING                      BIT(7)
+# define V3D_MMU_CTL_TLB_STATS_CLEAR                   BIT(3)
+# define V3D_MMU_CTL_TLB_CLEAR                         BIT(2)
+# define V3D_MMU_CTL_TLB_STATS_ENABLE                  BIT(1)
+# define V3D_MMU_CTL_ENABLE                            BIT(0)
+
+#define V3D_MMU_PT_PA_BASE                             0x01204
+#define V3D_MMU_HIT                                    0x01208
+#define V3D_MMU_MISSES                                 0x0120c
+#define V3D_MMU_STALLS                                 0x01210
+
+#define V3D_MMU_ADDR_CAP                               0x01214
+# define V3D_MMU_ADDR_CAP_ENABLE                       BIT(31)
+# define V3D_MMU_ADDR_CAP_MPAGE_MASK                   V3D_MASK(11, 0)
+# define V3D_MMU_ADDR_CAP_MPAGE_SHIFT                  0
+
+#define V3D_MMU_SHOOT_DOWN                             0x01218
+# define V3D_MMU_SHOOT_DOWN_SHOOTING                   BIT(29)
+# define V3D_MMU_SHOOT_DOWN_SHOOT                      BIT(28)
+# define V3D_MMU_SHOOT_DOWN_PAGE_MASK                  V3D_MASK(27, 0)
+# define V3D_MMU_SHOOT_DOWN_PAGE_SHIFT                 0
+
+#define V3D_MMU_BYPASS_START                           0x0121c
+#define V3D_MMU_BYPASS_END                             0x01220
+
+/* AXI ID of the access that faulted */
+#define V3D_MMU_VIO_ID                                 0x0122c
+
+/* Address for illegal PTEs to return */
+#define V3D_MMU_ILLEGAL_ADDR                           0x01230
+# define V3D_MMU_ILLEGAL_ADDR_ENABLE                   BIT(31)
+
+/* Address that faulted */
+#define V3D_MMU_VIO_ADDR                               0x01234
+
+/* Per-V3D-core registers */
+
+#define V3D_CTL_IDENT0                                 0x00000
+# define V3D_IDENT0_VER_MASK                           V3D_MASK(31, 24)
+# define V3D_IDENT0_VER_SHIFT                          24
+
+#define V3D_CTL_IDENT1                                 0x00004
+/* Multiples of 1kb */
+# define V3D_IDENT1_VPM_SIZE_MASK                      V3D_MASK(31, 28)
+# define V3D_IDENT1_VPM_SIZE_SHIFT                     28
+# define V3D_IDENT1_NSEM_MASK                          V3D_MASK(23, 16)
+# define V3D_IDENT1_NSEM_SHIFT                         16
+# define V3D_IDENT1_NTMU_MASK                          V3D_MASK(15, 12)
+# define V3D_IDENT1_NTMU_SHIFT                         12
+# define V3D_IDENT1_QUPS_MASK                          V3D_MASK(11, 8)
+# define V3D_IDENT1_QUPS_SHIFT                         8
+# define V3D_IDENT1_NSLC_MASK                          V3D_MASK(7, 4)
+# define V3D_IDENT1_NSLC_SHIFT                         4
+# define V3D_IDENT1_REV_MASK                           V3D_MASK(3, 0)
+# define V3D_IDENT1_REV_SHIFT                          0
+
+#define V3D_CTL_IDENT2                                 0x00008
+# define V3D_IDENT2_BCG_INT                            BIT(28)
+
+#define V3D_CTL_MISCCFG                                0x00018
+# define V3D_MISCCFG_OVRTMUOUT                         BIT(0)
+
+#define V3D_CTL_L2CACTL                                0x00020
+# define V3D_L2CACTL_L2CCLR                            BIT(2)
+# define V3D_L2CACTL_L2CDIS                            BIT(1)
+# define V3D_L2CACTL_L2CENA                            BIT(0)
+
+#define V3D_CTL_SLCACTL                                0x00024
+# define V3D_SLCACTL_TVCCS_MASK                        V3D_MASK(27, 24)
+# define V3D_SLCACTL_TVCCS_SHIFT                       24
+# define V3D_SLCACTL_TDCCS_MASK                        V3D_MASK(19, 16)
+# define V3D_SLCACTL_TDCCS_SHIFT                       16
+# define V3D_SLCACTL_UCC_MASK                          V3D_MASK(11, 8)
+# define V3D_SLCACTL_UCC_SHIFT                         8
+# define V3D_SLCACTL_ICC_MASK                          V3D_MASK(3, 0)
+# define V3D_SLCACTL_ICC_SHIFT                         0
+
+#define V3D_CTL_L2TCACTL                               0x00030
+# define V3D_L2TCACTL_TMUWCF                           BIT(8)
+# define V3D_L2TCACTL_L2T_NO_WM                        BIT(4)
+# define V3D_L2TCACTL_FLM_FLUSH                        0
+# define V3D_L2TCACTL_FLM_CLEAR                        1
+# define V3D_L2TCACTL_FLM_CLEAN                        2
+# define V3D_L2TCACTL_FLM_MASK                         V3D_MASK(2, 1)
+# define V3D_L2TCACTL_FLM_SHIFT                        1
+# define V3D_L2TCACTL_L2TFLS                           BIT(0)
+#define V3D_CTL_L2TFLSTA                               0x00034
+#define V3D_CTL_L2TFLEND                               0x00038
+
+#define V3D_CTL_INT_STS                                0x00050
+#define V3D_CTL_INT_SET                                0x00054
+#define V3D_CTL_INT_CLR                                0x00058
+#define V3D_CTL_INT_MSK_STS                            0x0005c
+#define V3D_CTL_INT_MSK_SET                            0x00060
+#define V3D_CTL_INT_MSK_CLR                            0x00064
+# define V3D_INT_QPU_MASK                              V3D_MASK(27, 16)
+# define V3D_INT_QPU_SHIFT                             16
+# define V3D_INT_GMPV                                  BIT(5)
+# define V3D_INT_TRFB                                  BIT(4)
+# define V3D_INT_SPILLUSE                              BIT(3)
+# define V3D_INT_OUTOMEM                               BIT(2)
+# define V3D_INT_FLDONE                                BIT(1)
+# define V3D_INT_FRDONE                                BIT(0)
+
+#define V3D_CLE_CT0CS                                  0x00100
+#define V3D_CLE_CT1CS                                  0x00104
+#define V3D_CLE_CTNCS(n) (V3D_CLE_CT0CS + 4 * n)
+#define V3D_CLE_CT0EA                                  0x00108
+#define V3D_CLE_CT1EA                                  0x0010c
+#define V3D_CLE_CTNEA(n) (V3D_CLE_CT0EA + 4 * n)
+#define V3D_CLE_CT0CA                                  0x00110
+#define V3D_CLE_CT1CA                                  0x00114
+#define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n)
+#define V3D_CLE_CT0RA                                  0x00118
+#define V3D_CLE_CT1RA                                  0x0011c
+#define V3D_CLE_CT0LC                                  0x00120
+#define V3D_CLE_CT1LC                                  0x00124
+#define V3D_CLE_CT0PC                                  0x00128
+#define V3D_CLE_CT1PC                                  0x0012c
+#define V3D_CLE_PCS                                    0x00130
+#define V3D_CLE_BFC                                    0x00134
+#define V3D_CLE_RFC                                    0x00138
+#define V3D_CLE_TFBC                                   0x0013c
+#define V3D_CLE_TFIT                                   0x00140
+#define V3D_CLE_CT1CFG                                 0x00144
+#define V3D_CLE_CT1TILECT                              0x00148
+#define V3D_CLE_CT1TSKIP                               0x0014c
+#define V3D_CLE_CT1PTCT                                0x00150
+#define V3D_CLE_CT0SYNC                                0x00154
+#define V3D_CLE_CT1SYNC                                0x00158
+#define V3D_CLE_CT0QTS                                 0x0015c
+# define V3D_CLE_CT0QTS_ENABLE                         BIT(1)
+#define V3D_CLE_CT0QBA                                 0x00160
+#define V3D_CLE_CT1QBA                                 0x00164
+#define V3D_CLE_CTNQBA(n) (V3D_CLE_CT0QBA + 4 * n)
+#define V3D_CLE_CT0QEA                                 0x00168
+#define V3D_CLE_CT1QEA                                 0x0016c
+#define V3D_CLE_CTNQEA(n) (V3D_CLE_CT0QEA + 4 * n)
+#define V3D_CLE_CT0QMA                                 0x00170
+#define V3D_CLE_CT0QMS                                 0x00174
+#define V3D_CLE_CT1QCFG                                0x00178
+/* If set without ETPROC, entirely skip tiles with no primitives. */
+# define V3D_CLE_QCFG_ETFILT                           BIT(7)
+/* If set with ETFILT, just write the clear color to tiles with no
+ * primitives.
+ */
+# define V3D_CLE_QCFG_ETPROC                           BIT(6)
+# define V3D_CLE_QCFG_ETSFLUSH                         BIT(1)
+# define V3D_CLE_QCFG_MCDIS                            BIT(0)
+
+#define V3D_PTB_BPCA                                   0x00300
+#define V3D_PTB_BPCS                                   0x00304
+#define V3D_PTB_BPOA                                   0x00308
+#define V3D_PTB_BPOS                                   0x0030c
+
+#define V3D_PTB_BXCF                                   0x00310
+# define V3D_PTB_BXCF_RWORDERDISA                      BIT(1)
+# define V3D_PTB_BXCF_CLIPDISA                         BIT(0)
+
+#define V3D_GMP_STATUS                                 0x00800
+# define V3D_GMP_STATUS_GMPRST                         BIT(31)
+# define V3D_GMP_STATUS_WR_COUNT_MASK                  V3D_MASK(30, 24)
+# define V3D_GMP_STATUS_WR_COUNT_SHIFT                 24
+# define V3D_GMP_STATUS_RD_COUNT_MASK                  V3D_MASK(22, 16)
+# define V3D_GMP_STATUS_RD_COUNT_SHIFT                 16
+# define V3D_GMP_STATUS_WR_ACTIVE                      BIT(5)
+# define V3D_GMP_STATUS_RD_ACTIVE                      BIT(4)
+# define V3D_GMP_STATUS_CFG_BUSY                       BIT(3)
+# define V3D_GMP_STATUS_CNTOVF                         BIT(2)
+# define V3D_GMP_STATUS_INVPROT                        BIT(1)
+# define V3D_GMP_STATUS_VIO                            BIT(0)
+
+#define V3D_GMP_CFG                                    0x00804
+# define V3D_GMP_CFG_LBURSTEN                          BIT(3)
+# define V3D_GMP_CFG_PGCRSEN                           BIT()
+# define V3D_GMP_CFG_STOP_REQ                          BIT(1)
+# define V3D_GMP_CFG_PROT_ENABLE                       BIT(0)
+
+#define V3D_GMP_VIO_ADDR                               0x00808
+#define V3D_GMP_VIO_TYPE                               0x0080c
+#define V3D_GMP_TABLE_ADDR                             0x00810
+#define V3D_GMP_CLEAR_LOAD                             0x00814
+#define V3D_GMP_PRESERVE_LOAD                          0x00818
+#define V3D_GMP_VALID_LINES                            0x00820
+
+#endif /* V3D_REGS_H */
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
new file mode 100644
index 000000000000..b07bece9417d
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Broadcom */
+
+/**
+ * DOC: Broadcom V3D scheduling
+ *
+ * The shared DRM GPU scheduler is used to coordinate submitting jobs
+ * to the hardware.  Each DRM fd (roughly a client process) gets its
+ * own scheduler entity, which will process jobs in order.  The GPU
+ * scheduler will round-robin between clients to submit the next job.
+ *
+ * For simplicity, and in order to keep latency low for interactive
+ * jobs when bulk background jobs are queued up, we submit a new job
+ * to the HW only when it has completed the last one, instead of
+ * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
+ * v3d_job_dependency() to manage the dependency between bin and
+ * render, instead of having the clients submit jobs using the
+ * HW's semaphores to interlock between them.
+ */
+
+#include <linux/kthread.h>
+
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+#include "v3d_trace.h"
+
+static struct v3d_job *
+to_v3d_job(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct v3d_job, base);
+}
+
+static void
+v3d_job_free(struct drm_sched_job *sched_job)
+{
+	struct v3d_job *job = to_v3d_job(sched_job);
+
+	v3d_exec_put(job->exec);
+}
+
+/**
+ * Returns the fences that the bin or render job depends on, one by one.
+ * v3d_job_run() won't be called until all of them have been signaled.
+ */
+static struct dma_fence *
+v3d_job_dependency(struct drm_sched_job *sched_job,
+		   struct drm_sched_entity *s_entity)
+{
+	struct v3d_job *job = to_v3d_job(sched_job);
+	struct v3d_exec_info *exec = job->exec;
+	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
+	struct dma_fence *fence;
+
+	fence = job->in_fence;
+	if (fence) {
+		job->in_fence = NULL;
+		return fence;
+	}
+
+	if (q == V3D_RENDER) {
+		/* If we had a bin job, the render job definitely depends on
+		 * it. We first have to wait for bin to be scheduled, so that
+		 * its done_fence is created.
+		 */
+		fence = exec->bin_done_fence;
+		if (fence) {
+			exec->bin_done_fence = NULL;
+			return fence;
+		}
+	}
+
+	/* XXX: Wait on a fence for switching the GMP if necessary,
+	 * and then do so.
+	 */
+
+	return fence;
+}
+
+static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
+{
+	struct v3d_job *job = to_v3d_job(sched_job);
+	struct v3d_exec_info *exec = job->exec;
+	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
+	struct v3d_dev *v3d = exec->v3d;
+	struct drm_device *dev = &v3d->drm;
+	struct dma_fence *fence;
+	unsigned long irqflags;
+
+	if (unlikely(job->base.s_fence->finished.error))
+		return NULL;
+
+	/* Lock required around bin_job update vs
+	 * v3d_overflow_mem_work().
+	 */
+	spin_lock_irqsave(&v3d->job_lock, irqflags);
+	if (q == V3D_BIN) {
+		v3d->bin_job = job->exec;
+
+		/* Clear out the overflow allocation, so we don't
+		 * reuse the overflow attached to a previous job.
+		 */
+		V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
+	} else {
+		v3d->render_job = job->exec;
+	}
+	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+
+	/* Can we avoid this flush when q==RENDER?  We need to be
+	 * careful of scheduling, though -- imagine job0 rendering to
+	 * texture and job1 reading, and them being executed as bin0,
+	 * bin1, render0, render1, so that render1's flush at bin time
+	 * wasn't enough.
+	 */
+	v3d_invalidate_caches(v3d);
+
+	fence = v3d_fence_create(v3d, q);
+	if (!fence)
+		return fence;
+
+	if (job->done_fence)
+		dma_fence_put(job->done_fence);
+	job->done_fence = dma_fence_get(fence);
+
+	trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
+			    job->start, job->end);
+
+	if (q == V3D_BIN) {
+		if (exec->qma) {
+			V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma);
+			V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms);
+		}
+		if (exec->qts) {
+			V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
+				       V3D_CLE_CT0QTS_ENABLE |
+				       exec->qts);
+		}
+	} else {
+		/* XXX: Set the QCFG */
+	}
+
+	/* Set the current and end address of the control list.
+	 * Writing the end register is what starts the job.
+	 */
+	V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start);
+	V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end);
+
+	return fence;
+}
+
+static void
+v3d_job_timedout(struct drm_sched_job *sched_job)
+{
+	struct v3d_job *job = to_v3d_job(sched_job);
+	struct v3d_exec_info *exec = job->exec;
+	struct v3d_dev *v3d = exec->v3d;
+	enum v3d_queue q;
+
+	mutex_lock(&v3d->reset_lock);
+
+	/* block scheduler */
+	for (q = 0; q < V3D_MAX_QUEUES; q++) {
+		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
+
+		kthread_park(sched->thread);
+		drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
+					       sched_job : NULL));
+	}
+
+	/* get the GPU back into the init state */
+	v3d_reset(v3d);
+
+	/* Unblock schedulers and restart their jobs. */
+	for (q = 0; q < V3D_MAX_QUEUES; q++) {
+		drm_sched_job_recovery(&v3d->queue[q].sched);
+		kthread_unpark(v3d->queue[q].sched.thread);
+	}
+
+	mutex_unlock(&v3d->reset_lock);
+}
+
+static const struct drm_sched_backend_ops v3d_sched_ops = {
+	.dependency = v3d_job_dependency,
+	.run_job = v3d_job_run,
+	.timedout_job = v3d_job_timedout,
+	.free_job = v3d_job_free
+};
+
+int
+v3d_sched_init(struct v3d_dev *v3d)
+{
+	int hw_jobs_limit = 1;
+	int job_hang_limit = 0;
+	int hang_limit_ms = 500;
+	int ret;
+
+	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
+			     &v3d_sched_ops,
+			     hw_jobs_limit, job_hang_limit,
+			     msecs_to_jiffies(hang_limit_ms),
+			     "v3d_bin");
+	if (ret) {
+		dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
+		return ret;
+	}
+
+	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
+			     &v3d_sched_ops,
+			     hw_jobs_limit, job_hang_limit,
+			     msecs_to_jiffies(hang_limit_ms),
+			     "v3d_render");
+	if (ret) {
+		dev_err(v3d->dev, "Failed to create render scheduler: %d.",
+			ret);
+		drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+		return ret;
+	}
+
+	return 0;
+}
+
+void
+v3d_sched_fini(struct v3d_dev *v3d)
+{
+	enum v3d_queue q;
+
+	for (q = 0; q < V3D_MAX_QUEUES; q++)
+		drm_sched_fini(&v3d->queue[q].sched);
+}
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
new file mode 100644
index 000000000000..85dd351e1e09
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2015-2018 Broadcom */
+
+#if !defined(_V3D_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _V3D_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM v3d
+#define TRACE_INCLUDE_FILE v3d_trace
+
+TRACE_EVENT(v3d_submit_cl,
+	    TP_PROTO(struct drm_device *dev, bool is_render,
+		     uint64_t seqno,
+		     u32 ctnqba, u32 ctnqea),
+	    TP_ARGS(dev, is_render, seqno, ctnqba, ctnqea),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(bool, is_render)
+			     __field(u64, seqno)
+			     __field(u32, ctnqba)
+			     __field(u32, ctnqea)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->is_render = is_render;
+			   __entry->seqno = seqno;
+			   __entry->ctnqba = ctnqba;
+			   __entry->ctnqea = ctnqea;
+			   ),
+
+	    TP_printk("dev=%u, %s, seqno=%llu, 0x%08x..0x%08x",
+		      __entry->dev,
+		      __entry->is_render ? "RCL" : "BCL",
+		      __entry->seqno,
+		      __entry->ctnqba,
+		      __entry->ctnqea)
+);
+
+TRACE_EVENT(v3d_reset_begin,
+	    TP_PROTO(struct drm_device *dev),
+	    TP_ARGS(dev),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   ),
+
+	    TP_printk("dev=%u",
+		      __entry->dev)
+);
+
+TRACE_EVENT(v3d_reset_end,
+	    TP_PROTO(struct drm_device *dev),
+	    TP_ARGS(dev),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   ),
+
+	    TP_printk("dev=%u",
+		      __entry->dev)
+);
+
+#endif /* _V3D_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/v3d/v3d_trace_points.c b/drivers/gpu/drm/v3d/v3d_trace_points.c
new file mode 100644
index 000000000000..482922d7c7e1
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_trace_points.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2015 Broadcom */
+
+#include "v3d_drv.h"
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "v3d_trace.h"
+#endif
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
new file mode 100644
index 000000000000..7b6627783608
--- /dev/null
+++ b/include/uapi/drm/v3d_drm.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright © 2014-2018 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _V3D_DRM_H_
+#define _V3D_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_V3D_SUBMIT_CL                         0x00
+#define DRM_V3D_WAIT_BO                           0x01
+#define DRM_V3D_CREATE_BO                         0x02
+#define DRM_V3D_MMAP_BO                           0x03
+#define DRM_V3D_GET_PARAM                         0x04
+#define DRM_V3D_GET_BO_OFFSET                     0x05
+
+#define DRM_IOCTL_V3D_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
+#define DRM_IOCTL_V3D_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
+#define DRM_IOCTL_V3D_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo)
+#define DRM_IOCTL_V3D_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
+#define DRM_IOCTL_V3D_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
+#define DRM_IOCTL_V3D_GET_BO_OFFSET       DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
+
+/**
+ * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
+ * engine.
+ *
+ * This asks the kernel to have the GPU execute an optional binner
+ * command list, and a render command list.
+ */
+struct drm_v3d_submit_cl {
+	/* Pointer to the binner command list.
+	 *
+	 * This is the first set of commands executed, which runs the
+	 * coordinate shader to determine where primitives land on the screen,
+	 * then writes out the state updates and draw calls necessary per tile
+	 * to the tile allocation BO.
+	 */
+	__u32 bcl_start;
+
+	 /** End address of the BCL (first byte after the BCL) */
+	__u32 bcl_end;
+
+	/* Offset of the render command list.
+	 *
+	 * This is the second set of commands executed, which will either
+	 * execute the tiles that have been set up by the BCL, or a fixed set
+	 * of tiles (in the case of RCL-only blits).
+	 */
+	__u32 rcl_start;
+
+	 /** End address of the RCL (first byte after the RCL) */
+	__u32 rcl_end;
+
+	/** An optional sync object to wait on before starting the BCL. */
+	__u32 in_sync_bcl;
+	/** An optional sync object to wait on before starting the RCL. */
+	__u32 in_sync_rcl;
+	/** An optional sync object to place the completion fence in. */
+	__u32 out_sync;
+
+	/* Offset of the tile alloc memory
+	 *
+	 * This is optional on V3D 3.3 (where the CL can set the value) but
+	 * required on V3D 4.1.
+	 */
+	__u32 qma;
+
+	/** Size of the tile alloc memory. */
+	__u32 qms;
+
+	/** Offset of the tile state data array. */
+	__u32 qts;
+
+	/* Pointer to a u32 array of the BOs that are referenced by the job.
+	 */
+	__u64 bo_handles;
+
+	/* Number of BO handles passed in (size is that times 4). */
+	__u32 bo_handle_count;
+
+	/* Pad, must be zero-filled. */
+	__u32 pad;
+};
+
+/**
+ * struct drm_v3d_wait_bo - ioctl argument for waiting for
+ * completion of the last DRM_V3D_SUBMIT_CL on a BO.
+ *
+ * This is useful for cases where multiple processes might be
+ * rendering to a BO and you want to wait for all rendering to be
+ * completed.
+ */
+struct drm_v3d_wait_bo {
+	__u32 handle;
+	__u32 pad;
+	__u64 timeout_ns;
+};
+
+/**
+ * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_v3d_create_bo {
+	__u32 size;
+	__u32 flags;
+	/** Returned GEM handle for the BO. */
+	__u32 handle;
+	/**
+	 * Returned offset for the BO in the V3D address space.  This offset
+	 * is private to the DRM fd and is valid for the lifetime of the GEM
+	 * handle.
+	 *
+	 * This offset value will always be nonzero, since various HW
+	 * units treat 0 specially.
+	 */
+	__u32 offset;
+};
+
+/**
+ * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs.
+ *
+ * This doesn't actually perform an mmap.  Instead, it returns the
+ * offset you need to use in an mmap on the DRM device node.  This
+ * means that tools like valgrind end up knowing about the mapped
+ * memory.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_v3d_mmap_bo {
+	/** Handle for the object being mapped. */
+	__u32 handle;
+	__u32 flags;
+	/** offset into the drm node to use for subsequent mmap call. */
+	__u64 offset;
+};
+
+enum drm_v3d_param {
+	DRM_V3D_PARAM_V3D_UIFCFG,
+	DRM_V3D_PARAM_V3D_HUB_IDENT1,
+	DRM_V3D_PARAM_V3D_HUB_IDENT2,
+	DRM_V3D_PARAM_V3D_HUB_IDENT3,
+	DRM_V3D_PARAM_V3D_CORE0_IDENT0,
+	DRM_V3D_PARAM_V3D_CORE0_IDENT1,
+	DRM_V3D_PARAM_V3D_CORE0_IDENT2,
+};
+
+struct drm_v3d_get_param {
+	__u32 param;
+	__u32 pad;
+	__u64 value;
+};
+
+/**
+ * Returns the offset for the BO in the V3D address space for this DRM fd.
+ * This is the same value returned by drm_v3d_create_bo, if that was called
+ * from this DRM fd.
+ */
+struct drm_v3d_get_bo_offset {
+	__u32 handle;
+	__u32 offset;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _V3D_DRM_H_ */

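For reference, the sketch below shows how the BO ioctls defined in this UAPI
header fit together from userspace: create a buffer, ask for its fake mmap
offset, and map it through the DRM fd. This is an illustration only; the
device node path, buffer size, include path and error handling are
assumptions, not part of the patch.

/*
 * Minimal userspace sketch (illustrative, not from the patch): allocate a
 * V3D BO and CPU-map it via DRM_IOCTL_V3D_MMAP_BO. The render-node path
 * and the flat error handling are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "v3d_drm.h"	/* this UAPI header; install path may differ */

int main(void)
{
	struct drm_v3d_create_bo create = { .size = 4096 };
	struct drm_v3d_mmap_bo map = { 0 };
	void *ptr;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed node */

	if (fd < 0 || ioctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create))
		return 1;

	/* The returned offset is only valid for mmap() on this DRM fd. */
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map))
		return 1;

	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (ptr == MAP_FAILED)
		return 1;

	memset(ptr, 0, create.size);
	printf("handle %u mapped; V3D address 0x%08x\n",
	       create.handle, create.offset);

	munmap(ptr, create.size);
	close(fd);
	return 0;
}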
From 74ba22ead5971e25c2e01bd08f5d814fd8ee41b3 Mon Sep 17 00:00:00 2001
From: Tomasz Lis <tomasz.lis@intel.com>
Date: Wed, 2 May 2018 15:31:42 -0700
Subject: [PATCH 0597/1286] drm/i915/icl: Add configuring MOCS in new Icelake
 engines

In Icelake, there are more engines on which Memory Object Control
States need to be configured. Besides adding Icelake under Skylake
config, the patch makes sure MOCS register addresses for the new
engines are properly defined.

An additional patch might be needed later, in case the specification
proposes different MOCS config values for Icelake than in previous
gens.

v2: Restricted comments to gen11, updated description, renamed
defines.

v3: Used proper engine indexes for gen11.

v4: Ensure patch is Icelake only.

v5: Style fixes (proposed by mwajdeczko)

v6 (from Paulo): fix checkpatch's COMMIT_LOG_LONG_LINE (Checkpatch).

BSpec: 19405
BSpec: 21140
Cc: Oscar Mateo Lozano <oscar.mateo@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Tomasz Lis <tomasz.lis@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502223142.3891-1-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h   | 2 ++
 drivers/gpu/drm/i915/intel_mocs.c | 5 ++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 197c9660bbc1..085928c9005e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -9864,6 +9864,8 @@ enum skl_power_gate {
 #define GEN9_MFX1_MOCS(i)	_MMIO(0xca00 + (i) * 4)	/* Media 1 MOCS registers */
 #define GEN9_VEBOX_MOCS(i)	_MMIO(0xcb00 + (i) * 4)	/* Video MOCS registers */
 #define GEN9_BLT_MOCS(i)	_MMIO(0xcc00 + (i) * 4)	/* Blitter MOCS registers */
+/* Media decoder 2 MOCS registers */
+#define GEN11_MFX2_MOCS(i)	_MMIO(0x10000 + (i) * 4)
 
 /* gamt regs */
 #define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index c0b34b7943b9..9f0bd6a4cb79 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -178,7 +178,8 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 {
 	bool result = false;
 
-	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv) ||
+	    IS_ICELAKE(dev_priv)) {
 		table->size  = ARRAY_SIZE(skylake_mocs_table);
 		table->table = skylake_mocs_table;
 		result = true;
@@ -217,6 +218,8 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
 		return GEN9_VEBOX_MOCS(index);
 	case VCS2:
 		return GEN9_MFX1_MOCS(index);
+	case VCS3:
+		return GEN11_MFX2_MOCS(index);
 	default:
 		MISSING_CASE(engine_id);
 		return INVALID_MMIO_REG;

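To make the effect of the new VCS3 case concrete, here is a simplified sketch
of how the per-engine MOCS registers end up programmed from the table selected
in get_mocs_settings(). It is modelled on the driver's style; the loop body and
the entry field name are assumptions, not the exact upstream function.

/*
 * Illustrative sketch only: write one control word per index into the
 * engine's MOCS bank via mocs_register(). For VCS3 on Icelake this now
 * resolves to GEN11_MFX2_MOCS(index). Struct/field names are assumed.
 */
static void example_program_engine_mocs(struct drm_i915_private *dev_priv,
					enum intel_engine_id engine_id,
					const struct drm_i915_mocs_table *table)
{
	unsigned int index;

	for (index = 0; index < table->size; index++)
		I915_WRITE(mocs_register(engine_id, index),
			   table->table[index].control_value);
}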
From 7bd2d2ecedff26b3a87b026b98acc4b7110c9ee6 Mon Sep 17 00:00:00 2001
From: Peter Rosin <peda@axentia.se>
Date: Thu, 26 Apr 2018 23:36:44 +0200
Subject: [PATCH 0598/1286] drm/bridge: adv7511: fix spelling of driver name in
 Kconfig

Could perhaps prevent some confusion.

Signed-off-by: Peter Rosin <peda@axentia.se>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426213644.29318-1-peda@axentia.se
---
 drivers/gpu/drm/bridge/adv7511/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 592b9d2ec034..944e440c4fde 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -1,5 +1,5 @@
 config DRM_I2C_ADV7511
-	tristate "AV7511 encoder"
+	tristate "ADV7511 encoder"
 	depends on OF
 	select DRM_KMS_HELPER
 	select REGMAP_I2C

From 5f27314141757794378abb2907fb7116947d644b Mon Sep 17 00:00:00 2001
From: Jia-Ju Bai <baijiaju1990@gmail.com>
Date: Wed, 11 Apr 2018 16:33:42 +0800
Subject: [PATCH 0599/1286] gpu: drm: bridge: adv7511: Replace mdelay with
 usleep_range in adv7511_probe

adv7511_probe() is never called in atomic context.
This function is only set as ".probe" in struct i2c_driver.

Despite never getting called from atomic context, adv7511_probe()
calls mdelay() to busily wait.
This is not necessary and can be replaced with usleep_range() to
avoid busy waiting.

This is found by a static analysis tool named DCNS written by myself.
And I also manually check it.

Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Link: https://patchwork.freedesktop.org/patch/msgid/1523435622-4329-1-git-send-email-baijiaju1990@gmail.com
---
 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 2614cea538e2..73021b388e12 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1127,7 +1127,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 	}
 
 	if (adv7511->gpio_pd) {
-		mdelay(5);
+		usleep_range(5000, 6000);
 		gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
 	}
 

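As a quick illustration of the rule being applied here (a hedged sketch, not
driver code): usleep_range() may sleep, so it is only valid where the caller
can schedule, such as an i2c probe path, while mdelay() busy-waits and remains
the only option in atomic context.

#include <linux/delay.h>

static void example_wait_5ms(bool in_atomic_context)
{
	if (in_atomic_context)
		mdelay(5);			/* cannot sleep: spin for 5 ms */
	else
		usleep_range(5000, 6000);	/* may sleep: 5-6 ms window */
}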
From dc74f6fec68daa7cb34ad9155da3782c0f9bf86a Mon Sep 17 00:00:00 2001
From: Colin Ian King <colin.king@canonical.com>
Date: Thu, 3 May 2018 16:45:10 +0100
Subject: [PATCH 0600/1286] drm/i915/selftests: fix spelling mistake:
 "parmaters" -> "parameters"

Trivial fix to spelling mistake in pr_err error message

Signed-off-by: Colin Ian King <colin.king@canonical.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503154510.708-1-colin.king@canonical.com
---
 drivers/gpu/drm/i915/selftests/i915_vma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index eb89e301b602..e90f97236e50 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -81,7 +81,7 @@ checked_vma_instance(struct drm_i915_gem_object *obj,
 	}
 
 	if (i915_vma_compare(vma, vm, view)) {
-		pr_err("i915_vma_compare failed with create parmaters!\n");
+		pr_err("i915_vma_compare failed with create parameters!\n");
 		return ERR_PTR(-EINVAL);
 	}
 

From 3365e2268b6bc3d9fa6550f2deaf1b6a537f8732 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 3 May 2018 20:51:14 +0100
Subject: [PATCH 0601/1286] drm/i915: Lazily unbind vma on close

When userspace is passing around swapbuffers using DRI, we frequently
have to open and close the same object in the foreign address space.
This shows itself as the same object being rebound at roughly 30fps
(with a second object also being rebound at 30fps), which involves us
having to rewrite the page tables and maintain the drm_mm range manager
every time.

However, since the object still exists and it is only the local handle
that disappears, if we are lazy and do not unbind the VMA immediately
when the local user closes the object but defer it until the GPU is
idle, then we can reuse the same VMA binding. We still have to be
careful to mark the handle and lookup tables as closed to maintain the
uABI, just allowing the underlying VMA to be resurrected if the user is
able to access the same object from the same context again.

If the object itself is destroyed (with userspace no longer keeping a
handle to it), the VMA will be reaped immediately as usual.

In the future, this will be even more useful as instantiating a new VMA
for use on the GPU will become heavier. A nuisance indeed, so nip it in
the bud.

v2: s/__i915_vma_final_close/i915_vma_destroy/ etc.
v3: Leave a hint as to why we deferred the unbind on close.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503195115.22309-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.h               |  1 +
 drivers/gpu/drm/i915/i915_gem.c               |  4 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c    |  3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 14 ++--
 drivers/gpu/drm/i915/i915_vma.c               | 73 ++++++++++++++-----
 drivers/gpu/drm/i915/i915_vma.h               |  6 ++
 drivers/gpu/drm/i915/selftests/huge_pages.c   |  2 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  1 +
 8 files changed, 79 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 11ff84eef52a..04e27806e581 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2062,6 +2062,7 @@ struct drm_i915_private {
 		struct list_head timelines;
 
 		struct list_head active_rings;
+		struct list_head closed_vma;
 		u32 active_requests;
 		u32 request_serial;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 484354f25f98..5ece6ae4bdff 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -165,6 +165,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	i915_timelines_park(i915);
 
 	i915_pmu_gt_parked(i915);
+	i915_vma_parked(i915);
 
 	i915->gt.awake = false;
 
@@ -4795,7 +4796,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 					 &obj->vma_list, obj_link) {
 			GEM_BUG_ON(i915_vma_is_active(vma));
 			vma->flags &= ~I915_VMA_PIN_MASK;
-			i915_vma_close(vma);
+			i915_vma_destroy(vma);
 		}
 		GEM_BUG_ON(!list_empty(&obj->vma_list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
@@ -5598,6 +5599,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
+	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
 	i915_gem_init__mm(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c74f5df3fb5a..f627a8c47c58 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -762,7 +762,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		}
 
 		/* transfer ref to ctx */
-		vma->open_count++;
+		if (!vma->open_count++)
+			i915_vma_reopen(vma);
 		list_add(&lut->obj_link, &obj->lut_list);
 		list_add(&lut->ctx_link, &eb->ctx->handles_list);
 		lut->ctx = eb->ctx;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e9d828324f67..272d6bb407cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2218,6 +2218,12 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
 }
 
 void i915_ppgtt_close(struct i915_address_space *vm)
+{
+	GEM_BUG_ON(vm->closed);
+	vm->closed = true;
+}
+
+static void ppgtt_destroy_vma(struct i915_address_space *vm)
 {
 	struct list_head *phases[] = {
 		&vm->active_list,
@@ -2226,15 +2232,12 @@ void i915_ppgtt_close(struct i915_address_space *vm)
 		NULL,
 	}, **phase;
 
-	GEM_BUG_ON(vm->closed);
 	vm->closed = true;
-
 	for (phase = phases; *phase; phase++) {
 		struct i915_vma *vma, *vn;
 
 		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			if (!i915_vma_is_closed(vma))
-				i915_vma_close(vma);
+			i915_vma_destroy(vma);
 	}
 }
 
@@ -2245,7 +2248,8 @@ void i915_ppgtt_release(struct kref *kref)
 
 	trace_i915_ppgtt_release(&ppgtt->base);
 
-	/* vmas should already be unbound and destroyed */
+	ppgtt_destroy_vma(&ppgtt->base);
+
 	GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
 	GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
 	GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4bda3bd29bf5..9324d476e0a7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -46,8 +46,6 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
-		WARN_ON(i915_vma_unbind(vma));
 
 	GEM_BUG_ON(!i915_gem_object_is_active(obj));
 	if (--obj->active_count)
@@ -232,7 +230,6 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 	if (!vma)
 		vma = vma_create(obj, vm, view);
 
-	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
 	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
 	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
 	return vma;
@@ -684,13 +681,43 @@ err_unpin:
 	return ret;
 }
 
-static void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_close(struct i915_vma *vma)
+{
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+	GEM_BUG_ON(i915_vma_is_closed(vma));
+	vma->flags |= I915_VMA_CLOSED;
+
+	/*
+	 * We defer actually closing, unbinding and destroying the VMA until
+	 * the next idle point, or if the object is freed in the meantime. By
+	 * postponing the unbind, we allow for it to be resurrected by the
+	 * client, avoiding the work required to rebind the VMA. This is
+	 * advantageous for DRI, where the client/server pass objects
+	 * between themselves, temporarily opening a local VMA to the
+	 * object, and then closing it again. The same object is then reused
+	 * on the next frame (or two, depending on the depth of the swap queue)
+	 * causing us to rebind the VMA once more. This ends up being a lot
+	 * of wasted work for the steady state.
+	 */
+	list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
+}
+
+void i915_vma_reopen(struct i915_vma *vma)
+{
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+	if (vma->flags & I915_VMA_CLOSED) {
+		vma->flags &= ~I915_VMA_CLOSED;
+		list_del(&vma->closed_link);
+	}
+}
+
+static void __i915_vma_destroy(struct i915_vma *vma)
 {
 	int i;
 
 	GEM_BUG_ON(vma->node.allocated);
-	GEM_BUG_ON(i915_vma_is_active(vma));
-	GEM_BUG_ON(!i915_vma_is_closed(vma));
 	GEM_BUG_ON(vma->fence);
 
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
@@ -699,6 +726,7 @@ static void i915_vma_destroy(struct i915_vma *vma)
 
 	list_del(&vma->obj_link);
 	list_del(&vma->vm_link);
+	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
 	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -706,15 +734,30 @@ static void i915_vma_destroy(struct i915_vma *vma)
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
 
-void i915_vma_close(struct i915_vma *vma)
+void i915_vma_destroy(struct i915_vma *vma)
 {
-	GEM_BUG_ON(i915_vma_is_closed(vma));
-	vma->flags |= I915_VMA_CLOSED;
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+	GEM_BUG_ON(i915_vma_is_active(vma));
+	GEM_BUG_ON(i915_vma_is_pinned(vma));
 
-	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
-		WARN_ON(i915_vma_unbind(vma));
+	if (i915_vma_is_closed(vma))
+		list_del(&vma->closed_link);
+
+	WARN_ON(i915_vma_unbind(vma));
+	__i915_vma_destroy(vma);
+}
+
+void i915_vma_parked(struct drm_i915_private *i915)
+{
+	struct i915_vma *vma, *next;
+
+	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
+		GEM_BUG_ON(!i915_vma_is_closed(vma));
+		i915_vma_destroy(vma);
+	}
+
+	GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
 }
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -804,7 +847,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		return -EBUSY;
 
 	if (!drm_mm_node_allocated(&vma->node))
-		goto destroy;
+		return 0;
 
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -841,10 +884,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	i915_vma_remove(vma);
 
-destroy:
-	if (unlikely(i915_vma_is_closed(vma)))
-		i915_vma_destroy(vma);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8c5022095418..fc4294cfaa91 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -119,6 +119,8 @@ struct i915_vma {
 	/** This vma's place in the eviction list */
 	struct list_head evict_link;
 
+	struct list_head closed_link;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
@@ -285,6 +287,8 @@ void i915_vma_revoke_mmap(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
+void i915_vma_reopen(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
 
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags);
@@ -408,6 +412,8 @@ i915_vma_unpin_fence(struct i915_vma *vma)
 		__i915_vma_unpin_fence(vma);
 }
 
+void i915_vma_parked(struct drm_i915_private *i915);
+
 #define for_each_until(cond) if (cond) break; else
 
 /**
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 05bbef363fff..d7c8ef8e6764 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1091,7 +1091,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
 out_vma_unpin:
 	i915_vma_unpin(vma);
 out_vma_close:
-	i915_vma_close(vma);
+	i915_vma_destroy(vma);
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index a662c0450e77..4b6622c6986a 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -226,6 +226,7 @@ struct drm_i915_private *mock_gem_device(void)
 
 	INIT_LIST_HEAD(&i915->gt.timelines);
 	INIT_LIST_HEAD(&i915->gt.active_rings);
+	INIT_LIST_HEAD(&i915->gt.closed_vma);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_init_ggtt(i915);

From 7c572e1bdf8dea0c84ce8da01a84cdaa26d8e138 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 3 May 2018 20:51:15 +0100
Subject: [PATCH 0602/1286] drm/i915: Keep one request in our ring_list

Don't pre-emptively retire the oldest request in our ring's list if it
is the only request. We keep various bits of state alive using the
active reference from the request and would rather transfer that state
over to a new request than go through the more involved process of
retiring and reacquiring it.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503195115.22309-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 63bb61089be5..d68739b94dac 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -695,9 +695,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		goto err_unreserve;
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
-	rq = list_first_entry_or_null(&ring->request_list,
-				      typeof(*rq), ring_link);
-	if (rq && i915_request_completed(rq))
+	rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+	if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+	    i915_request_completed(rq))
 		i915_request_retire(rq);
 
 	/*

From 74f9474124ea53f98866e434a8080f1538b8c2b7 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 3 May 2018 20:54:16 +0100
Subject: [PATCH 0603/1286] drm/i915/execlists: Drop preemption arbitration
 points along the ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Limit the arbitration (where preemption may occur) to inside the batch,
and prevent it from happening on the pipecontrols/flushes we use to
write the breadcrumb seqno. Once the user batch is complete, we have
nothing left to do but serialise and emit the breadcrumb; switching
contexts at this point is futile so don't.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503195416.22498-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3d747d1c3d4d..9f3cce022b2d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1933,7 +1933,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 		rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
 	}
 
-	cs = intel_ring_begin(rq, 4);
+	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -1962,6 +1962,9 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 		(flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
 	*cs++ = lower_32_bits(offset);
 	*cs++ = upper_32_bits(offset);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_NOOP;
 	intel_ring_advance(rq, cs);
 
 	return 0;
@@ -2104,7 +2107,7 @@ static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
 	cs = gen8_emit_ggtt_write(cs, request->global_seqno,
 				  intel_hws_seqno_address(request->engine));
 	*cs++ = MI_USER_INTERRUPT;
-	*cs++ = MI_NOOP;
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	request->tail = intel_ring_offset(request, cs);
 	assert_ring_tail_valid(request->ring, request->tail);
 
@@ -2120,7 +2123,7 @@ static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 	cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
 				      intel_hws_seqno_address(request->engine));
 	*cs++ = MI_USER_INTERRUPT;
-	*cs++ = MI_NOOP;
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	request->tail = intel_ring_offset(request, cs);
 	assert_ring_tail_valid(request->ring, request->tail);
 

From 47d4cb8ae8e7f70363ece0bcc4ffc76a9e4638dc Mon Sep 17 00:00:00 2001
From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Date: Thu, 3 May 2018 21:17:06 +0300
Subject: [PATCH 0604/1286] i915: Convert to use match_string() helper

The new match_string() helper returns the index of the matching string
in an array. Use it here to replace the open-coded lookup loops.
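
A minimal sketch of the pattern this conversion applies (the modes[] table
and parse_mode() below are hypothetical, not driver code):

	#include <linux/kernel.h>
	#include <linux/string.h>

	static const char * const modes[] = { "off", "on", "auto" };

	static int parse_mode(const char *buf, int *out)
	{
		/* match_string() returns the index, or -EINVAL if not found */
		int i = match_string(modes, ARRAY_SIZE(modes), buf);

		if (i < 0)
			return i;

		*out = i;
		return 0;
	}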

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503181706.22120-1-andriy.shevchenko@linux.intel.com
---
 drivers/gpu/drm/i915/intel_pipe_crc.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 4f367c16e9e5..39a4e4edda07 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -766,13 +766,12 @@ display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
-		if (!strcmp(buf, pipe_crc_objects[i])) {
-			*o = i;
-			return 0;
-		}
+	i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf);
+	if (i < 0)
+		return i;
 
-	return -EINVAL;
+	*o = i;
+	return 0;
 }
 
 static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
@@ -798,13 +797,12 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
 		return 0;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
-		if (!strcmp(buf, pipe_crc_sources[i])) {
-			*s = i;
-			return 0;
-		}
+	i = match_string(pipe_crc_sources, ARRAY_SIZE(pipe_crc_sources), buf);
+	if (i < 0)
+		return i;
 
-	return -EINVAL;
+	*s = i;
+	return 0;
 }
 
 static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,

From 6f96f2000ac27b0f5f769b0bc2f0440ebfa1c3a3 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Thu, 3 May 2018 13:22:13 +0200
Subject: [PATCH 0605/1286] drm/rect: Round above 1 << 16 upwards to correct
 scale calculation functions.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When calculating limits we want to be as pessimistic as possible,
so we have to explicitly say whether we want to round up or down
to accurately calculate whether we are below min_scale or above
max_scale.
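
A worked example with illustrative numbers (16.16 fixed point, as used by
drm_calc_scale(); these values are not from the patch):

	/*
	 * src = 0x30001 (~3.000015 in 16.16), dst = 2, true scale ~1.50001.
	 * Truncating division gives 0x18000 (exactly 1.5) and would sneak
	 * past a max_scale limit of 0x18000; since src > (dst << 16), the
	 * code now returns DIV_ROUND_UP(0x30001, 2) = 0x18001, which is
	 * correctly rejected as above the limit.
	 */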

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
[mlankhorst: Fix wording in documentation. (Ville)]
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-2-maarten.lankhorst@linux.intel.com
---
 drivers/gpu/drm/drm_rect.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index a3783ecea297..a8e934795c7d 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -106,7 +106,10 @@ static int drm_calc_scale(int src, int dst)
 	if (dst == 0)
 		return 0;
 
-	scale = src / dst;
+	if (src > (dst << 16))
+		return DIV_ROUND_UP(src, dst);
+	else
+		scale = src / dst;
 
 	return scale;
 }
@@ -121,6 +124,10 @@ static int drm_calc_scale(int src, int dst)
  * Calculate the horizontal scaling factor as
  * (@src width) / (@dst width).
  *
+ * If the scale is below 1 << 16, round down. If the scale is above
+ * 1 << 16, round up. This will calculate the scale with the most
+ * pessimistic limit calculation.
+ *
  * RETURNS:
  * The horizontal scaling factor, or errno of out of limits.
  */
@@ -152,6 +159,10 @@ EXPORT_SYMBOL(drm_rect_calc_hscale);
  * Calculate the vertical scaling factor as
  * (@src height) / (@dst height).
  *
+ * If the scale is below 1 << 16, round down. If the scale is above
+ * 1 << 16, round up. This will calculate the scale with the most
+ * pessimistic limit calculation.
+ *
  * RETURNS:
  * The vertical scaling factor, or errno of out of limits.
  */
@@ -189,6 +200,10 @@ EXPORT_SYMBOL(drm_rect_calc_vscale);
  * If the calculated scaling factor is above @max_vscale,
  * decrease the height of rectangle @src to compensate.
  *
+ * If the scale is below 1 << 16, round down. If the scale is above
+ * 1 << 16, round up. This will calculate the scale with the most
+ * pessimistic limit calculation.
+ *
  * RETURNS:
  * The horizontal scaling factor.
  */
@@ -239,6 +254,10 @@ EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
  * If the calculated scaling factor is above @max_vscale,
  * decrease the height of rectangle @src to compensate.
  *
+ * If the scale is below 1 << 16, round down. If the scale is above
+ * 1 << 16, round up. This will calculate the scale with the most
+ * pessimistic limit calculation.
+ *
  * RETURNS:
  * The vertical scaling factor.
  */

From f96bdf564f3e7511aecdd4c35cc18ac5e0750a2f Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Thu, 3 May 2018 13:22:14 +0200
Subject: [PATCH 0606/1286] drm/rect: Handle rounding errors in
 drm_rect_clip_scaled, v3.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Instead of relying on a scale which may accumulate rounding errors,
clip src directly by computing src * (dst - clip) / dst and rounding
the result toward 1, so the effective scaling factor only gets closer
to 1. We won't need to fix up with a magic macro afterwards, because
the scaling factor will never cross to the other side of 1.
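
A worked example using the values from the selftest added later in this
series (source width 0x3ffff mapped onto 4 pixels, then clipped by 2 pixels):

	/*
	 * tmp = 0x3ffff * (4 - 2) = 0x7fffe; src < (dst << 16), so this is
	 * (marginal) upscaling and the result is rounded up:
	 * DIV_ROUND_UP(0x7fffe, 4) = 0x20000, i.e. exactly two source pixels.
	 * The effective scale moves from ~0.99998 to exactly 1.0, closer to 1
	 * rather than further away, so upscaling never flips to downscaling.
	 */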

Changes since v1:
- Adjust dst immediately, else drm_rect_width/height on dst gives bogus
  results.
Changes since v2:
- Get rid of macros and use 64-bit math.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
[mlankhorst: Add Villes comment, and rename newsrc to tmp. (Ville)]
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-3-maarten.lankhorst@linux.intel.com
---
 drivers/gpu/drm/drm_atomic_helper.c |  2 +-
 drivers/gpu/drm/drm_rect.c          | 49 +++++++++++++++++++++--------
 drivers/gpu/drm/i915/intel_sprite.c |  2 +-
 include/drm/drm_rect.h              |  3 +-
 4 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 9cb2209f6fc8..130da5195f3b 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -766,7 +766,7 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
 	if (crtc_state->enable)
 		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
 
-	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
+	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
 
 	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
 
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index a8e934795c7d..8c057829b804 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -50,13 +50,25 @@ bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
 }
 EXPORT_SYMBOL(drm_rect_intersect);
 
+static u32 clip_scaled(u32 src, u32 dst, u32 clip)
+{
+	u64 tmp = mul_u32_u32(src, dst - clip);
+
+	/*
+	 * Round toward 1.0 when clipping so that we don't accidentally
+	 * change upscaling to downscaling or vice versa.
+	 */
+	if (src < (dst << 16))
+		return DIV_ROUND_UP_ULL(tmp, dst);
+	else
+		return DIV_ROUND_DOWN_ULL(tmp, dst);
+}
+
 /**
  * drm_rect_clip_scaled - perform a scaled clip operation
  * @src: source window rectangle
  * @dst: destination window rectangle
  * @clip: clip rectangle
- * @hscale: horizontal scaling factor
- * @vscale: vertical scaling factor
  *
  * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
  * same amounts multiplied by @hscale and @vscale.
@@ -66,33 +78,44 @@ EXPORT_SYMBOL(drm_rect_intersect);
  * %false otherwise
  */
 bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
-			  const struct drm_rect *clip,
-			  int hscale, int vscale)
+			  const struct drm_rect *clip)
 {
 	int diff;
 
 	diff = clip->x1 - dst->x1;
 	if (diff > 0) {
-		int64_t tmp = src->x1 + (int64_t) diff * hscale;
-		src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+		u32 new_src_w = clip_scaled(drm_rect_width(src),
+					    drm_rect_width(dst), diff);
+
+		src->x1 = clamp_t(int64_t, src->x2 - new_src_w, INT_MIN, INT_MAX);
+		dst->x1 = clip->x1;
 	}
 	diff = clip->y1 - dst->y1;
 	if (diff > 0) {
-		int64_t tmp = src->y1 + (int64_t) diff * vscale;
-		src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+		u32 new_src_h = clip_scaled(drm_rect_height(src),
+					    drm_rect_height(dst), diff);
+
+		src->y1 = clamp_t(int64_t, src->y2 - new_src_h, INT_MIN, INT_MAX);
+		dst->y1 = clip->y1;
 	}
 	diff = dst->x2 - clip->x2;
 	if (diff > 0) {
-		int64_t tmp = src->x2 - (int64_t) diff * hscale;
-		src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+		u32 new_src_w = clip_scaled(drm_rect_width(src),
+					    drm_rect_width(dst), diff);
+
+		src->x2 = clamp_t(int64_t, src->x1 + new_src_w, INT_MIN, INT_MAX);
+		dst->x2 = clip->x2;
 	}
 	diff = dst->y2 - clip->y2;
 	if (diff > 0) {
-		int64_t tmp = src->y2 - (int64_t) diff * vscale;
-		src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+		u32 new_src_h = clip_scaled(drm_rect_height(src),
+					    drm_rect_height(dst), diff);
+
+		src->y2 = clamp_t(int64_t, src->y1 + new_src_h, INT_MIN, INT_MAX);
+		dst->y2 = clip->y2;
 	}
 
-	return drm_rect_intersect(dst, clip);
+	return drm_rect_visible(dst);
 }
 EXPORT_SYMBOL(drm_rect_clip_scaled);
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index dbdcf85032df..e17c26a1cff1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1003,7 +1003,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
 		drm_mode_get_hv_timing(&crtc_state->base.mode,
 				       &clip.x2, &clip.y2);
 
-	state->base.visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
+	state->base.visible = drm_rect_clip_scaled(src, dst, &clip);
 
 	crtc_x = dst->x1;
 	crtc_y = dst->y1;
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 44bc122b9ee0..6c54544a4be7 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -175,8 +175,7 @@ static inline bool drm_rect_equals(const struct drm_rect *r1,
 
 bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
 bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
-			  const struct drm_rect *clip,
-			  int hscale, int vscale);
+			  const struct drm_rect *clip);
 int drm_rect_calc_hscale(const struct drm_rect *src,
 			 const struct drm_rect *dst,
 			 int min_hscale, int max_hscale);

From 9c1659ebe77d7e111dac4bdc7e082136d223ffb5 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Thu, 3 May 2018 13:22:15 +0200
Subject: [PATCH 0607/1286] drm/i915: Do not adjust scale when out of bounds,
 v2.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Now that the previous patch makes drm_atomic_helper_check_plane_state
calculate clipping correctly, and the xf86-video-intel ddx has been
fixed to fall back to the GPU when SetPlane fails, we can remove the
hack where we try to pan/zoom when out of the min/max scaling range.
That was already poor behavior, since the screen didn't show what was
requested; now we reject such requests outright. This simplifies
check_sprite_plane a lot.

Changes since v1:
- Set crtc_h to the height correctly.
- Reject < 3x3 rectangles instead of making them invisible for <gen9.
  For gen9+ skl_update_scaler_plane will reject them.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-4-maarten.lankhorst@linux.intel.com
Acked-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/intel_sprite.c | 150 +++++++---------------------
 1 file changed, 38 insertions(+), 112 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index e17c26a1cff1..344228b640b9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -935,21 +935,11 @@ intel_check_sprite_plane(struct intel_plane *plane,
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_framebuffer *fb = state->base.fb;
-	int crtc_x, crtc_y;
-	unsigned int crtc_w, crtc_h;
-	uint32_t src_x, src_y, src_w, src_h;
-	struct drm_rect *src = &state->base.src;
-	struct drm_rect *dst = &state->base.dst;
-	struct drm_rect clip = {};
 	int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384;
-	int hscale, vscale;
 	int max_scale, min_scale;
 	bool can_scale;
 	int ret;
 
-	*src = drm_plane_state_src(&state->base);
-	*dst = drm_plane_state_dest(&state->base);
-
 	if (!fb) {
 		state->base.visible = false;
 		return 0;
@@ -985,64 +975,19 @@ intel_check_sprite_plane(struct intel_plane *plane,
 		min_scale = plane->can_scale ? 1 : (1 << 16);
 	}
 
-	/*
-	 * FIXME the following code does a bunch of fuzzy adjustments to the
-	 * coordinates and sizes. We probably need some way to decide whether
-	 * more strict checking should be done instead.
-	 */
-	drm_rect_rotate(src, fb->width << 16, fb->height << 16,
-			state->base.rotation);
-
-	hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
-	BUG_ON(hscale < 0);
-
-	vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
-	BUG_ON(vscale < 0);
-
-	if (crtc_state->base.enable)
-		drm_mode_get_hv_timing(&crtc_state->base.mode,
-				       &clip.x2, &clip.y2);
-
-	state->base.visible = drm_rect_clip_scaled(src, dst, &clip);
-
-	crtc_x = dst->x1;
-	crtc_y = dst->y1;
-	crtc_w = drm_rect_width(dst);
-	crtc_h = drm_rect_height(dst);
+	ret = drm_atomic_helper_check_plane_state(&state->base,
+						  &crtc_state->base,
+						  min_scale, max_scale,
+						  true, true);
+	if (ret)
+		return ret;
 
 	if (state->base.visible) {
-		/* check again in case clipping clamped the results */
-		hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
-		if (hscale < 0) {
-			DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
-			drm_rect_debug_print("src: ", src, true);
-			drm_rect_debug_print("dst: ", dst, false);
-
-			return hscale;
-		}
-
-		vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
-		if (vscale < 0) {
-			DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
-			drm_rect_debug_print("src: ", src, true);
-			drm_rect_debug_print("dst: ", dst, false);
-
-			return vscale;
-		}
-
-		/* Make the source viewport size an exact multiple of the scaling factors. */
-		drm_rect_adjust_size(src,
-				     drm_rect_width(dst) * hscale - drm_rect_width(src),
-				     drm_rect_height(dst) * vscale - drm_rect_height(src));
-
-		drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
-				    state->base.rotation);
-
-		/* sanity check to make sure the src viewport wasn't enlarged */
-		WARN_ON(src->x1 < (int) state->base.src_x ||
-			src->y1 < (int) state->base.src_y ||
-			src->x2 > (int) state->base.src_x + state->base.src_w ||
-			src->y2 > (int) state->base.src_y + state->base.src_h);
+		struct drm_rect *src = &state->base.src;
+		struct drm_rect *dst = &state->base.dst;
+		unsigned int crtc_w = drm_rect_width(dst);
+		unsigned int crtc_h = drm_rect_height(dst);
+		uint32_t src_x, src_y, src_w, src_h;
 
 		/*
 		 * Hardware doesn't handle subpixel coordinates.
@@ -1055,57 +1000,38 @@ intel_check_sprite_plane(struct intel_plane *plane,
 		src_y = src->y1 >> 16;
 		src_h = drm_rect_height(src) >> 16;
 
-		if (intel_format_is_yuv(fb->format->format)) {
-			src_x &= ~1;
-			src_w &= ~1;
-
-			/*
-			 * Must keep src and dst the
-			 * same if we can't scale.
-			 */
-			if (!can_scale)
-				crtc_w &= ~1;
-
-			if (crtc_w == 0)
-				state->base.visible = false;
-		}
-	}
-
-	/* Check size restrictions when scaling */
-	if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
-		unsigned int width_bytes;
-		int cpp = fb->format->cpp[0];
-
-		WARN_ON(!can_scale);
-
-		/* FIXME interlacing min height is 6 */
-
-		if (crtc_w < 3 || crtc_h < 3)
-			state->base.visible = false;
-
-		if (src_w < 3 || src_h < 3)
-			state->base.visible = false;
-
-		width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
-
-		if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 ||
-		    width_bytes > 4096 || fb->pitches[0] > 4096)) {
-			DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
-			return -EINVAL;
-		}
-	}
-
-	if (state->base.visible) {
 		src->x1 = src_x << 16;
 		src->x2 = (src_x + src_w) << 16;
 		src->y1 = src_y << 16;
 		src->y2 = (src_y + src_h) << 16;
-	}
 
-	dst->x1 = crtc_x;
-	dst->x2 = crtc_x + crtc_w;
-	dst->y1 = crtc_y;
-	dst->y2 = crtc_y + crtc_h;
+		if (intel_format_is_yuv(fb->format->format) &&
+		    (src_x % 2 || src_w % 2)) {
+			DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
+				      src_x, src_w);
+			return -EINVAL;
+		}
+
+		/* Check size restrictions when scaling */
+		if (src_w != crtc_w || src_h != crtc_h) {
+			unsigned int width_bytes;
+			int cpp = fb->format->cpp[0];
+
+			WARN_ON(!can_scale);
+
+			width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
+
+			/* FIXME interlacing min height is 6 */
+			if (INTEL_GEN(dev_priv) < 9 && (
+			     src_w < 3 || src_h < 3 ||
+			     src_w > 2048 || src_h > 2048 ||
+			     crtc_w < 3 || crtc_h < 3 ||
+			     width_bytes > 4096 || fb->pitches[0] > 4096)) {
+				DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
+				return -EINVAL;
+			}
+		}
+	}
 
 	if (INTEL_GEN(dev_priv) >= 9) {
 		ret = skl_check_plane_surface(crtc_state, state);

From 34b13e5e4641c0e9e0aad471a6d8dfb7999276f1 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Thu, 3 May 2018 13:22:16 +0200
Subject: [PATCH 0608/1286] drm/selftests: Rename the Kconfig option to
 CONFIG_DRM_DEBUG_SELFTEST
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want to add more DRM selftests, and there's not much point in
having a Kconfig option for every single one of them, so make
a generic one.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-5-maarten.lankhorst@linux.intel.com
[mlankhorst: Fix i915/Kconfig.debug (ickle)]
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/Kconfig            | 8 ++++----
 drivers/gpu/drm/Makefile           | 2 +-
 drivers/gpu/drm/i915/Kconfig.debug | 2 +-
 drivers/gpu/drm/selftests/Makefile | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1c73a455fdb1..aa0b0d830beb 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -49,16 +49,16 @@ config DRM_DEBUG_MM
 
 	  If in doubt, say "N".
 
-config DRM_DEBUG_MM_SELFTEST
-	tristate "kselftests for DRM range manager (struct drm_mm)"
+config DRM_DEBUG_SELFTEST
+	tristate "kselftests for DRM"
 	depends on DRM
 	depends on DEBUG_KERNEL
 	select PRIME_NUMBERS
 	select DRM_LIB_RANDOM
 	default n
 	help
-	  This option provides a kernel module that can be used to test
-	  the DRM range manager (drm_mm) and its API. This option is not
+	  This option provides kernel modules that can be used to run
+	  various selftests on parts of the DRM api. This option is not
 	  useful for distributions or general kernels, but only for kernel
 	  developers working on DRM and associated drivers.
 
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7a401edd8761..ef9f3dab287f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -43,7 +43,7 @@ drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
-obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
+obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
 
 obj-$(CONFIG_DRM)	+= drm.o
 obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 108d21f34777..8c7972df9f0f 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -25,7 +25,7 @@ config DRM_I915_DEBUG
         select X86_MSR # used by igt/pm_rpm
         select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
         select DRM_DEBUG_MM if DRM=y
-	select DRM_DEBUG_MM_SELFTEST
+	select DRM_DEBUG_SELFTEST
 	select SW_SYNC # signaling validation framework (igt/syncobj*)
 	select DRM_I915_SW_FENCE_DEBUG_OBJECTS
 	select DRM_I915_SELFTEST
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index 4aebfc7f27d4..f7dd66e859a9 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += test-drm_mm.o
+obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o

From 7420e04963587dc8f6d4e8b7e79b3ad7ab5c5300 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Thu, 3 May 2018 13:22:17 +0200
Subject: [PATCH 0609/1286] drm/selftests: Add drm helper selftest
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-6-maarten.lankhorst@linux.intel.com
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/Kconfig                       |   1 +
 drivers/gpu/drm/selftests/Makefile            |   2 +-
 .../gpu/drm/selftests/drm_helper_selftests.h  |   9 +
 drivers/gpu/drm/selftests/test-drm-helper.c   | 247 ++++++++++++++++++
 4 files changed, 258 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/selftests/drm_helper_selftests.h
 create mode 100644 drivers/gpu/drm/selftests/test-drm-helper.c

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index aa0b0d830beb..2a72d2feb76d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -55,6 +55,7 @@ config DRM_DEBUG_SELFTEST
 	depends on DEBUG_KERNEL
 	select PRIME_NUMBERS
 	select DRM_LIB_RANDOM
+	select DRM_KMS_HELPER
 	default n
 	help
 	  This option provides kernel modules that can be used to run
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index f7dd66e859a9..9fc349fa18e9 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o
+obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm-helper.o
diff --git a/drivers/gpu/drm/selftests/drm_helper_selftests.h b/drivers/gpu/drm/selftests/drm_helper_selftests.h
new file mode 100644
index 000000000000..9771290ed228
--- /dev/null
+++ b/drivers/gpu/drm/selftests/drm_helper_selftests.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as igt__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * Tests are executed in order by igt/drm_selftests_helper
+ */
+selftest(check_plane_state, igt_check_plane_state)
diff --git a/drivers/gpu/drm/selftests/test-drm-helper.c b/drivers/gpu/drm/selftests/test-drm-helper.c
new file mode 100644
index 000000000000..a015712b43e8
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm-helper.c
@@ -0,0 +1,247 @@
+/*
+ * Test cases for the drm_kms_helper functions
+ */
+
+#define pr_fmt(fmt) "drm_kms_helper: " fmt
+
+#include <linux/module.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_modes.h>
+
+#define TESTS "drm_helper_selftests.h"
+#include "drm_selftest.h"
+
+#define FAIL(test, msg, ...) \
+	do { \
+		if (test) { \
+			pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
+			return -EINVAL; \
+		} \
+	} while (0)
+
+#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
+
+static void set_src(struct drm_plane_state *plane_state,
+		    unsigned src_x, unsigned src_y,
+		    unsigned src_w, unsigned src_h)
+{
+	plane_state->src_x = src_x;
+	plane_state->src_y = src_y;
+	plane_state->src_w = src_w;
+	plane_state->src_h = src_h;
+}
+
+static bool check_src_eq(struct drm_plane_state *plane_state,
+			 unsigned src_x, unsigned src_y,
+			 unsigned src_w, unsigned src_h)
+{
+	if (plane_state->src.x1 < 0) {
+		pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1);
+		drm_rect_debug_print("src: ", &plane_state->src, true);
+		return false;
+	}
+	if (plane_state->src.y1 < 0) {
+		pr_err("src y coordinate %x should never be below 0.\n", plane_state->src.y1);
+		drm_rect_debug_print("src: ", &plane_state->src, true);
+		return false;
+	}
+
+	if (plane_state->src.x1 != src_x ||
+	    plane_state->src.y1 != src_y ||
+	    drm_rect_width(&plane_state->src) != src_w ||
+	    drm_rect_height(&plane_state->src) != src_h) {
+		drm_rect_debug_print("src: ", &plane_state->src, true);
+		return false;
+	}
+
+	return true;
+}
+
+static void set_crtc(struct drm_plane_state *plane_state,
+		     int crtc_x, int crtc_y,
+		     unsigned crtc_w, unsigned crtc_h)
+{
+	plane_state->crtc_x = crtc_x;
+	plane_state->crtc_y = crtc_y;
+	plane_state->crtc_w = crtc_w;
+	plane_state->crtc_h = crtc_h;
+}
+
+static bool check_crtc_eq(struct drm_plane_state *plane_state,
+			  int crtc_x, int crtc_y,
+			  unsigned crtc_w, unsigned crtc_h)
+{
+	if (plane_state->dst.x1 != crtc_x ||
+	    plane_state->dst.y1 != crtc_y ||
+	    drm_rect_width(&plane_state->dst) != crtc_w ||
+	    drm_rect_height(&plane_state->dst) != crtc_h) {
+		drm_rect_debug_print("dst: ", &plane_state->dst, false);
+
+		return false;
+	}
+
+	return true;
+}
+
+static int igt_check_plane_state(void *ignored)
+{
+	int ret;
+
+	const struct drm_crtc_state crtc_state = {
+		.crtc = ZERO_SIZE_PTR,
+		.enable = true,
+		.active = true,
+		.mode = {
+			DRM_MODE("1024x768", 0, 65000, 1024, 1048,
+				1184, 1344, 0, 768, 771, 777, 806, 0,
+				DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+		},
+	};
+	struct drm_framebuffer fb = {
+		.width = 2048,
+		.height = 2048
+	};
+	struct drm_plane_state plane_state = {
+		.crtc = ZERO_SIZE_PTR,
+		.fb = &fb,
+		.rotation = DRM_MODE_ROTATE_0
+	};
+
+	/* Simple clipping, no scaling. */
+	set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16);
+	set_crtc(&plane_state, 0, 0, fb.width, fb.height);
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, false);
+	FAIL(ret < 0, "Simple clipping check should pass\n");
+	FAIL_ON(!plane_state.visible);
+	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+	/* Rotated clipping + reflection, no scaling. */
+	plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X;
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, false);
+	FAIL(ret < 0, "Rotated clipping check should pass\n");
+	FAIL_ON(!plane_state.visible);
+	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+	plane_state.rotation = DRM_MODE_ROTATE_0;
+
+	/* Check whether positioning works correctly. */
+	set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16);
+	set_crtc(&plane_state, 0, 0, 1023, 767);
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, false);
+	FAIL(!ret, "Should not be able to position on the crtc with can_position=false\n");
+
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  true, false);
+	FAIL(ret < 0, "Simple positioning should work\n");
+	FAIL_ON(!plane_state.visible);
+	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1023, 767));
+
+	/* Simple scaling tests. */
+	set_src(&plane_state, 0, 0, 512 << 16, 384 << 16);
+	set_crtc(&plane_state, 0, 0, 1024, 768);
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  0x8001,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, false);
+	FAIL(!ret, "Upscaling out of range should fail.\n");
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  0x8000,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, false);
+	FAIL(ret < 0, "Upscaling exactly 2x should work\n");
+	FAIL_ON(!plane_state.visible);
+	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+	set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16);
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  0x1ffff, false, false);
+	FAIL(!ret, "Downscaling out of range should fail.\n");
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  0x20000, false, false);
+	FAIL(ret < 0, "Should succeed with exact scaling limit\n");
+	FAIL_ON(!plane_state.visible);
+	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+	/* Testing rounding errors. */
+	set_src(&plane_state, 0, 0, 0x40001, 0x40001);
+	set_crtc(&plane_state, 1022, 766, 4, 4);
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  0x10001,
+						  true, false);
+	FAIL(ret < 0, "Should succeed by clipping to exact multiple");
+	FAIL_ON(!plane_state.visible);
+	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2));
+
+	set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001);
+	set_crtc(&plane_state, -2, -2, 1028, 772);
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  0x10001,
+						  false, false);
+	FAIL(ret < 0, "Should succeed by clipping to exact multiple");
+	FAIL_ON(!plane_state.visible);
+	FAIL_ON(!check_src_eq(&plane_state, 0x40002, 0x40002, 1024 << 16, 768 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+	set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff);
+	set_crtc(&plane_state, 1022, 766, 4, 4);
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  0xffff,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  true, false);
+	FAIL(ret < 0, "Should succeed by clipping to exact multiple");
+	FAIL_ON(!plane_state.visible);
+	/* Should not be rounded to 0x20001, which would be upscaling. */
+	FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2));
+
+	set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff);
+	set_crtc(&plane_state, -2, -2, 1028, 772);
+	ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+						  0xffff,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, false);
+	FAIL(ret < 0, "Should succeed by clipping to exact multiple");
+	FAIL_ON(!plane_state.visible);
+	FAIL_ON(!check_src_eq(&plane_state, 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16));
+	FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+	return 0;
+}
+
+#include "drm_selftest.c"
+
+static int __init test_drm_helper_init(void)
+{
+	int err;
+
+	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
+
+	return err > 0 ? 0 : err;
+}
+
+module_init(test_drm_helper_init);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");

From b1a3dc0b85bde4d8d549ea3aa31106b599694f37 Mon Sep 17 00:00:00 2001
From: Stefan Schake <stschake@gmail.com>
Date: Wed, 18 Apr 2018 03:40:19 -0700
Subject: [PATCH 0610/1286] drm/tegra: hub: Use state directly

Using drm_atomic_get_private_obj_state() after the state has been
swapped returns the old state.
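
A minimal sketch of the rule the fix follows (assuming the driver's hub.h
definitions are in scope; example_committed_hub_state() is a hypothetical
helper): once drm_atomic_helper_swap_state() has run, the private object's
->state pointer already holds the new state, so read it directly instead of
looking it up in the old atomic state.

	static struct tegra_display_hub_state *
	example_committed_hub_state(struct tegra_display_hub *hub)
	{
		/* only valid after drm_atomic_helper_swap_state() */
		return to_tegra_display_hub_state(hub->base.state);
	}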

Fixes: 0281c4149021 ("drm/tegra: hub: Use private object for global state")
Signed-off-by: Stefan Schake <stschake@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/hub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 9a3f23d4780f..8f4fcbb515fb 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -687,7 +687,7 @@ void tegra_display_hub_atomic_commit(struct drm_device *drm,
 	struct device *dev = hub->client.dev;
 	int err;
 
-	hub_state = tegra_display_hub_get_state(hub, state);
+	hub_state = to_tegra_display_hub_state(hub->base.state);
 
 	if (hub_state->clk) {
 		err = clk_set_rate(hub_state->clk, hub_state->rate);

From c258f91d8ae737cd46a1201aa63655ec8b878ed5 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 3 May 2018 22:29:56 +0100
Subject: [PATCH 0611/1286] drm/i915/gtt: Tidy up duplicate branches in
 gen8_gmch_probe()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Following commit f773568b6ff8 ("drm/i915: nuke the duplicated stolen
discovery"), the if-else chain for determining the GTT size is
redundant, as the !chv branches are all the same.

Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
References: f773568b6ff8 ("drm/i915: nuke the duplicated stolen discovery")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503212956.3948-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 272d6bb407cc..c879bfd9294f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3326,14 +3326,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
 
 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
-	if (INTEL_GEN(dev_priv) >= 9) {
-		size = gen8_get_total_gtt_size(snb_gmch_ctl);
-	} else if (IS_CHERRYVIEW(dev_priv)) {
+	if (IS_CHERRYVIEW(dev_priv))
 		size = chv_get_total_gtt_size(snb_gmch_ctl);
-	} else {
+	else
 		size = gen8_get_total_gtt_size(snb_gmch_ctl);
-	}
 
 	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
 	ggtt->base.cleanup = gen6_gmch_remove;

From 43c8c44105e30d912746a6dbd10c59ef42c230f0 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 4 May 2018 11:11:47 +0100
Subject: [PATCH 0612/1286] drm/i915: Remove assertion of active_rings must be
 non-empty if active_requests

"An outstanding request must still be on an active ring somewhere" is
only true if we haven't just been interrupted by the shrinker in the
middle of allocating the request itself. (At the start of
i915_request_alloc() we pin the context and prepare the GT for activity,
marking it as active, and then try to allocate the request. If this
allocation invokes the shrinker, we try to reclaim some space by calling
i915_retire_requests() which may then be confused by the pre-reservation
of active_requests.)

<3>[  125.472695] i915_retire_requests:1429 GEM_BUG_ON(list_empty(&i915->gt.active_rings))
<2>[  125.472792] kernel BUG at drivers/gpu/drm/i915/i915_request.c:1429!
<4>[  125.472822] invalid opcode: 0000 [#1] PREEMPT SMP KASAN PTI
<4>[  125.498764] Modules linked in: snd_hda_codec_hdmi x86_pkg_temp_thermal intel_powerclamp coretemp crct10dif_pclmul crc32_pclmul ghash_clmulni_intel btusb btrtl btbcm btintel cdc_ether snd_hda_codec_realtek bluetooth i915 snd_hda_codec_generic usbnet r8152 mii ecdh_generic lpc_ich mei_me snd_hda_intel snd_hda_codec mei snd_hwdep snd_hda_core snd_pcm prime_numbers
<4>[  125.498923] CPU: 0 PID: 1115 Comm: gem_exec_create Tainted: G     U            4.17.0-rc3-gc49cbe0d1eb8-kasan_32+ #1
<4>[  125.498955] Hardware name: GOOGLE Peppy/Peppy, BIOS MrChromebox 02/04/2018
<4>[  125.499074] RIP: 0010:i915_retire_requests+0x3f2/0x590 [i915]
<4>[  125.499095] RSP: 0018:ffff88004e5dec40 EFLAGS: 00010282
<4>[  125.499117] RAX: 0000000000000010 RBX: ffff8800458f0000 RCX: 0000000000000000
<4>[  125.499140] RDX: dffffc0000000000 RSI: 0000000000000008 RDI: ffff880060c2f6f0
<4>[  125.499164] RBP: ffff88004e5dee30 R08: ffffed000c185ee6 R09: ffffed000c185ee6
<4>[  125.499187] R10: 0000000000000001 R11: ffffed000c185ee5 R12: ffff8800553da160
<4>[  125.499210] R13: dffffc0000000000 R14: 0000000000000000 R15: ffff8800458faed0
<4>[  125.499235] FS:  00007fe18f052980(0000) GS:ffff880065400000(0000) knlGS:0000000000000000
<4>[  125.499262] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
<4>[  125.499282] CR2: 00007f01df11efb8 CR3: 00000000518d4001 CR4: 00000000000606f0
<4>[  125.499304] Call Trace:
<4>[  125.499417]  i915_gem_shrink+0x576/0xb50 [i915]
<4>[  125.499532]  ? i915_gem_shrinker_count+0x2f0/0x2f0 [i915]
<4>[  125.499561]  ? trace_hardirqs_on_thunk+0x1a/0x1c
<4>[  125.499671]  ? i915_gem_shrinker_count+0x1d6/0x2f0 [i915]
<4>[  125.499782]  ? i915_gem_shrinker_scan+0xc4/0x320 [i915]
<4>[  125.499889]  i915_gem_shrinker_scan+0xc4/0x320 [i915]
<4>[  125.499997]  ? i915_gem_shrinker_vmap+0x3a0/0x3a0 [i915]
<4>[  125.500021]  ? do_raw_spin_unlock+0x4f/0x240
<4>[  125.500042]  ? _raw_spin_unlock+0x29/0x40
<4>[  125.500149]  ? i915_gem_shrinker_count+0x1d6/0x2f0 [i915]
<4>[  125.500177]  shrink_slab.part.18+0x23e/0x8f0
<4>[  125.500202]  ? unregister_shrinker+0x1f0/0x1f0
<4>[  125.500226]  ? mem_cgroup_iter+0x379/0xcc0
<4>[  125.500249]  shrink_node+0xa7e/0x1180
<4>[  125.500276]  ? shrink_node_memcg+0x11f0/0x11f0
<4>[  125.500297]  ? __delayacct_freepages_start+0x38/0x80
<4>[  125.500319]  ? __is_insn_slot_addr+0xe3/0x1a0
<4>[  125.500342]  ? recalibrate_cpu_khz+0x10/0x10
<4>[  125.500361]  ? ktime_get+0xb2/0x140
<4>[  125.500382]  do_try_to_free_pages+0x2d3/0xe40
<4>[  125.500407]  ? allow_direct_reclaim.part.23+0x1e0/0x1e0
<4>[  125.500429]  ? shrink_node+0x1180/0x1180
<4>[  125.500450]  ? __read_once_size_nocheck.constprop.4+0x10/0x10
<4>[  125.500476]  try_to_free_pages+0x1af/0x560
<4>[  125.500497]  ? do_try_to_free_pages+0xe40/0xe40
<4>[  125.500525]  __alloc_pages_nodemask+0xadc/0x2130
<4>[  125.500553]  ? gfp_pfmemalloc_allowed+0x150/0x150
<4>[  125.500654]  ? i915_gem_do_execbuffer+0x219d/0x32e0 [i915]
<4>[  125.500678]  ? debug_check_no_locks_freed+0x2a0/0x2a0
<4>[  125.500701]  ? __debug_object_init+0x322/0xd90
<4>[  125.500722]  ? debug_check_no_locks_freed+0x2a0/0x2a0
<4>[  125.500827]  ? i915_gem_do_execbuffer+0xdc2/0x32e0 [i915]
<4>[  125.500942]  ? i915_request_alloc+0x5b5/0x13f0 [i915]
<4>[  125.500964]  ? page_frag_free+0x170/0x170
<4>[  125.500984]  ? debug_check_no_locks_freed+0x2a0/0x2a0
<4>[  125.501008]  new_slab+0x21d/0x5c0
<4>[  125.501029]  ___slab_alloc.constprop.35+0x322/0x3e0
<4>[  125.501052]  ? reservation_object_reserve_shared+0x10b/0x250
<4>[  125.501074]  ? __ww_mutex_lock.constprop.3+0x1104/0x2cf0
<4>[  125.501097]  ? _raw_spin_unlock_irqrestore+0x39/0x60
<4>[  125.501120]  ? fs_reclaim_acquire+0x10/0x10
<4>[  125.501138]  ? lock_acquire+0x138/0x3c0
<4>[  125.501156]  ? lock_acquire+0x3c0/0x3c0
<4>[  125.501176]  ? reservation_object_reserve_shared+0x10b/0x250
<4>[  125.501198]  ? __slab_alloc.isra.27.constprop.34+0x3d/0x70
<4>[  125.501219]  __slab_alloc.isra.27.constprop.34+0x3d/0x70
<4>[  125.501243]  ? reservation_object_reserve_shared+0x10b/0x250
<4>[  125.501265]  __kmalloc_track_caller+0x313/0x350
<4>[  125.501287]  krealloc+0x62/0xb0
<4>[  125.501305]  reservation_object_reserve_shared+0x10b/0x250
<4>[  125.501411]  i915_gem_do_execbuffer+0x2040/0x32e0 [i915]
<4>[  125.501522]  ? eb_relocate_slow+0xad0/0xad0 [i915]
<4>[  125.501544]  ? debug_check_no_locks_freed+0x2a0/0x2a0
<4>[  125.501646]  ? i915_gem_execbuffer2_ioctl+0x108/0x770 [i915]
<4>[  125.501755]  ? i915_gem_execbuffer2_ioctl+0x108/0x770 [i915]
<4>[  125.501779]  ? drm_dev_get+0x20/0x20
<4>[  125.501803]  ? __might_fault+0xea/0x1a0
<4>[  125.501902]  ? i915_gem_execbuffer2_ioctl+0x108/0x770 [i915]
<4>[  125.502012]  ? i915_gem_execbuffer_ioctl+0xb90/0xb90 [i915]
<4>[  125.502116]  ? i915_gem_execbuffer_ioctl+0xb90/0xb90 [i915]
<4>[  125.502218]  i915_gem_execbuffer2_ioctl+0x3c5/0x770 [i915]
<4>[  125.502243]  ? drm_dev_enter+0xe0/0xe0
<4>[  125.502260]  ? lock_acquire+0x138/0x3c0
<4>[  125.502362]  ? i915_gem_execbuffer_ioctl+0xb90/0xb90 [i915]
<4>[  125.502470]  ? i915_gem_object_create.part.28+0x570/0x570 [i915]
<4>[  125.502575]  ? i915_gem_execbuffer_ioctl+0xb90/0xb90 [i915]
<4>[  125.502680]  ? i915_gem_execbuffer_ioctl+0xb90/0xb90 [i915]
<4>[  125.502702]  drm_ioctl_kernel+0x151/0x200
<4>[  125.502721]  ? drm_ioctl_permit+0x2a0/0x2a0
<4>[  125.502746]  drm_ioctl+0x63a/0x920
<4>[  125.502844]  ? i915_gem_execbuffer_ioctl+0xb90/0xb90 [i915]
<4>[  125.502868]  ? drm_getstats+0x20/0x20
<4>[  125.502886]  ? trace_hardirqs_on_thunk+0x1a/0x1c
<4>[  125.502919]  do_vfs_ioctl+0x173/0xe90
<4>[  125.502936]  ? trace_hardirqs_on_thunk+0x1a/0x1c
<4>[  125.502957]  ? ioctl_preallocate+0x170/0x170
<4>[  125.502978]  ? trace_hardirqs_on_thunk+0x1a/0x1c
<4>[  125.503002]  ? retint_kernel+0x2d/0x2d
<4>[  125.503024]  ksys_ioctl+0x35/0x60
<4>[  125.503043]  __x64_sys_ioctl+0x6a/0xb0
<4>[  125.503061]  do_syscall_64+0x97/0x400
<4>[  125.503081]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
<4>[  125.503101] RIP: 0033:0x7fe18e4f65d7
<4>[  125.503116] RSP: 002b:00007ffe2ffc06a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
<4>[  125.503145] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fe18e4f65d7
<4>[  125.503168] RDX: 00007ffe2ffc07f0 RSI: 0000000040406469 RDI: 0000000000000003
<4>[  125.503191] RBP: 00007ffe2ffc07f0 R08: 0000000000000004 R09: 00007ffe2ffcf080
<4>[  125.503215] R10: 000000000002c7de R11: 0000000000000246 R12: 0000000040406469
<4>[  125.503238] R13: 0000000000000003 R14: 0000000000000000 R15: 0000000000000000
<4>[  125.503268] Code: e8 18 a0 c9 da 48 8b 35 25 3a 47 00 49 c7 c0 a0 3b 88 c0 b9 95 05 00 00 48 c7 c2 e0 49 88 c0 48 c7 c7 8d 3b 5d c0 e8 ee 7e db da <0f> 0b 48 89 ef e8 a4 26 f5 da e9 51 fe ff ff e8 8a 26 f5 da e9
<1>[  125.503548] RIP: i915_retire_requests+0x3f2/0x590 [i915] RSP: ffff88004e5dec40

Fixes: 643b450a594e ("drm/i915: Only track live rings for retiring")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180504101147.26286-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index d68739b94dac..e4cf76ec14a6 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1426,9 +1426,6 @@ void i915_retire_requests(struct drm_i915_private *i915)
 	if (!i915->gt.active_requests)
 		return;
 
-	/* An outstanding request must be on a still active ring somewhere */
-	GEM_BUG_ON(list_empty(&i915->gt.active_rings));
-
 	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
 		ring_retire_requests(ring);
 }

From 6f75b16b2683eb7c86ce2c8d150bf3fa759103b9 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko <digetx@gmail.com>
Date: Fri, 4 May 2018 02:47:19 +0300
Subject: [PATCH 0613/1286] drm/tegra: dc: Balance IOMMU group refcounting

Remove the unneeded iommu_group_get() and add the missing
iommu_group_put(), correcting the IOMMU group refcount. This is a minor
correction / cleanup that doesn't really fix anything, because Tegra's
IOMMU drivers are built-in and hence group refcounting can't keep the
IOMMU driver from unloading.
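
A minimal sketch of the balanced pattern this establishes
(example_attach() is a hypothetical helper, not driver code): every
successful iommu_group_get() is matched by exactly one iommu_group_put(),
including on the attach error path.

	#include <linux/device.h>
	#include <linux/iommu.h>

	static int example_attach(struct device *dev, struct iommu_domain *domain,
				  struct iommu_group **group)
	{
		int err;

		*group = iommu_group_get(dev);	/* takes a reference */
		if (!*group)
			return 0;		/* no IOMMU group, nothing to balance */

		err = iommu_attach_group(domain, *group);
		if (err < 0) {
			iommu_group_put(*group);	/* drop the reference on failure */
			*group = NULL;
		}

		return err;
	}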

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/dc.c | 31 +++++++++++++++----------------
 drivers/gpu/drm/tegra/dc.h |  2 +-
 2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 9f83a65b5ea9..f20648f58e49 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1826,7 +1826,6 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
 static int tegra_dc_init(struct host1x_client *client)
 {
 	struct drm_device *drm = dev_get_drvdata(client->parent);
-	struct iommu_group *group = iommu_group_get(client->dev);
 	unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	struct tegra_drm *tegra = drm->dev_private;
@@ -1838,20 +1837,21 @@ static int tegra_dc_init(struct host1x_client *client)
 	if (!dc->syncpt)
 		dev_warn(dc->dev, "failed to allocate syncpoint\n");
 
-	if (group && tegra->domain) {
-		if (group != tegra->group) {
-			err = iommu_attach_group(tegra->domain, group);
+	if (tegra->domain) {
+		dc->group = iommu_group_get(client->dev);
+
+		if (dc->group && dc->group != tegra->group) {
+			err = iommu_attach_group(tegra->domain, dc->group);
 			if (err < 0) {
 				dev_err(dc->dev,
 					"failed to attach to domain: %d\n",
 					err);
+				iommu_group_put(dc->group);
 				return err;
 			}
 
-			tegra->group = group;
+			tegra->group = dc->group;
 		}
-
-		dc->domain = tegra->domain;
 	}
 
 	if (dc->soc->wgrps)
@@ -1916,13 +1916,13 @@ cleanup:
 	if (!IS_ERR(primary))
 		drm_plane_cleanup(primary);
 
-	if (group && dc->domain) {
-		if (group == tegra->group) {
-			iommu_detach_group(dc->domain, group);
+	if (dc->group) {
+		if (dc->group == tegra->group) {
+			iommu_detach_group(tegra->domain, dc->group);
 			tegra->group = NULL;
 		}
 
-		dc->domain = NULL;
+		iommu_group_put(dc->group);
 	}
 
 	return err;
@@ -1931,7 +1931,6 @@ cleanup:
 static int tegra_dc_exit(struct host1x_client *client)
 {
 	struct drm_device *drm = dev_get_drvdata(client->parent);
-	struct iommu_group *group = iommu_group_get(client->dev);
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	struct tegra_drm *tegra = drm->dev_private;
 	int err;
@@ -1944,13 +1943,13 @@ static int tegra_dc_exit(struct host1x_client *client)
 		return err;
 	}
 
-	if (group && dc->domain) {
-		if (group == tegra->group) {
-			iommu_detach_group(dc->domain, group);
+	if (dc->group) {
+		if (dc->group == tegra->group) {
+			iommu_detach_group(tegra->domain, dc->group);
 			tegra->group = NULL;
 		}
 
-		dc->domain = NULL;
+		iommu_group_put(dc->group);
 	}
 
 	host1x_syncpt_free(dc->syncpt);
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index d2b50d32de4d..7be786febb17 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -92,7 +92,7 @@ struct tegra_dc {
 
 	const struct tegra_dc_soc_info *soc;
 
-	struct iommu_domain *domain;
+	struct iommu_group *group;
 };
 
 static inline struct tegra_dc *

From 5fda01b50d769d600c34cb00ab15ce1b6a66c028 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko <digetx@gmail.com>
Date: Fri, 4 May 2018 02:47:20 +0300
Subject: [PATCH 0614/1286] drm/tegra: gr2d: Add IOMMU support

Attach GR2D to the display IOMMU group in order to give GR2D access
to BOs' IOVAs.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/gr2d.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 9a8ea93016a9..8eb530a85dd0 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -7,12 +7,14 @@
  */
 
 #include <linux/clk.h>
+#include <linux/iommu.h>
 
 #include "drm.h"
 #include "gem.h"
 #include "gr2d.h"
 
 struct gr2d {
+	struct iommu_group *group;
 	struct tegra_drm_client client;
 	struct host1x_channel *channel;
 	struct clk *clk;
@@ -30,7 +32,9 @@ static int gr2d_init(struct host1x_client *client)
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
 	struct drm_device *dev = dev_get_drvdata(client->parent);
 	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+	struct tegra_drm *tegra = dev->dev_private;
 	struct gr2d *gr2d = to_gr2d(drm);
+	int err;
 
 	gr2d->channel = host1x_channel_request(client->dev);
 	if (!gr2d->channel)
@@ -42,23 +46,46 @@ static int gr2d_init(struct host1x_client *client)
 		return -ENOMEM;
 	}
 
-	return tegra_drm_register_client(dev->dev_private, drm);
+	if (tegra->domain) {
+		gr2d->group = iommu_group_get(client->dev);
+
+		if (gr2d->group) {
+			err = iommu_attach_group(tegra->domain, gr2d->group);
+			if (err < 0) {
+				dev_err(client->dev,
+					"failed to attach to domain: %d\n",
+					err);
+				host1x_syncpt_free(client->syncpts[0]);
+				host1x_channel_put(gr2d->channel);
+				iommu_group_put(gr2d->group);
+				return err;
+			}
+		}
+	}
+
+	return tegra_drm_register_client(tegra, drm);
 }
 
 static int gr2d_exit(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
 	struct drm_device *dev = dev_get_drvdata(client->parent);
+	struct tegra_drm *tegra = dev->dev_private;
 	struct gr2d *gr2d = to_gr2d(drm);
 	int err;
 
-	err = tegra_drm_unregister_client(dev->dev_private, drm);
+	err = tegra_drm_unregister_client(tegra, drm);
 	if (err < 0)
 		return err;
 
 	host1x_syncpt_free(client->syncpts[0]);
 	host1x_channel_put(gr2d->channel);
 
+	if (gr2d->group) {
+		iommu_detach_group(tegra->domain, gr2d->group);
+		iommu_group_put(gr2d->group);
+	}
+
 	return 0;
 }
 

From c9ac52175b38e7f22fe37b9f943973d9095e53b7 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko <digetx@gmail.com>
Date: Fri, 4 May 2018 02:47:21 +0300
Subject: [PATCH 0615/1286] drm/tegra: gr3d: Add IOMMU support

Attach GR3D to the display IOMMU group in order to give GR3D access
to BOs' IOVAs.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/gr3d.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 28c4ef63065b..ce5120683091 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -9,6 +9,7 @@
 
 #include <linux/clk.h>
 #include <linux/host1x.h>
+#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
@@ -20,6 +21,7 @@
 #include "gr3d.h"
 
 struct gr3d {
+	struct iommu_group *group;
 	struct tegra_drm_client client;
 	struct host1x_channel *channel;
 	struct clk *clk_secondary;
@@ -40,7 +42,9 @@ static int gr3d_init(struct host1x_client *client)
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
 	struct drm_device *dev = dev_get_drvdata(client->parent);
 	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+	struct tegra_drm *tegra = dev->dev_private;
 	struct gr3d *gr3d = to_gr3d(drm);
+	int err;
 
 	gr3d->channel = host1x_channel_request(client->dev);
 	if (!gr3d->channel)
@@ -52,6 +56,23 @@ static int gr3d_init(struct host1x_client *client)
 		return -ENOMEM;
 	}
 
+	if (tegra->domain) {
+		gr3d->group = iommu_group_get(client->dev);
+
+		if (gr3d->group) {
+			err = iommu_attach_group(tegra->domain, gr3d->group);
+			if (err < 0) {
+				dev_err(client->dev,
+					"failed to attach to domain: %d\n",
+					err);
+				host1x_syncpt_free(client->syncpts[0]);
+				host1x_channel_put(gr3d->channel);
+				iommu_group_put(gr3d->group);
+				return err;
+			}
+		}
+	}
+
 	return tegra_drm_register_client(dev->dev_private, drm);
 }
 
@@ -59,6 +80,7 @@ static int gr3d_exit(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
 	struct drm_device *dev = dev_get_drvdata(client->parent);
+	struct tegra_drm *tegra = dev->dev_private;
 	struct gr3d *gr3d = to_gr3d(drm);
 	int err;
 
@@ -69,6 +91,11 @@ static int gr3d_exit(struct host1x_client *client)
 	host1x_syncpt_free(client->syncpts[0]);
 	host1x_channel_put(gr3d->channel);
 
+	if (gr3d->group) {
+		iommu_detach_group(tegra->domain, gr3d->group);
+		iommu_group_put(gr3d->group);
+	}
+
 	return 0;
 }
 

From fd5ec0dc34dafa6c5bb46770ca283ae90a4db3c7 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Fri, 4 May 2018 15:00:54 +0200
Subject: [PATCH 0616/1286] drm/tegra: dc: Free syncpoint on errors

If an error happens during display controller initialization, the
previously requested host1x syncpoint is leaked. Properly clean up the
syncpoint along with the other resources.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/dc.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index f20648f58e49..c843f11043db 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1925,6 +1925,8 @@ cleanup:
 		iommu_group_put(dc->group);
 	}
 
+	host1x_syncpt_free(dc->syncpt);
+
 	return err;
 }
 

From dd99b4b48833a55817d78a48034b606664b1fff8 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Fri, 4 May 2018 14:58:26 +0200
Subject: [PATCH 0617/1286] drm/tegra: gr2d: Properly clean up resources

Failure to register the Tegra DRM client would leak the acquired
resources. Move the cleanup code into error-unwinding gotos to fix that
and to share it with the other error paths.
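
A minimal sketch of the goto-unwind idiom this adopts (the example_*
helpers are hypothetical stand-ins, not driver functions): acquire
resources in order and, on failure, jump to the label that releases only
what has already been acquired, in reverse order.

	static int example_get_channel(void) { return 0; }
	static void example_put_channel(void) { }
	static int example_request_syncpt(void) { return 0; }
	static void example_free_syncpt(void) { }
	static int example_register_client(void) { return 0; }

	static int example_init(void)
	{
		int err;

		err = example_get_channel();
		if (err < 0)
			return err;

		err = example_request_syncpt();
		if (err < 0)
			goto put;

		err = example_register_client();
		if (err < 0)
			goto free;

		return 0;

	free:
		example_free_syncpt();
	put:
		example_put_channel();
		return err;
	}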

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/gr2d.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 8eb530a85dd0..0b42e99da8ad 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -42,8 +42,9 @@ static int gr2d_init(struct host1x_client *client)
 
 	client->syncpts[0] = host1x_syncpt_request(client, flags);
 	if (!client->syncpts[0]) {
-		host1x_channel_put(gr2d->channel);
-		return -ENOMEM;
+		err = -ENOMEM;
+		dev_err(client->dev, "failed to request syncpoint: %d\n", err);
+		goto put;
 	}
 
 	if (tegra->domain) {
@@ -55,15 +56,30 @@ static int gr2d_init(struct host1x_client *client)
 				dev_err(client->dev,
 					"failed to attach to domain: %d\n",
 					err);
-				host1x_syncpt_free(client->syncpts[0]);
-				host1x_channel_put(gr2d->channel);
 				iommu_group_put(gr2d->group);
-				return err;
+				goto free;
 			}
 		}
 	}
 
-	return tegra_drm_register_client(tegra, drm);
+	err = tegra_drm_register_client(tegra, drm);
+	if (err < 0) {
+		dev_err(client->dev, "failed to register client: %d\n", err);
+		goto detach;
+	}
+
+	return 0;
+
+detach:
+	if (gr2d->group) {
+		iommu_detach_group(tegra->domain, gr2d->group);
+		iommu_group_put(gr2d->group);
+	}
+free:
+	host1x_syncpt_free(client->syncpts[0]);
+put:
+	host1x_channel_put(gr2d->channel);
+	return err;
 }
 
 static int gr2d_exit(struct host1x_client *client)

From f2742e471281ee55180cfd49557a5cd26986c6b3 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Fri, 4 May 2018 12:56:43 +0100
Subject: [PATCH 0618/1286] drm/i915: Include priority and completed status in
 request in/out tracepoints

It is useful to see the priority as requests go into the GPU and the
completed status as they come out.

To achieve this in a more readable way we need to abandon the common
request_hw tracepoint class.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180504115643.22437-1-tvrtko.ursulin@linux.intel.com
---
 drivers/gpu/drm/i915/i915_trace.h | 87 +++++++++++++++++++------------
 1 file changed, 55 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 408827bf5d96..77ee5e53eb32 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -679,45 +679,68 @@ DEFINE_EVENT(i915_request, i915_request_execute,
 	     TP_ARGS(rq)
 );
 
-DECLARE_EVENT_CLASS(i915_request_hw,
-		    TP_PROTO(struct i915_request *rq, unsigned int port),
-		    TP_ARGS(rq, port),
+TRACE_EVENT(i915_request_in,
+	    TP_PROTO(struct i915_request *rq, unsigned int port),
+	    TP_ARGS(rq, port),
 
-		    TP_STRUCT__entry(
-				     __field(u32, dev)
-				     __field(u32, hw_id)
-				     __field(u32, ring)
-				     __field(u32, ctx)
-				     __field(u32, seqno)
-				     __field(u32, global_seqno)
-				     __field(u32, port)
-				    ),
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, hw_id)
+			     __field(u32, ring)
+			     __field(u32, ctx)
+			     __field(u32, seqno)
+			     __field(u32, global_seqno)
+			     __field(u32, port)
+			     __field(u32, prio)
+			    ),
 
-		    TP_fast_assign(
-				   __entry->dev = rq->i915->drm.primary->index;
-				   __entry->hw_id = rq->ctx->hw_id;
-				   __entry->ring = rq->engine->id;
-				   __entry->ctx = rq->fence.context;
-				   __entry->seqno = rq->fence.seqno;
-				   __entry->global_seqno = rq->global_seqno;
-				   __entry->port = port;
-				  ),
+	    TP_fast_assign(
+			   __entry->dev = rq->i915->drm.primary->index;
+			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->ring = rq->engine->id;
+			   __entry->ctx = rq->fence.context;
+			   __entry->seqno = rq->fence.seqno;
+			   __entry->global_seqno = rq->global_seqno;
+			   __entry->prio = rq->sched.attr.priority;
+			   __entry->port = port;
+			   ),
 
-		    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
+	    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
+		      __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+		      __entry->seqno, __entry->prio, __entry->global_seqno,
+		      __entry->port)
+);
+
+TRACE_EVENT(i915_request_out,
+	    TP_PROTO(struct i915_request *rq),
+	    TP_ARGS(rq),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, hw_id)
+			     __field(u32, ring)
+			     __field(u32, ctx)
+			     __field(u32, seqno)
+			     __field(u32, global_seqno)
+			     __field(u32, completed)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->dev = rq->i915->drm.primary->index;
+			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->ring = rq->engine->id;
+			   __entry->ctx = rq->fence.context;
+			   __entry->seqno = rq->fence.seqno;
+			   __entry->global_seqno = rq->global_seqno;
+			   __entry->completed = i915_request_completed(rq);
+			   ),
+
+		    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
 			      __entry->dev, __entry->hw_id, __entry->ring,
 			      __entry->ctx, __entry->seqno,
-			      __entry->global_seqno, __entry->port)
+			      __entry->global_seqno, __entry->completed)
 );
 
-DEFINE_EVENT(i915_request_hw, i915_request_in,
-	     TP_PROTO(struct i915_request *rq, unsigned int port),
-	     TP_ARGS(rq, port)
-);
-
-DEFINE_EVENT(i915_request, i915_request_out,
-	     TP_PROTO(struct i915_request *rq),
-	     TP_ARGS(rq)
-);
 #else
 #if !defined(TRACE_HEADER_MULTI_READ)
 static inline void

From 280b54ade5914d3b4abe4f0ebe083ddbd4603246 Mon Sep 17 00:00:00 2001
From: Florent Flament <contact@florentflament.com>
Date: Thu, 19 Apr 2018 19:07:00 +0300
Subject: [PATCH 0619/1286] drm/i915: Fix drm:intel_enable_lvds ERROR message
 in kernel log
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix `[drm:intel_enable_lvds] *ERROR* timed out waiting for panel to
power on` in the kernel log at boot time.

Toshiba Satellite Z930 laptops need between 1 and 2 seconds to power
on their screen during Intel i915 DRM initialization. This currently
results in a `[drm:intel_enable_lvds] *ERROR* timed out waiting for
panel to power on` message appearing in the kernel log during boot
and when stopping the machine.

This change increases the timeout of the `intel_enable_lvds` function
from 1 to 5 seconds, allowing enough time for the Satellite Z930 LCD
screen to power on and suppressing the error message from the kernel
log.

This patch has been successfully tested on Linux 4.14 running on a
Toshiba Satellite Z930.

[vsyrjala: bump the timeout from 2 to 5 seconds to match the DP
 code and properly cover the max hw timeout of ~4 seconds, and
 drop the comment about the specific machine since this is not
 a particularly surprising issue, nor specific to that one machine]

Signed-off-by: Florent Flament <contact@florentflament.com>
Cc: stable@vger.kernel.org
Cc: Pavel Petrovic <ppetrovic@acm.org>
Cc: Sérgio M. Basto <sergio@serjux.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103414
References: https://bugzilla.kernel.org/show_bug.cgi?id=57591
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180419160700.19828-1-ville.syrjala@linux.intel.com
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/intel_lvds.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d35d2d50f595..8691c86f579c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -326,7 +326,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
 
 	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
 	POSTING_READ(lvds_encoder->reg);
-	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
+
+	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
 	intel_panel_enable_backlight(pipe_config, conn_state);

From 52cc80146d935aa902a3e0fc54268a99fcf68ccf Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 4 May 2018 13:42:02 +0100
Subject: [PATCH 0620/1286] drm/i915/selftests: Skip the execlists tests on
 !execlists machines

Ignore the tests looking at the innards of execlists and its submission
tasklets on machines that don't support execlists!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180504124202.24894-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index ee7e22d18ff8..b7460b5dd4f7 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -505,5 +505,9 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_preempt),
 		SUBTEST(live_late_preempt),
 	};
+
+	if (!HAS_EXECLISTS(i915))
+		return 0;
+
 	return i915_subtests(tests, i915);
 }

From c18e9a098605abe5a1dc1c5dd9cfeda322ed36d8 Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Tue, 24 Apr 2018 16:39:42 +0100
Subject: [PATCH 0621/1286] drm: rcar-du: of: Include header to define
 prototypes

The symbol 'rcar_du_of_init' is declared by the rcar_du_of module header,
but that header is not included by the C implementation file.

Include the header so that the function prototype is correctly declared for
the definition.

Fixes the following warning:

linux/drivers/gpu/drm/rcar-du/rcar_du_of.c:319:13:
   warning: symbol 'rcar_du_of_init' was not declared. Should it be static?
    CC      drivers/gpu/drm/rcar-du/rcar_du_of.o
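
For illustration only (hypothetical file names, not the rcar-du sources):
sparse flags the definition because it never sees a matching declaration,
and including the header resolves that.

  /* foo.h */
  void foo_init(void);

  /* foo.c */
  #include "foo.h"  /* without this, sparse suggests making foo_init() static */

  void foo_init(void)
  {
  }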

Fixes: 81c0e3dd8292 ("drm: rcar-du: Fix legacy DT to create LVDS encoder nodes")
Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Vaishali Thakkar <vthakkar@vaishalithakkar.in>
Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/rcar-du/rcar_du_of.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of.c b/drivers/gpu/drm/rcar-du/rcar_du_of.c
index 68a0b82cb17e..afef69669bb4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_of.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of.c
@@ -18,6 +18,7 @@
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_drv.h"
+#include "rcar_du_of.h"
 
 /* -----------------------------------------------------------------------------
  * Generic Overlay Handling

From 3b0033eb39360dc655466c5f1e6852d5a83b384d Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Tue, 24 Apr 2018 16:40:03 +0100
Subject: [PATCH 0622/1286] drm: rcar-du: Use NULL for table initialisation

Replace the initialisation of the vsps table with a NULL specifier.

Fixes the following warning:
 linux/drivers/gpu/drm/rcar-du/rcar_du_kms.c:483:40:
    warning: Using plain integer as NULL pointer
      CC      drivers/gpu/drm/rcar-du/rcar_du_kms.o

Fixes: 3e81374e2014 ("drm: rcar-du: Support multiple sources from the same VSP")
Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Vaishali Thakkar <vthakkar@vaishalithakkar.in>
Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/rcar-du/rcar_du_kms.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 0329b354bfa0..0c8b7e5686bb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -441,7 +441,7 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
 	struct {
 		struct device_node *np;
 		unsigned int crtcs_mask;
-	} vsps[RCAR_DU_MAX_VSPS] = { { 0, }, };
+	} vsps[RCAR_DU_MAX_VSPS] = { { NULL, }, };
 	unsigned int vsps_count = 0;
 	unsigned int cells;
 	unsigned int i;

From a4af8423cfe50e5cafa1893fc40643245793e3cd Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Thu, 26 Apr 2018 17:53:30 +0100
Subject: [PATCH 0623/1286] dt-bindings: display: renesas: du: Increase indent
 in output table

The DU output table lists the port combinations for each supported DU
type. Newer R-Car Gen3 models have longer name strings, which no longer
fit in the current columns.

Increase the table indentation in preparation for supporting new target
types.

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 .../bindings/display/renesas,du.txt           | 26 +++++++++----------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index c9cd17f99702..a36a6e7ee54f 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -47,20 +47,20 @@ bindings specified in Documentation/devicetree/bindings/graph.txt.
 The following table lists for each supported model the port number
 corresponding to each DU output.
 
-                      Port0          Port1          Port2          Port3
+                        Port0          Port1          Port2          Port3
 -----------------------------------------------------------------------------
- R8A7743 (RZ/G1M)     DPAD 0         LVDS 0         -              -
- R8A7745 (RZ/G1E)     DPAD 0         DPAD 1         -              -
- R8A7779 (R-Car H1)   DPAD 0         DPAD 1         -              -
- R8A7790 (R-Car H2)   DPAD 0         LVDS 0         LVDS 1         -
- R8A7791 (R-Car M2-W) DPAD 0         LVDS 0         -              -
- R8A7792 (R-Car V2H)  DPAD 0         DPAD 1         -              -
- R8A7793 (R-Car M2-N) DPAD 0         LVDS 0         -              -
- R8A7794 (R-Car E2)   DPAD 0         DPAD 1         -              -
- R8A7795 (R-Car H3)   DPAD 0         HDMI 0         HDMI 1         LVDS 0
- R8A7796 (R-Car M3-W) DPAD 0         HDMI 0         LVDS 0         -
- R8A77970 (R-Car V3M) DPAD 0         LVDS 0         -              -
- R8A77995 (R-Car D3)  DPAD 0         LVDS 0         LVDS 1         -
+ R8A7743 (RZ/G1M)       DPAD 0         LVDS 0         -              -
+ R8A7745 (RZ/G1E)       DPAD 0         DPAD 1         -              -
+ R8A7779 (R-Car H1)     DPAD 0         DPAD 1         -              -
+ R8A7790 (R-Car H2)     DPAD 0         LVDS 0         LVDS 1         -
+ R8A7791 (R-Car M2-W)   DPAD 0         LVDS 0         -              -
+ R8A7792 (R-Car V2H)    DPAD 0         DPAD 1         -              -
+ R8A7793 (R-Car M2-N)   DPAD 0         LVDS 0         -              -
+ R8A7794 (R-Car E2)     DPAD 0         DPAD 1         -              -
+ R8A7795 (R-Car H3)     DPAD 0         HDMI 0         HDMI 1         LVDS 0
+ R8A7796 (R-Car M3-W)   DPAD 0         HDMI 0         LVDS 0         -
+ R8A77970 (R-Car V3M)   DPAD 0         LVDS 0         -              -
+ R8A77995 (R-Car D3)    DPAD 0         LVDS 0         LVDS 1         -
 
 
 Example: R8A7795 (R-Car H3) ES2.0 DU

From dc8142901befabea974393d49b803f131243feb4 Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Thu, 26 Apr 2018 17:53:31 +0100
Subject: [PATCH 0624/1286] dt-bindings: display: renesas: du: Document the
 r8a77965 bindings

Document the M3-N (r8a77965) SoC in the R-Car DU bindings.

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 Documentation/devicetree/bindings/display/renesas,du.txt | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index a36a6e7ee54f..7c6854bd0a04 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -13,6 +13,7 @@ Required Properties:
     - "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU
     - "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU
     - "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU
+    - "renesas,du-r8a77965" for R8A77965 (R-Car M3-N) compatible DU
     - "renesas,du-r8a77970" for R8A77970 (R-Car V3M) compatible DU
     - "renesas,du-r8a77995" for R8A77995 (R-Car D3) compatible DU
 
@@ -59,6 +60,7 @@ corresponding to each DU output.
  R8A7794 (R-Car E2)     DPAD 0         DPAD 1         -              -
  R8A7795 (R-Car H3)     DPAD 0         HDMI 0         HDMI 1         LVDS 0
  R8A7796 (R-Car M3-W)   DPAD 0         HDMI 0         LVDS 0         -
+ R8A77965 (R-Car M3-N)  DPAD 0         HDMI 0         LVDS 0         -
  R8A77970 (R-Car V3M)   DPAD 0         LVDS 0         -              -
  R8A77995 (R-Car D3)    DPAD 0         LVDS 0         LVDS 1         -
 

From 4012532e040ba4c6bba0883c27b57adb1fd88db8 Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Thu, 26 Apr 2018 17:53:33 +0100
Subject: [PATCH 0625/1286] drm: rcar-du: Use the correct naming for ODPM
 fields in DEFR6

The names of the fields for the ODPM signals in the DU extensional
function control register 6 (DEFR6) do not match the data sheets for
either R-Car Gen2 or R-Car Gen3.

Rename the fields to match the data sheets.

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/rcar-du/rcar_du_group.c |  4 ++--
 drivers/gpu/drm/rcar-du/rcar_du_regs.h  | 16 ++++++++--------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 2f37ea901873..eead202c95c7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -46,10 +46,10 @@ void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
 
 static void rcar_du_group_setup_pins(struct rcar_du_group *rgrp)
 {
-	u32 defr6 = DEFR6_CODE | DEFR6_ODPM12_DISP;
+	u32 defr6 = DEFR6_CODE | DEFR6_ODPM02_DISP;
 
 	if (rgrp->num_crtcs > 1)
-		defr6 |= DEFR6_ODPM22_DISP;
+		defr6 |= DEFR6_ODPM12_DISP;
 
 	rcar_du_group_write(rgrp, DEFR6, defr6);
 }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index d5bae99d3cfe..9dfd220ceda1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -187,14 +187,14 @@
 
 #define DEFR6			0x000e8
 #define DEFR6_CODE		(0x7778 << 16)
-#define DEFR6_ODPM22_DSMR	(0 << 10)
-#define DEFR6_ODPM22_DISP	(2 << 10)
-#define DEFR6_ODPM22_CDE	(3 << 10)
-#define DEFR6_ODPM22_MASK	(3 << 10)
-#define DEFR6_ODPM12_DSMR	(0 << 8)
-#define DEFR6_ODPM12_DISP	(2 << 8)
-#define DEFR6_ODPM12_CDE	(3 << 8)
-#define DEFR6_ODPM12_MASK	(3 << 8)
+#define DEFR6_ODPM12_DSMR	(0 << 10)
+#define DEFR6_ODPM12_DISP	(2 << 10)
+#define DEFR6_ODPM12_CDE	(3 << 10)
+#define DEFR6_ODPM12_MASK	(3 << 10)
+#define DEFR6_ODPM02_DSMR	(0 << 8)
+#define DEFR6_ODPM02_DISP	(2 << 8)
+#define DEFR6_ODPM02_CDE	(3 << 8)
+#define DEFR6_ODPM02_MASK	(3 << 8)
 #define DEFR6_TCNE1		(1 << 6)
 #define DEFR6_TCNE0		(1 << 4)
 #define DEFR6_MLOS1		(1 << 2)

From 425f33bdcd4f492546354cbe4daafe420c450a83 Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Fri, 27 Apr 2018 23:21:50 +0100
Subject: [PATCH 0626/1286] dt-bindings: display: renesas: Add R-Car M3-N HDMI
 TX DT bindings

The M3-N HDMI TX controller is compatible with the M3-W and H3. No
extension to the DT bindings is needed.

Add an SoC-specific compatible string in case differences between the IP
versions are found later and require model-specific handling.

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 .../devicetree/bindings/display/bridge/renesas,dw-hdmi.txt       | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
index 3a72a103a18a..a41d280c3f9f 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
@@ -14,6 +14,7 @@ Required properties:
 - compatible : Shall contain one or more of
   - "renesas,r8a7795-hdmi" for R8A7795 (R-Car H3) compatible HDMI TX
   - "renesas,r8a7796-hdmi" for R8A7796 (R-Car M3-W) compatible HDMI TX
+  - "renesas,r8a77965-hdmi" for R8A77965 (R-Car M3-N) compatible HDMI TX
   - "renesas,rcar-gen3-hdmi" for the generic R-Car Gen3 compatible HDMI TX
 
     When compatible with generic versions, nodes must list the SoC-specific

From 5361cc7f8e9146f393cfcb76890d8c80a4e73086 Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Fri, 27 Apr 2018 23:21:52 +0100
Subject: [PATCH 0627/1286] drm: rcar-du: Split CRTC handling to support
 hardware indexing

The DU CRTC driver does not distinguish between a hardware index and a
software (CRTC) index, which becomes necessary when a DU channel is not
populated by the hardware.

Support this by adapting the rcar_du_device_info structure to store a
bitmask of available channels rather than a count of CRTCs. The count
can then be obtained as the Hamming weight of the bitmask.

This allows the rcar_du_crtc_create() function to distinguish between the
two index types, and non-populated DU channels are skipped without
leaving a gap in the software CRTC indexes.
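
As a minimal sketch of the resulting mapping (illustrative code, not part of
this patch): with channels_mask = BIT(3) | BIT(1) | BIT(0), as used for the
R8A77965 later in this series, the software indexes stay contiguous even
though DU2 is skipped.

  unsigned int channels_mask = BIT(3) | BIT(1) | BIT(0); /* DU0, DU1, DU3 */
  unsigned int num_crtcs = hweight8(channels_mask);      /* 3 */
  unsigned int swindex = 0, hwindex;

  for (hwindex = 0; swindex < num_crtcs; ++hwindex) {
      if (!(channels_mask & BIT(hwindex)))
          continue;                     /* DU2 unpopulated, no gap in swindex */
      create_crtc(swindex++, hwindex);  /* hypothetical helper */
  }
  /* Resulting pairs: (sw 0, DU0), (sw 1, DU1), (sw 2, DU3) */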

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 26 ++++++++++++++------------
 drivers/gpu/drm/rcar-du/rcar_du_crtc.h |  3 ++-
 drivers/gpu/drm/rcar-du/rcar_du_drv.c  | 22 +++++++++++-----------
 drivers/gpu/drm/rcar-du/rcar_du_drv.h  |  4 ++--
 drivers/gpu/drm/rcar-du/rcar_du_kms.c  | 18 +++++++++++++-----
 5 files changed, 42 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index c4420538ec85..f2a0bd1e5119 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -767,7 +767,8 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
  * Initialization
  */
 
-int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
+int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
+			unsigned int hwindex)
 {
 	static const unsigned int mmio_offsets[] = {
 		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
@@ -775,7 +776,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 
 	struct rcar_du_device *rcdu = rgrp->dev;
 	struct platform_device *pdev = to_platform_device(rcdu->dev);
-	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
+	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
 	struct drm_crtc *crtc = &rcrtc->crtc;
 	struct drm_plane *primary;
 	unsigned int irqflags;
@@ -787,7 +788,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 
 	/* Get the CRTC clock and the optional external clock. */
 	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
-		sprintf(clk_name, "du.%u", index);
+		sprintf(clk_name, "du.%u", hwindex);
 		name = clk_name;
 	} else {
 		name = NULL;
@@ -795,16 +796,16 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 
 	rcrtc->clock = devm_clk_get(rcdu->dev, name);
 	if (IS_ERR(rcrtc->clock)) {
-		dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
+		dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
 		return PTR_ERR(rcrtc->clock);
 	}
 
-	sprintf(clk_name, "dclkin.%u", index);
+	sprintf(clk_name, "dclkin.%u", hwindex);
 	clk = devm_clk_get(rcdu->dev, clk_name);
 	if (!IS_ERR(clk)) {
 		rcrtc->extclock = clk;
 	} else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) {
-		dev_info(rcdu->dev, "can't get external clock %u\n", index);
+		dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
 		return -EPROBE_DEFER;
 	}
 
@@ -813,13 +814,13 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 	spin_lock_init(&rcrtc->vblank_lock);
 
 	rcrtc->group = rgrp;
-	rcrtc->mmio_offset = mmio_offsets[index];
-	rcrtc->index = index;
+	rcrtc->mmio_offset = mmio_offsets[hwindex];
+	rcrtc->index = hwindex;
 
 	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
 		primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
 	else
-		primary = &rgrp->planes[index % 2].plane;
+		primary = &rgrp->planes[swindex % 2].plane;
 
 	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary,
 					NULL, &crtc_funcs, NULL);
@@ -833,7 +834,8 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 
 	/* Register the interrupt handler. */
 	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
-		irq = platform_get_irq(pdev, index);
+		/* The IRQ's are associated with the CRTC (sw)index. */
+		irq = platform_get_irq(pdev, swindex);
 		irqflags = 0;
 	} else {
 		irq = platform_get_irq(pdev, 0);
@@ -841,7 +843,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 	}
 
 	if (irq < 0) {
-		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
+		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
 		return irq;
 	}
 
@@ -849,7 +851,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 			       dev_name(rcdu->dev), rcrtc);
 	if (ret < 0) {
 		dev_err(rcdu->dev,
-			"failed to register IRQ for CRTC %u\n", index);
+			"failed to register IRQ for CRTC %u\n", swindex);
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index fdc2bf99bda1..84b5e23a85b1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -80,7 +80,8 @@ enum rcar_du_output {
 	RCAR_DU_OUTPUT_MAX,
 };
 
-int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
+int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
+			unsigned int hwindex);
 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
 void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 3917d839c04c..2aa392b03e73 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -40,7 +40,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
 	.gen = 2,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
-	.num_crtcs = 2,
+	.channels_mask = BIT(1) | BIT(0),
 	.routes = {
 		/*
 		 * R8A7743 has one RGB output and one LVDS output
@@ -61,7 +61,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
 	.gen = 2,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
-	.num_crtcs = 2,
+	.channels_mask = BIT(1) | BIT(0),
 	.routes = {
 		/*
 		 * R8A7745 has two RGB outputs
@@ -80,7 +80,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
 static const struct rcar_du_device_info rcar_du_r8a7779_info = {
 	.gen = 2,
 	.features = 0,
-	.num_crtcs = 2,
+	.channels_mask = BIT(1) | BIT(0),
 	.routes = {
 		/*
 		 * R8A7779 has two RGB outputs and one (currently unsupported)
@@ -102,7 +102,7 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
 	.quirks = RCAR_DU_QUIRK_ALIGN_128B,
-	.num_crtcs = 3,
+	.channels_mask = BIT(2) | BIT(1) | BIT(0),
 	.routes = {
 		/*
 		 * R8A7790 has one RGB output, two LVDS outputs and one
@@ -129,7 +129,7 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
 	.gen = 2,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
-	.num_crtcs = 2,
+	.channels_mask = BIT(1) | BIT(0),
 	.routes = {
 		/*
 		 * R8A779[13] has one RGB output, one LVDS output and one
@@ -151,7 +151,7 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
 	.gen = 2,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
-	.num_crtcs = 2,
+	.channels_mask = BIT(1) | BIT(0),
 	.routes = {
 		/* R8A7792 has two RGB outputs. */
 		[RCAR_DU_OUTPUT_DPAD0] = {
@@ -169,7 +169,7 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
 	.gen = 2,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
-	.num_crtcs = 2,
+	.channels_mask = BIT(1) | BIT(0),
 	.routes = {
 		/*
 		 * R8A7794 has two RGB outputs and one (currently unsupported)
@@ -191,7 +191,7 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS
 		  | RCAR_DU_FEATURE_VSP1_SOURCE,
-	.num_crtcs = 4,
+	.channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
 	.routes = {
 		/*
 		 * R8A7795 has one RGB output, two HDMI outputs and one
@@ -215,7 +215,7 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
 		},
 	},
 	.num_lvds = 1,
-	.dpll_ch =  BIT(1) | BIT(2),
+	.dpll_ch =  BIT(2) | BIT(1),
 };
 
 static const struct rcar_du_device_info rcar_du_r8a7796_info = {
@@ -223,7 +223,7 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS
 		  | RCAR_DU_FEATURE_VSP1_SOURCE,
-	.num_crtcs = 3,
+	.channels_mask = BIT(2) | BIT(1) | BIT(0),
 	.routes = {
 		/*
 		 * R8A7796 has one RGB output, one LVDS output and one HDMI
@@ -251,7 +251,7 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = {
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS
 		  | RCAR_DU_FEATURE_VSP1_SOURCE,
-	.num_crtcs = 1,
+	.channels_mask = BIT(0),
 	.routes = {
 		/* R8A77970 has one RGB output and one LVDS output. */
 		[RCAR_DU_OUTPUT_DPAD0] = {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 5c7ec15818c7..5385bb5f6d00 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -52,7 +52,7 @@ struct rcar_du_output_routing {
  * @gen: device generation (2 or 3)
  * @features: device features (RCAR_DU_FEATURE_*)
  * @quirks: device quirks (RCAR_DU_QUIRK_*)
- * @num_crtcs: total number of CRTCs
+ * @channels_mask: bit mask of available DU channels
  * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
  * @num_lvds: number of internal LVDS encoders
  */
@@ -60,7 +60,7 @@ struct rcar_du_device_info {
 	unsigned int gen;
 	unsigned int features;
 	unsigned int quirks;
-	unsigned int num_crtcs;
+	unsigned int channels_mask;
 	struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
 	unsigned int num_lvds;
 	unsigned int dpll_ch;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 0c8b7e5686bb..b5e331cb0d1c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -520,6 +520,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	struct drm_fbdev_cma *fbdev;
 	unsigned int num_encoders;
 	unsigned int num_groups;
+	unsigned int swindex;
+	unsigned int hwindex;
 	unsigned int i;
 	int ret;
 
@@ -532,7 +534,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	dev->mode_config.funcs = &rcar_du_mode_config_funcs;
 	dev->mode_config.helper_private = &rcar_du_mode_config_helper;
 
-	rcdu->num_crtcs = rcdu->info->num_crtcs;
+	rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
 
 	ret = rcar_du_properties_init(rcdu);
 	if (ret < 0)
@@ -542,7 +544,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	 * Initialize vertical blanking interrupts handling. Start with vblank
 	 * disabled for all CRTCs.
 	 */
-	ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
+	ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
 	if (ret < 0)
 		return ret;
 
@@ -584,10 +586,16 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 	}
 
 	/* Create the CRTCs. */
-	for (i = 0; i < rcdu->num_crtcs; ++i) {
-		struct rcar_du_group *rgrp = &rcdu->groups[i / 2];
+	for (swindex = 0, hwindex = 0; swindex < rcdu->num_crtcs; ++hwindex) {
+		struct rcar_du_group *rgrp;
 
-		ret = rcar_du_crtc_create(rgrp, i);
+		/* Skip unpopulated DU channels. */
+		if (!(rcdu->info->channels_mask & BIT(hwindex)))
+			continue;
+
+		rgrp = &rcdu->groups[hwindex / 2];
+
+		ret = rcar_du_crtc_create(rgrp, swindex++, hwindex);
 		if (ret < 0)
 			return ret;
 	}

From 7ae90455bc865ab1c30fb4db53ac56ec32741ab9 Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Fri, 27 Apr 2018 23:21:53 +0100
Subject: [PATCH 0628/1286] drm: rcar-du: Allow DU groups to work with hardware
 indexing

The group objects assume linear indexing and, in particular, always assume
that channel 0 of any active group is used.

Now that the CRTC objects support non-linear indexing, adapt the groups to
drop the assumption that channel 0 is used in each group, relying instead on
the channel mask provided in the device structures.

Finally ensure that the RGB routing is determined from the index of the
CRTC object (which represents the hardware DU channel index).
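
As a worked example (illustrative variables, not driver code): with
channels_mask = BIT(3) | BIT(1) | BIT(0), each group of two channels extracts
its own sub-mask and derives its CRTC count from the Hamming weight.

  /* channels_mask = BIT(3) | BIT(1) | BIT(0) = 0xb */
  group0_mask = (0xb >> (2 * 0)) & GENMASK(1, 0); /* 0x3: DU0 and DU1, 2 CRTCs */
  group1_mask = (0xb >> (2 * 1)) & GENMASK(1, 0); /* 0x2: DU3 only, 1 CRTC */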

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/rcar-du/rcar_du_group.c | 14 +++++++++-----
 drivers/gpu/drm/rcar-du/rcar_du_group.h |  2 ++
 drivers/gpu/drm/rcar-du/rcar_du_kms.c   |  5 ++++-
 3 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index eead202c95c7..d539cb290a35 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -46,9 +46,12 @@ void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
 
 static void rcar_du_group_setup_pins(struct rcar_du_group *rgrp)
 {
-	u32 defr6 = DEFR6_CODE | DEFR6_ODPM02_DISP;
+	u32 defr6 = DEFR6_CODE;
 
-	if (rgrp->num_crtcs > 1)
+	if (rgrp->channels_mask & BIT(0))
+		defr6 |= DEFR6_ODPM02_DISP;
+
+	if (rgrp->channels_mask & BIT(1))
 		defr6 |= DEFR6_ODPM12_DISP;
 
 	rcar_du_group_write(rgrp, DEFR6, defr6);
@@ -80,10 +83,11 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
 		 * On Gen3 VSPD routing can't be configured, but DPAD routing
 		 * needs to be set despite having a single option available.
 		 */
-		u32 crtc = ffs(possible_crtcs) - 1;
+		unsigned int rgb_crtc = ffs(possible_crtcs) - 1;
+		struct rcar_du_crtc *crtc = &rcdu->crtcs[rgb_crtc];
 
-		if (crtc / 2 == rgrp->index)
-			defr8 |= DEFR8_DRGBS_DU(crtc);
+		if (crtc->index / 2 == rgrp->index)
+			defr8 |= DEFR8_DRGBS_DU(crtc->index);
 	}
 
 	rcar_du_group_write(rgrp, DEFR8, defr8);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 5e3adc6b31b5..42105aedecc8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -25,6 +25,7 @@ struct rcar_du_device;
  * @dev: the DU device
  * @mmio_offset: registers offset in the device memory map
  * @index: group index
+ * @channels_mask: bitmask of populated DU channels in this group
  * @num_crtcs: number of CRTCs in this group (1 or 2)
  * @use_count: number of users of the group (rcar_du_group_(get|put))
  * @used_crtcs: number of CRTCs currently in use
@@ -39,6 +40,7 @@ struct rcar_du_group {
 	unsigned int mmio_offset;
 	unsigned int index;
 
+	unsigned int channels_mask;
 	unsigned int num_crtcs;
 	unsigned int use_count;
 	unsigned int used_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index b5e331cb0d1c..34f1c27e9cf9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -559,7 +559,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 		rgrp->dev = rcdu;
 		rgrp->mmio_offset = mmio_offsets[i];
 		rgrp->index = i;
-		rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U);
+		/* Extract the channel mask for this group only. */
+		rgrp->channels_mask = (rcdu->info->channels_mask >> (2 * i))
+				   & GENMASK(1, 0);
+		rgrp->num_crtcs = hweight8(rgrp->channels_mask);
 
 		/*
 		 * If we have more than one CRTCs in this group pre-associate

From f1e9a22ac3cff749077f40bf1a149aaaf587ae2d Mon Sep 17 00:00:00 2001
From: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Date: Fri, 27 Apr 2018 23:21:54 +0100
Subject: [PATCH 0629/1286] drm: rcar-du: Add R8A77965 support

The R8A77965 (M3-N) SoC provides RGB, HDMI and LVDS output.

This platform is unusual in that the RGB output is connected to DU3, leaving
DU2 unpopulated. This is reflected accordingly in the channels_mask.

Signed-off-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/rcar-du/rcar_du_drv.c | 29 +++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 2aa392b03e73..02aee6cb0e53 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -246,6 +246,34 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
 	.dpll_ch =  BIT(1),
 };
 
+static const struct rcar_du_device_info rcar_du_r8a77965_info = {
+	.gen = 3,
+	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+		  | RCAR_DU_FEATURE_EXT_CTRL_REGS
+		  | RCAR_DU_FEATURE_VSP1_SOURCE,
+	.channels_mask = BIT(3) | BIT(1) | BIT(0),
+	.routes = {
+		/*
+		 * R8A77965 has one RGB output, one LVDS output and one HDMI
+		 * output.
+		 */
+		[RCAR_DU_OUTPUT_DPAD0] = {
+			.possible_crtcs = BIT(2),
+			.port = 0,
+		},
+		[RCAR_DU_OUTPUT_HDMI0] = {
+			.possible_crtcs = BIT(1),
+			.port = 1,
+		},
+		[RCAR_DU_OUTPUT_LVDS0] = {
+			.possible_crtcs = BIT(0),
+			.port = 2,
+		},
+	},
+	.num_lvds = 1,
+	.dpll_ch =  BIT(1),
+};
+
 static const struct rcar_du_device_info rcar_du_r8a77970_info = {
 	.gen = 3,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -277,6 +305,7 @@ static const struct of_device_id rcar_du_of_table[] = {
 	{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
 	{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
 	{ .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
+	{ .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info },
 	{ .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info },
 	{ }
 };

From b06078de418d6f77c81aa74516f787663f51a262 Mon Sep 17 00:00:00 2001
From: Emre Ucan <eucan@de.adit-jv.com>
Date: Mon, 30 Apr 2018 14:02:04 +0200
Subject: [PATCH 0630/1286] drm: rcar-du: Track dma-buf fences

We have to check the dma-buf reservation objects of our framebuffers before
we use them. Otherwise, another driver might be writing to the same buffer
while we are using it, which would cause visible tearing on the display.

We can use existing atomic helper functions to solve this problem.

Signed-off-by: Emre Ucan <eucan@de.adit-jv.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Reviewed-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 4a01a99a4674..3badf02ab24a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -17,6 +17,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
 
 #include <linux/bitops.h>
@@ -237,6 +238,10 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
 		}
 	}
 
+	ret = drm_gem_fb_prepare_fb(plane, state);
+	if (ret)
+		goto fail;
+
 	return 0;
 
 fail:

From 28eff78618c67277ffe3a6eb4557ad918fd09fe5 Mon Sep 17 00:00:00 2001
From: Ezequiel Garcia <ezequiel@collabora.com>
Date: Fri, 4 May 2018 15:00:37 -0300
Subject: [PATCH 0631/1286] dma-buf: Remove unneeded stubs around sync_debug
 interfaces

The sync_debug.h header is internal and only used by sw_sync.c.
Therefore, CONFIG_SW_SYNC is always defined when the header is built,
and there is no need for the stubs. Remove them and make the code
simpler.

Signed-off-by: Ezequiel Garcia <ezequiel@collabora.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180504180037.10661-1-ezequiel@collabora.com
---
 drivers/dma-buf/sync_debug.h | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index d615a89f774c..05e33f937ad0 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -62,8 +62,6 @@ struct sync_pt {
 	struct rb_node node;
 };
 
-#ifdef CONFIG_SW_SYNC
-
 extern const struct file_operations sw_sync_debugfs_fops;
 
 void sync_timeline_debug_add(struct sync_timeline *obj);
@@ -72,12 +70,4 @@ void sync_file_debug_add(struct sync_file *fence);
 void sync_file_debug_remove(struct sync_file *fence);
 void sync_dump(void);
 
-#else
-# define sync_timeline_debug_add(obj)
-# define sync_timeline_debug_remove(obj)
-# define sync_file_debug_add(fence)
-# define sync_file_debug_remove(fence)
-# define sync_dump()
-#endif
-
 #endif /* _LINUX_SYNC_H */

From 13e1592f7379422c8d45ccd9c8c8a4698c97b39b Mon Sep 17 00:00:00 2001
From: Imre Deak <imre.deak@intel.com>
Date: Tue, 17 Apr 2018 14:31:47 +0300
Subject: [PATCH 0632/1286] drm/i915: Add documentation to gen9_set_dc_state()

Add documentation to gen9_set_dc_state() on what enabling a given DC
state means and at what point HW/DMC actually enters/exits these states.

Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20180417113147.25120-1-imre.deak@intel.com
---
 drivers/gpu/drm/i915/intel_runtime_pm.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 3fffbfe4521d..53a6eaa9671a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -542,6 +542,29 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
 	dev_priv->csr.dc_state = val;
 }
 
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turns these back when exiting
+ * that state to a shallower power state (lower in number). The HW will decide
+ * when to actually enter a given state on an on-demand basis, for instance
+ * depending on the active state of display pipes. The state of display
+ * registers backed by affected power rails are saved/restored as needed.
+ *
+ * Based on the above enabling a deeper DC power state is asynchronous wrt.
+ * enabling it. Disabling a deeper power state is synchronous: for instance
+ * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
+ * back on and register state is restored. This is guaranteed by the MMIO write
+ * to DC_STATE_EN blocking until the state is restored.
+ */
 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
 {
 	uint32_t val;

From c27e917e2bda748777b7927d7cb7c911bc2027c8 Mon Sep 17 00:00:00 2001
From: Paulo Zanoni <paulo.r.zanoni@intel.com>
Date: Fri, 27 Apr 2018 16:14:36 -0700
Subject: [PATCH 0633/1286] drm/i915/icl: add basic support for the ICL clocks

This commit introduces the definitions for the ICL clocks and adds the
basic functions to the shared DPLL framework. It adds code for the
enable and disable sequences of some PLLs, but it does not include the
code to compute the actual PLL values; that code is marked with TODO
comments and will be introduced in separate commits.

Special thanks to James Ausmus for investigating and fixing a bug with
the placement of the icl_unmap_plls_to_ports() function.

v2:
 - Rebase around dpll_lock changes.
v3:
 - The spec now says what the timeouts should be.
 - Touch DPCLKA_CFGCR0_ICL at the appropriate time so we don't freeze
   the machine.
 - Checkpatch found a white space problem.
 - Small adjustments before upstreaming.
v4:
 - Move the ICL checks out of the *map_plls_to_ports() functions
  (James)
 - Add extra encoder check (James)
 - Call icl_unmap_plls_to_ports() later (James)
v5:
 - Rebase after the pll struct changes.
v6:
 - Properly make the unmap function based on encoders_post_disable()
   with regard to checks and iterators.
 - Address checkpatch comment on "min = max = x()".

Cc: James Ausmus <james.ausmus@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: James Ausmus <james.ausmus@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180427231436.9353-1-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/i915_debugfs.c   |  22 ++
 drivers/gpu/drm/i915/intel_ddi.c      |  98 +++++++-
 drivers/gpu/drm/i915/intel_display.c  |  16 ++
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 313 +++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_dpll_mgr.h |  41 ++++
 drivers/gpu/drm/i915/intel_drv.h      |   6 +
 6 files changed, 491 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 85911bc0b703..13e7b9e4a6e6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3368,6 +3368,28 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
+		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
+		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
+		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
+			   pll->state.hw_state.mg_refclkin_ctl);
+		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
+			   pll->state.hw_state.mg_clktop2_coreclkctl1);
+		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
+			   pll->state.hw_state.mg_clktop2_hsclkctl);
+		seq_printf(m, " mg_pll_div0:  0x%08x\n",
+			   pll->state.hw_state.mg_pll_div0);
+		seq_printf(m, " mg_pll_div1:  0x%08x\n",
+			   pll->state.hw_state.mg_pll_div1);
+		seq_printf(m, " mg_pll_lf:    0x%08x\n",
+			   pll->state.hw_state.mg_pll_lf);
+		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
+			   pll->state.hw_state.mg_pll_frac_lock);
+		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
+			   pll->state.hw_state.mg_pll_ssc);
+		seq_printf(m, " mg_pll_bias:  0x%08x\n",
+			   pll->state.hw_state.mg_pll_bias);
+		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
+			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
 	}
 	drm_modeset_unlock_all(dev);
 
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8225d223f452..b98ac0541f19 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1052,6 +1052,25 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 	}
 }
 
+static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
+				       const struct intel_shared_dpll *pll)
+{
+	const enum intel_dpll_id id = pll->info->id;
+
+	switch (id) {
+	default:
+		MISSING_CASE(id);
+	case DPLL_ID_ICL_DPLL0:
+	case DPLL_ID_ICL_DPLL1:
+		return DDI_CLK_SEL_NONE;
+	case DPLL_ID_ICL_MGPLL1:
+	case DPLL_ID_ICL_MGPLL2:
+	case DPLL_ID_ICL_MGPLL3:
+	case DPLL_ID_ICL_MGPLL4:
+		return DDI_CLK_SEL_MG;
+	}
+}
+
 /* Starting with Haswell, different DDI ports can work in FDI mode for
  * connection to the PCH-located connectors. For this, it is necessary to train
  * both the DDI port and PCH receiver for the desired DDI buffer settings.
@@ -2421,6 +2440,69 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 	return DDI_BUF_TRANS_SELECT(level);
 }
 
+void icl_map_plls_to_ports(struct drm_crtc *crtc,
+			   struct intel_crtc_state *crtc_state,
+			   struct drm_atomic_state *old_state)
+{
+	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_connector_state *conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(conn_state->best_encoder);
+		enum port port = encoder->port;
+		uint32_t val;
+
+		if (conn_state->crtc != crtc)
+			continue;
+
+		mutex_lock(&dev_priv->dpll_lock);
+
+		val = I915_READ(DPCLKA_CFGCR0_ICL);
+		WARN_ON((val & DPCLKA_CFGCR0_DDI_CLK_OFF(port)) == 0);
+
+		if (port == PORT_A || port == PORT_B) {
+			val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+			val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+			I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+			POSTING_READ(DPCLKA_CFGCR0_ICL);
+		}
+
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+		I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+		mutex_unlock(&dev_priv->dpll_lock);
+	}
+}
+
+void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
+			     struct intel_crtc_state *crtc_state,
+			     struct drm_atomic_state *old_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(old_conn_state->best_encoder);
+		enum port port = encoder->port;
+
+		if (old_conn_state->crtc != crtc)
+			continue;
+
+		mutex_lock(&dev_priv->dpll_lock);
+		I915_WRITE(DPCLKA_CFGCR0_ICL,
+			   I915_READ(DPCLKA_CFGCR0_ICL) |
+			   DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+		mutex_unlock(&dev_priv->dpll_lock);
+	}
+}
+
 static void intel_ddi_clk_select(struct intel_encoder *encoder,
 				 const struct intel_shared_dpll *pll)
 {
@@ -2433,7 +2515,11 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 
 	mutex_lock(&dev_priv->dpll_lock);
 
-	if (IS_CANNONLAKE(dev_priv)) {
+	if (IS_ICELAKE(dev_priv)) {
+		if (port >= PORT_C)
+			I915_WRITE(DDI_CLK_SEL(port),
+				   icl_pll_to_ddi_pll_sel(encoder, pll));
+	} else if (IS_CANNONLAKE(dev_priv)) {
 		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
 		val = I915_READ(DPCLKA_CFGCR0);
 		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
@@ -2471,14 +2557,18 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 
-	if (IS_CANNONLAKE(dev_priv))
+	if (IS_ICELAKE(dev_priv)) {
+		if (port >= PORT_C)
+			I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+	} else if (IS_CANNONLAKE(dev_priv)) {
 		I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
 			   DPCLKA_CFGCR0_DDI_CLK_OFF(port));
-	else if (IS_GEN9_BC(dev_priv))
+	} else if (IS_GEN9_BC(dev_priv)) {
 		I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
 			   DPLL_CTRL2_DDI_CLK_OFF(port));
-	else if (INTEL_GEN(dev_priv) < 9)
+	} else if (INTEL_GEN(dev_priv) < 9) {
 		I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+	}
 }
 
 static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3fd249c05e4e..cdfe0951d171 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5559,6 +5559,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	if (intel_crtc->config->shared_dpll)
 		intel_enable_shared_dpll(intel_crtc);
 
+	if (INTEL_GEN(dev_priv) >= 11)
+		icl_map_plls_to_ports(crtc, pipe_config, old_state);
+
 	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
@@ -5756,6 +5759,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
 		intel_ddi_disable_pipe_clock(intel_crtc->config);
 
 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -11386,6 +11392,16 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
 	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
 	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
 	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
+	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
 
 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
 	PIPE_CONF_CHECK_X(dsi_pll.div);
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index d5e114e9660b..14f5414ceab2 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2399,6 +2399,315 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
 	.dump_hw_state = cnl_dump_hw_state,
 };
 
+static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
+				struct intel_encoder *encoder, int clock,
+				struct intel_dpll_hw_state *pll_state)
+{
+	/* TODO */
+	return true;
+}
+
+static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
+{
+	return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
+}
+
+static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+{
+	return port - PORT_C + DPLL_ID_ICL_MGPLL1;
+}
+
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+				  struct intel_encoder *encoder, int clock,
+				  struct intel_dpll_hw_state *pll_state)
+{
+	/* TODO */
+	return true;
+}
+
+static struct intel_shared_dpll *
+icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+	     struct intel_encoder *encoder)
+{
+	struct intel_shared_dpll *pll;
+	struct intel_dpll_hw_state pll_state = {};
+	enum port port = encoder->port;
+	enum intel_dpll_id min, max;
+	int clock = crtc_state->port_clock;
+	bool ret;
+
+	switch (port) {
+	case PORT_A:
+	case PORT_B:
+		min = DPLL_ID_ICL_DPLL0;
+		max = DPLL_ID_ICL_DPLL1;
+		ret = icl_calc_dpll_state(crtc_state, encoder, clock,
+					  &pll_state);
+		break;
+	case PORT_C:
+	case PORT_D:
+	case PORT_E:
+	case PORT_F:
+		min = icl_port_to_mg_pll_id(port);
+		max = min;
+		ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
+					    &pll_state);
+		break;
+	default:
+		MISSING_CASE(port);
+		return NULL;
+	}
+
+	if (!ret) {
+		DRM_DEBUG_KMS("Could not calculate PLL state.\n");
+		return NULL;
+	}
+
+	crtc_state->dpll_hw_state = pll_state;
+
+	pll = intel_find_shared_dpll(crtc, crtc_state, min, max);
+	if (!pll) {
+		DRM_DEBUG_KMS("No PLL selected\n");
+		return NULL;
+	}
+
+	intel_reference_shared_dpll(pll, crtc_state);
+
+	return pll;
+}
+
+static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
+{
+	switch (id) {
+	default:
+		MISSING_CASE(id);
+	case DPLL_ID_ICL_DPLL0:
+	case DPLL_ID_ICL_DPLL1:
+		return CNL_DPLL_ENABLE(id);
+	case DPLL_ID_ICL_MGPLL1:
+	case DPLL_ID_ICL_MGPLL2:
+	case DPLL_ID_ICL_MGPLL3:
+	case DPLL_ID_ICL_MGPLL4:
+		return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
+	}
+}
+
+static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll,
+				 struct intel_dpll_hw_state *hw_state)
+{
+	const enum intel_dpll_id id = pll->info->id;
+	uint32_t val;
+	enum port port;
+	bool ret = false;
+
+	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(icl_pll_id_to_enable_reg(id));
+	if (!(val & PLL_ENABLE))
+		goto out;
+
+	switch (id) {
+	case DPLL_ID_ICL_DPLL0:
+	case DPLL_ID_ICL_DPLL1:
+		hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+		hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+		break;
+	case DPLL_ID_ICL_MGPLL1:
+	case DPLL_ID_ICL_MGPLL2:
+	case DPLL_ID_ICL_MGPLL3:
+	case DPLL_ID_ICL_MGPLL4:
+		port = icl_mg_pll_id_to_port(id);
+		hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
+		hw_state->mg_clktop2_coreclkctl1 =
+			I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+		hw_state->mg_clktop2_hsclkctl =
+			I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+		hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
+		hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port));
+		hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port));
+		hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port));
+		hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port));
+		hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port));
+		hw_state->mg_pll_tdc_coldst_bias =
+			I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+		break;
+	default:
+		MISSING_CASE(id);
+	}
+
+	ret = true;
+out:
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+	return ret;
+}
+
+static void icl_dpll_write(struct drm_i915_private *dev_priv,
+			   struct intel_shared_dpll *pll)
+{
+	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+	const enum intel_dpll_id id = pll->info->id;
+
+	I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
+	I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
+	POSTING_READ(ICL_DPLL_CFGCR1(id));
+}
+
+static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
+			     struct intel_shared_dpll *pll)
+{
+	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+	enum port port = icl_mg_pll_id_to_port(pll->info->id);
+
+	I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl);
+	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port),
+		   hw_state->mg_clktop2_coreclkctl1);
+	I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl);
+	I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0);
+	I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1);
+	I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf);
+	I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock);
+	I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc);
+	I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias);
+	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port),
+		   hw_state->mg_pll_tdc_coldst_bias);
+	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port));
+}
+
+static void icl_pll_enable(struct drm_i915_private *dev_priv,
+			   struct intel_shared_dpll *pll)
+{
+	const enum intel_dpll_id id = pll->info->id;
+	i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
+	uint32_t val;
+
+	val = I915_READ(enable_reg);
+	val |= PLL_POWER_ENABLE;
+	I915_WRITE(enable_reg, val);
+
+	/*
+	 * The spec says we need to "wait" but it also says it should be
+	 * immediate.
+	 */
+	if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
+				    PLL_POWER_STATE, 1))
+		DRM_ERROR("PLL %d Power not enabled\n", id);
+
+	switch (id) {
+	case DPLL_ID_ICL_DPLL0:
+	case DPLL_ID_ICL_DPLL1:
+		icl_dpll_write(dev_priv, pll);
+		break;
+	case DPLL_ID_ICL_MGPLL1:
+	case DPLL_ID_ICL_MGPLL2:
+	case DPLL_ID_ICL_MGPLL3:
+	case DPLL_ID_ICL_MGPLL4:
+		icl_mg_pll_write(dev_priv, pll);
+		break;
+	default:
+		MISSING_CASE(id);
+	}
+
+	/*
+	 * DVFS pre sequence would be here, but in our driver the cdclk code
+	 * paths should already be setting the appropriate voltage, hence we do
+	 * nothing here.
+	 */
+
+	val = I915_READ(enable_reg);
+	val |= PLL_ENABLE;
+	I915_WRITE(enable_reg, val);
+
+	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
+				    1)) /* 600us actually. */
+		DRM_ERROR("PLL %d not locked\n", id);
+
+	/* DVFS post sequence would be here. See the comment above. */
+}
+
+static void icl_pll_disable(struct drm_i915_private *dev_priv,
+			    struct intel_shared_dpll *pll)
+{
+	const enum intel_dpll_id id = pll->info->id;
+	i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
+	uint32_t val;
+
+	/* The first steps are done by intel_ddi_post_disable(). */
+
+	/*
+	 * DVFS pre sequence would be here, but in our driver the cdclk code
+	 * paths should already be setting the appropriate voltage, hence we do
+	 * nothing here.
+	 */
+
+	val = I915_READ(enable_reg);
+	val &= ~PLL_ENABLE;
+	I915_WRITE(enable_reg, val);
+
+	/* Timeout is actually 1us. */
+	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
+		DRM_ERROR("PLL %d locked\n", id);
+
+	/* DVFS post sequence would be here. See the comment above. */
+
+	val = I915_READ(enable_reg);
+	val &= ~PLL_POWER_ENABLE;
+	I915_WRITE(enable_reg, val);
+
+	/*
+	 * The spec says we need to "wait" but it also says it should be
+	 * immediate.
+	 */
+	if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
+				    1))
+		DRM_ERROR("PLL %d Power not disabled\n", id);
+}
+
+static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
+			      struct intel_dpll_hw_state *hw_state)
+{
+	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+		      "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
+		      "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+		      "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
+		      "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+		      "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+		      hw_state->cfgcr0, hw_state->cfgcr1,
+		      hw_state->mg_refclkin_ctl,
+		      hw_state->mg_clktop2_coreclkctl1,
+		      hw_state->mg_clktop2_hsclkctl,
+		      hw_state->mg_pll_div0,
+		      hw_state->mg_pll_div1,
+		      hw_state->mg_pll_lf,
+		      hw_state->mg_pll_frac_lock,
+		      hw_state->mg_pll_ssc,
+		      hw_state->mg_pll_bias,
+		      hw_state->mg_pll_tdc_coldst_bias);
+}
+
+static const struct intel_shared_dpll_funcs icl_pll_funcs = {
+	.enable = icl_pll_enable,
+	.disable = icl_pll_disable,
+	.get_hw_state = icl_pll_get_hw_state,
+};
+
+static const struct dpll_info icl_plls[] = {
+	{ "DPLL 0",   &icl_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
+	{ "DPLL 1",   &icl_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
+	{ "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+	{ "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+	{ "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+	{ "MG PLL 4", &icl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+	{ },
+};
+
+static const struct intel_dpll_mgr icl_pll_mgr = {
+	.dpll_info = icl_plls,
+	.get_dpll = icl_get_dpll,
+	.dump_hw_state = icl_dump_hw_state,
+};
+
 /**
  * intel_shared_dpll_init - Initialize shared DPLLs
  * @dev: drm device
@@ -2412,7 +2721,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	const struct dpll_info *dpll_info;
 	int i;
 
-	if (IS_CANNONLAKE(dev_priv))
+	if (IS_ICELAKE(dev_priv))
+		dpll_mgr = &icl_pll_mgr;
+	else if (IS_CANNONLAKE(dev_priv))
 		dpll_mgr = &cnl_pll_mgr;
 	else if (IS_GEN9_BC(dev_priv))
 		dpll_mgr = &skl_pll_mgr;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 4febfaa90bde..7a0cd564a9ee 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -103,6 +103,32 @@ enum intel_dpll_id {
 	 * @DPLL_ID_SKL_DPLL3: SKL and later DPLL3
 	 */
 	DPLL_ID_SKL_DPLL3 = 3,
+
+
+	/**
+	 * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0
+	 */
+	DPLL_ID_ICL_DPLL0 = 0,
+	/**
+	 * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
+	 */
+	DPLL_ID_ICL_DPLL1 = 1,
+	/**
+	 * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
+	 */
+	DPLL_ID_ICL_MGPLL1 = 2,
+	/**
+	 * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
+	 */
+	DPLL_ID_ICL_MGPLL2 = 3,
+	/**
+	 * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
+	 */
+	DPLL_ID_ICL_MGPLL3 = 4,
+	/**
+	 * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
+	 */
+	DPLL_ID_ICL_MGPLL4 = 5,
 };
 #define I915_NUM_PLLS 6
 
@@ -135,6 +161,21 @@ struct intel_dpll_hw_state {
 	/* bxt */
 	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
 		 pcsdw12;
+
+	/*
+	 * ICL uses the following, already defined:
+	 * uint32_t cfgcr0, cfgcr1;
+	 */
+	uint32_t mg_refclkin_ctl;
+	uint32_t mg_clktop2_coreclkctl1;
+	uint32_t mg_clktop2_hsclkctl;
+	uint32_t mg_pll_div0;
+	uint32_t mg_pll_div1;
+	uint32_t mg_pll_lf;
+	uint32_t mg_pll_frac_lock;
+	uint32_t mg_pll_ssc;
+	uint32_t mg_pll_bias;
+	uint32_t mg_pll_tdc_coldst_bias;
 };
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 11a1932cde6e..52337f487ebc 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1409,6 +1409,12 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
 				     bool enable);
+void icl_map_plls_to_ports(struct drm_crtc *crtc,
+			   struct intel_crtc_state *crtc_state,
+			   struct drm_atomic_state *old_state);
+void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
+			     struct intel_crtc_state *crtc_state,
+			     struct drm_atomic_state *old_state);
 
 unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
 				   int plane, unsigned int height);

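As a quick aside on how the PLL selection above hangs together: combo PHY
ports A/B pick from the two combo DPLLs, while each Type-C port C..F maps
one-to-one onto its own MG PLL. A minimal user-space C sketch of that
port-to-PLL range mapping (the enum names below are simplified stand-ins,
not the driver's identifiers):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };
enum pll_id { DPLL0, DPLL1, MGPLL1, MGPLL2, MGPLL3, MGPLL4 };

/* Combo ports A/B pick from DPLL0..DPLL1; Type-C ports C..F own one MG PLL. */
static int port_to_pll_range(enum port p, int *min, int *max)
{
	switch (p) {
	case PORT_A:
	case PORT_B:
		*min = DPLL0;
		*max = DPLL1;
		return 0;
	case PORT_C:
	case PORT_D:
	case PORT_E:
	case PORT_F:
		*min = *max = MGPLL1 + (p - PORT_C);
		return 0;
	default:
		return -1;	/* unknown port */
	}
}

int main(void)
{
	for (int p = PORT_A; p <= PORT_F; p++) {
		int lo, hi;

		if (!port_to_pll_range(p, &lo, &hi))
			printf("port %c -> PLL ids [%d, %d]\n", 'A' + p, lo, hi);
	}
	return 0;
}
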
From febafb93181e4fb4de19f4484df62ce2d04155aa Mon Sep 17 00:00:00 2001
From: Paulo Zanoni <paulo.r.zanoni@intel.com>
Date: Wed, 28 Mar 2018 14:57:59 -0700
Subject: [PATCH 0634/1286] drm/i915/icl: compute the combo PHY (DPLL) HDMI
 registers

HDMI mode DPLL programming on ICL is the same as CNL, so just reuse
the CNL code.

v2:
 - Properly detect HDMI crtcs.
 - Rebase after changes to the cnl function (clock * 1000).
v3:
 - Add a comment to clarify why we treat 38.4 as 19.2 (James).

Reviewed-by: James Ausmus <james.ausmus@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328215803.13835-5-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 39 ++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 14f5414ceab2..a1c2bd10a72e 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2218,6 +2218,7 @@ cnl_ddi_calculate_wrpll(int clock,
 			struct skl_wrpll_params *wrpll_params)
 {
 	u32 afe_clock = clock * 5;
+	uint32_t ref_clock;
 	u32 dco_min = 7998000;
 	u32 dco_max = 10000000;
 	u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2250,8 +2251,17 @@ cnl_ddi_calculate_wrpll(int clock,
 
 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
 
-	cnl_wrpll_params_populate(wrpll_params, best_dco,
-				  dev_priv->cdclk.hw.ref, pdiv, qdiv, kdiv);
+	ref_clock = dev_priv->cdclk.hw.ref;
+
+	/*
+	 * For ICL, the spec states: if reference frequency is 38.4, use 19.2
+	 * because the DPLL automatically divides that by 2.
+	 */
+	if (IS_ICELAKE(dev_priv) && ref_clock == 38400)
+		ref_clock = 19200;
+
+	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
+				  kdiv);
 
 	return true;
 }
@@ -2403,7 +2413,30 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 				struct intel_encoder *encoder, int clock,
 				struct intel_dpll_hw_state *pll_state)
 {
-	/* TODO */
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	uint32_t cfgcr0, cfgcr1;
+	struct skl_wrpll_params pll_params = { 0 };
+	bool ret;
+
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+		ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
+	else
+		ret = false; /* TODO */
+
+	if (!ret)
+		return false;
+
+	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
+		 pll_params.dco_integer;
+
+	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
+		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
+		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
+		 DPLL_CFGCR1_PDIV(pll_params.pdiv) |
+		 DPLL_CFGCR1_CENTRAL_FREQ_8400;
+
+	pll_state->cfgcr0 = cfgcr0;
+	pll_state->cfgcr1 = cfgcr1;
 	return true;
 }
 

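To make the reference-clock quirk above concrete, here is a standalone C
sketch of the rule the patch adds (values in kHz, matching
dev_priv->cdclk.hw.ref; icl_effective_ref_khz() is a made-up helper name
used only for illustration):

#include <stdbool.h>
#include <stdio.h>

/*
 * Restates the rule from the patch: on ICL a 38.4 MHz reference behaves as
 * 19.2 MHz for the WRPLL math because the DPLL divides it by two internally.
 */
static int icl_effective_ref_khz(bool is_icelake, int ref_khz)
{
	if (is_icelake && ref_khz == 38400)
		return 19200;
	return ref_khz;
}

int main(void)
{
	int refs[] = { 19200, 24000, 38400 };

	for (unsigned int i = 0; i < sizeof(refs) / sizeof(refs[0]); i++)
		printf("ref %d kHz -> WRPLL math uses %d kHz\n",
		       refs[i], icl_effective_ref_khz(true, refs[i]));
	return 0;
}
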
From bb82139b4bbf8bdb825a7339d34d231632e67f27 Mon Sep 17 00:00:00 2001
From: Paulo Zanoni <paulo.r.zanoni@intel.com>
Date: Wed, 28 Mar 2018 14:58:00 -0700
Subject: [PATCH 0635/1286] drm/i915/icl: compute the combo PHY (DPLL) DP
 registers

Just use the hardcoded tables provided by our spec.

v2: Rebase.
v3: Clarify that 38.4 uses the 19.2 table (James).

Reviewed-by: James Ausmus <james.ausmus@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328215803.13835-6-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 87 ++++++++++++++++++++++++++-
 1 file changed, 86 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a1c2bd10a72e..30eca819cb11 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2409,6 +2409,91 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
 	.dump_hw_state = cnl_dump_hw_state,
 };
 
+/*
+ * These values are already adjusted: they're the bits we write to the
+ * registers, not the logical values.
+ */
+static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = {
+	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
+	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
+	  .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
+	  .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
+	  .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
+	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
+	{ .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
+	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
+	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
+	  .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+};
+
+/* Also used for 38.4 MHz values. */
+static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
+	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
+	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
+	  .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
+	  .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
+	  .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
+	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
+	{ .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
+	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
+	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
+	  .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+};
+
+static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
+				  struct skl_wrpll_params *pll_params)
+{
+	const struct skl_wrpll_params *params;
+
+	params = dev_priv->cdclk.hw.ref == 24000 ?
+			icl_dp_combo_pll_24MHz_values :
+			icl_dp_combo_pll_19_2MHz_values;
+
+	switch (clock) {
+	case 540000:
+		*pll_params = params[0];
+		break;
+	case 270000:
+		*pll_params = params[1];
+		break;
+	case 162000:
+		*pll_params = params[2];
+		break;
+	case 324000:
+		*pll_params = params[3];
+		break;
+	case 216000:
+		*pll_params = params[4];
+		break;
+	case 432000:
+		*pll_params = params[5];
+		break;
+	case 648000:
+		*pll_params = params[6];
+		break;
+	case 810000:
+		*pll_params = params[7];
+		break;
+	default:
+		MISSING_CASE(clock);
+		return false;
+	}
+
+	return true;
+}
+
 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 				struct intel_encoder *encoder, int clock,
 				struct intel_dpll_hw_state *pll_state)
@@ -2421,7 +2506,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 		ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
 	else
-		ret = false; /* TODO */
+		ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
 
 	if (!ret)
 		return false;

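The lookup itself is just a link-rate-to-row mapping; a self-contained C
sketch of the index selection in icl_calc_dp_combo_pll() above (only the
switch is reproduced, not the register values):

#include <stdio.h>

/* Map a DP link rate (kHz) to a row of the pre-computed WRPLL tables. */
static int icl_dp_rate_to_index(int clock_khz)
{
	switch (clock_khz) {
	case 540000: return 0;	/* 5.4 */
	case 270000: return 1;	/* 2.7 */
	case 162000: return 2;	/* 1.62 */
	case 324000: return 3;	/* 3.24 */
	case 216000: return 4;	/* 2.16 */
	case 432000: return 5;	/* 4.32 */
	case 648000: return 6;	/* 6.48 */
	case 810000: return 7;	/* 8.1 */
	default:     return -1;	/* unsupported rate */
	}
}

int main(void)
{
	int rates[] = { 162000, 270000, 540000, 810000, 123456 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%d kHz -> table index %d\n",
		       rates[i], icl_dp_rate_to_index(rates[i]));
	return 0;
}
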
From 145ef0d17d57788293b5569cd03118fc0e220a61 Mon Sep 17 00:00:00 2001
From: Paulo Zanoni <paulo.r.zanoni@intel.com>
Date: Wed, 28 Mar 2018 14:58:01 -0700
Subject: [PATCH 0636/1286] drm/i915/icl: compute the MG PLL registers

This implements the "MG PLL Programming" sequence from our spec. The
biggest problem was that the spec assumes real numbers, so we had to
adjust some numbers and calculations because the kernel deals only with
integers.

I recommend grabbing some coffee, a pen and paper before reviewing
this patch.

v2:
 - Correctly identify DP encoders after upstream change.
 - Small checkpatch issues.
 - Rebase.
v3:
 - Try to improve the comment on the tdc_targetcnt calculation based on
   Manasi's feedback (Manasi).
 - Rebase.

Reviewed-by: Manasi Navare <manasi.d.navare@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180328215803.13835-7-paulo.r.zanoni@intel.com
---
 drivers/gpu/drm/i915/intel_dpll_mgr.c | 223 +++++++++++++++++++++++++-
 1 file changed, 222 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 30eca819cb11..383fbc15113d 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2535,11 +2535,232 @@ static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
 	return port - PORT_C + DPLL_ID_ICL_MGPLL1;
 }
 
+static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
+				     uint32_t *target_dco_khz,
+				     struct intel_dpll_hw_state *state)
+{
+	uint32_t dco_min_freq, dco_max_freq;
+	int div1_vals[] = {7, 5, 3, 2};
+	unsigned int i;
+	int div2;
+
+	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
+	dco_max_freq = is_dp ? 8100000 : 10000000;
+
+	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
+		int div1 = div1_vals[i];
+
+		for (div2 = 10; div2 > 0; div2--) {
+			int dco = div1 * div2 * clock_khz * 5;
+			int a_divratio, tlinedrv, inputsel, hsdiv;
+
+			if (dco < dco_min_freq || dco > dco_max_freq)
+				continue;
+
+			if (div2 >= 2) {
+				a_divratio = is_dp ? 10 : 5;
+				tlinedrv = 2;
+			} else {
+				a_divratio = 5;
+				tlinedrv = 0;
+			}
+			inputsel = is_dp ? 0 : 1;
+
+			switch (div1) {
+			default:
+				MISSING_CASE(div1);
+			case 2:
+				hsdiv = 0;
+				break;
+			case 3:
+				hsdiv = 1;
+				break;
+			case 5:
+				hsdiv = 2;
+				break;
+			case 7:
+				hsdiv = 3;
+				break;
+			}
+
+			*target_dco_khz = dco;
+
+			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
+
+			state->mg_clktop2_coreclkctl1 =
+				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
+
+			state->mg_clktop2_hsclkctl =
+				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
+				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
+				MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(hsdiv) |
+				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
+
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * The specification for this function uses real numbers, so the math had to be
+ * adapted to integer-only calculation, which is why it looks so different.
+ */
 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 				  struct intel_encoder *encoder, int clock,
 				  struct intel_dpll_hw_state *pll_state)
 {
-	/* TODO */
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	int refclk_khz = dev_priv->cdclk.hw.ref;
+	uint32_t dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
+	uint32_t iref_ndiv, iref_trim, iref_pulse_w;
+	uint32_t prop_coeff, int_coeff;
+	uint32_t tdc_targetcnt, feedfwgain;
+	uint64_t ssc_stepsize, ssc_steplen, ssc_steplog;
+	uint64_t tmp;
+	bool use_ssc = false;
+	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+
+	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
+				      pll_state)) {
+		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
+		return false;
+	}
+
+	m1div = 2;
+	m2div_int = dco_khz / (refclk_khz * m1div);
+	if (m2div_int > 255) {
+		m1div = 4;
+		m2div_int = dco_khz / (refclk_khz * m1div);
+		if (m2div_int > 255) {
+			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
+				      clock);
+			return false;
+		}
+	}
+	m2div_rem = dco_khz % (refclk_khz * m1div);
+
+	tmp = (uint64_t)m2div_rem * (1 << 22);
+	do_div(tmp, refclk_khz * m1div);
+	m2div_frac = tmp;
+
+	switch (refclk_khz) {
+	case 19200:
+		iref_ndiv = 1;
+		iref_trim = 28;
+		iref_pulse_w = 1;
+		break;
+	case 24000:
+		iref_ndiv = 1;
+		iref_trim = 25;
+		iref_pulse_w = 2;
+		break;
+	case 38400:
+		iref_ndiv = 2;
+		iref_trim = 28;
+		iref_pulse_w = 1;
+		break;
+	default:
+		MISSING_CASE(refclk_khz);
+		return false;
+	}
+
+	/*
+	 * tdc_res = 0.000003
+	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
+	 *
+	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
+	 * was supposed to be a division, but we rearranged the operations of
+	 * the formula to avoid early divisions so we don't multiply the
+	 * rounding errors.
+	 *
+	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
+	 * we also rearrange to work with integers.
+	 *
+	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
+	 * last division by 10.
+	 */
+	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
+
+	/*
+	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
+	 * 32 bits. That's not a problem since we round the division down
+	 * anyway.
+	 */
+	feedfwgain = (use_ssc || m2div_rem > 0) ?
+		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
+
+	if (dco_khz >= 9000000) {
+		prop_coeff = 5;
+		int_coeff = 10;
+	} else {
+		prop_coeff = 4;
+		int_coeff = 8;
+	}
+
+	if (use_ssc) {
+		tmp = (uint64_t)dco_khz * 47 * 32;
+		do_div(tmp, refclk_khz * m1div * 10000);
+		ssc_stepsize = tmp;
+
+		tmp = (uint64_t)dco_khz * 1000;
+		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
+	} else {
+		ssc_stepsize = 0;
+		ssc_steplen = 0;
+	}
+	ssc_steplog = 4;
+
+	pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
+				  MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
+				  MG_PLL_DIV0_FBDIV_INT(m2div_int);
+
+	pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
+				 MG_PLL_DIV1_DITHER_DIV_2 |
+				 MG_PLL_DIV1_NDIVRATIO(1) |
+				 MG_PLL_DIV1_FBPREDIV(m1div);
+
+	pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
+			       MG_PLL_LF_AFCCNTSEL_512 |
+			       MG_PLL_LF_GAINCTRL(1) |
+			       MG_PLL_LF_INT_COEFF(int_coeff) |
+			       MG_PLL_LF_PROP_COEFF(prop_coeff);
+
+	pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
+				      MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
+				      MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
+				      MG_PLL_FRAC_LOCK_DCODITHEREN |
+				      MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
+	if (use_ssc || m2div_rem > 0)
+		pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
+
+	pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
+				MG_PLL_SSC_TYPE(2) |
+				MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
+				MG_PLL_SSC_STEPNUM(ssc_steplog) |
+				MG_PLL_SSC_FLLEN |
+				MG_PLL_SSC_STEPSIZE(ssc_stepsize);
+
+	pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART;
+
+	if (refclk_khz != 38400) {
+		pll_state->mg_pll_tdc_coldst_bias |=
+			MG_PLL_TDC_COLDST_IREFINT_EN |
+			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+			MG_PLL_TDC_COLDST_COLDSTART |
+			MG_PLL_TDC_TDCOVCCORR_EN |
+			MG_PLL_TDC_TDCSEL(3);
+
+		pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
+					 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+					 MG_PLL_BIAS_BIAS_BONUS(10) |
+					 MG_PLL_BIAS_BIASCAL_EN |
+					 MG_PLL_BIAS_CTRIM(12) |
+					 MG_PLL_BIAS_VREF_RDAC(4) |
+					 MG_PLL_BIAS_IREFTRIM(iref_trim);
+	}
+
 	return true;
 }
 

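Since the commit message warns about the integer-only math, a small
user-space C program that reproduces the feedback-divider and tdc_targetcnt
arithmetic from the patch for one sample point may help review; the dco_khz
value below is an arbitrary example, not something the driver necessarily
selects:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t refclk_khz = 24000;
	uint32_t dco_khz = 8100000;	/* example DCO target, kHz */
	uint32_t m1div = 2;
	uint32_t m2div_int = dco_khz / (refclk_khz * m1div);
	uint32_t m2div_rem, m2div_frac, tdc_targetcnt;

	/* Same fallback as the patch: retry with m1div = 4 if int part > 255. */
	if (m2div_int > 255) {
		m1div = 4;
		m2div_int = dco_khz / (refclk_khz * m1div);
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Fractional part scaled by 2^22, as done with do_div() in the patch. */
	m2div_frac = (uint32_t)(((uint64_t)m2div_rem << 22) /
				(refclk_khz * m1div));

	/*
	 * tdc_targetcnt = int(2 / (3e-6 * 8 * 50 * 1.1) / refclk_mhz + 0.5),
	 * rearranged to stay in integers exactly as the patch does.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	printf("m1div=%u m2div_int=%u m2div_rem=%u m2div_frac=%u tdc=%u\n",
	       m1div, m2div_int, m2div_rem, m2div_frac, tdc_targetcnt);
	return 0;
}
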
From 4e8507ba774f1fe5cd0c26a7cafc09afb8a6ba8e Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sun, 6 May 2018 19:31:47 +0100
Subject: [PATCH 0637/1286] drm/i915: Don't request a bug report for unsafe
 module parameters

Unsafe module parameters are just that, unsafe. If the user is foolish
enough to try them and the kernel breaks, they get to keep both pieces.
Don't ask them to file a bug report if they broke it themselves.

References: https://bugs.freedesktop.org/show_bug.cgi?id=106423
Fixes: d15d7538c6d2 ("drm/i915: Tune down init error message due to failure injection")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180506183147.2690-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8c2986849236..2a96d082addf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -101,7 +101,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 		   __builtin_return_address(0), &vaf);
 
 	if (is_error && !shown_bug_once) {
-		dev_notice(kdev, "%s", FDO_BUG_MSG);
+		/*
+		 * Ask the user to file a bug report for the error, except
+		 * if they may have caused the bug by fiddling with unsafe
+		 * module parameters.
+		 */
+		if (!test_taint(TAINT_USER))
+			dev_notice(kdev, "%s", FDO_BUG_MSG);
 		shown_bug_once = true;
 	}
 

From 87c7acf867700a9f32db81b227174bf83fecfde3 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 8 May 2018 01:30:45 +0100
Subject: [PATCH 0638/1286] drm/i915/execlists: Drop unused parameter to
 lookup_priolist()

lookup_priolist() no longer attaches the request to the priolist; it
just returns the priolist for the given priority instead. Drop the
unused parameter.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508003046.2633-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9f3cce022b2d..bf9a44dae558 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -258,9 +258,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 }
 
 static struct i915_priolist *
-lookup_priolist(struct intel_engine_cs *engine,
-		struct i915_sched_node *node,
-		int prio)
+lookup_priolist(struct intel_engine_cs *engine, int prio)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_priolist *p;
@@ -345,7 +343,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
 		if (rq_prio(rq) != last_prio) {
 			last_prio = rq_prio(rq);
-			p = lookup_priolist(engine, &rq->sched, last_prio);
+			p = lookup_priolist(engine, last_prio);
 		}
 
 		list_add(&rq->sched.link, &p->requests);
@@ -1145,7 +1143,7 @@ static void queue_request(struct intel_engine_cs *engine,
 			  int prio)
 {
 	list_add_tail(&node->link,
-		      &lookup_priolist(engine, node, prio)->requests);
+		      &lookup_priolist(engine, prio)->requests);
 }
 
 static void __submit_queue(struct intel_engine_cs *engine, int prio)

From a02eb975be78171f66a47c103e57e7940d0860a7 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 8 May 2018 01:30:46 +0100
Subject: [PATCH 0639/1286] drm/i915/execlists: Cache the priolist when
 rescheduling

When rescheduling a change of dependencies, they all need to be added to
the same priolist (at least the ones on the same engine!). Since we
likely want to move a batch of requests, keep the priolist around.

v2: Throw in an assert to catch trivial errors quickly.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508003046.2633-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bf9a44dae558..046adf397a71 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -346,6 +346,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			p = lookup_priolist(engine, last_prio);
 		}
 
+		GEM_BUG_ON(p->priority != rq_prio(rq));
 		list_add(&rq->sched.link, &p->requests);
 	}
 }
@@ -1198,7 +1199,8 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 static void execlists_schedule(struct i915_request *request,
 			       const struct i915_sched_attr *attr)
 {
-	struct intel_engine_cs *engine;
+	struct i915_priolist *uninitialized_var(pl);
+	struct intel_engine_cs *engine, *last;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
 	const int prio = attr->priority;
@@ -1271,6 +1273,7 @@ static void execlists_schedule(struct i915_request *request,
 		__list_del_entry(&stack.dfs_link);
 	}
 
+	last = NULL;
 	engine = request->engine;
 	spin_lock_irq(&engine->timeline.lock);
 
@@ -1287,8 +1290,12 @@ static void execlists_schedule(struct i915_request *request,
 
 		node->attr.priority = prio;
 		if (!list_empty(&node->link)) {
-			__list_del_entry(&node->link);
-			queue_request(engine, node, prio);
+			if (last != engine) {
+				pl = lookup_priolist(engine, prio);
+				last = engine;
+			}
+			GEM_BUG_ON(pl->priority != prio);
+			list_move_tail(&node->link, &pl->requests);
 		}
 
 		if (prio > engine->execlists.queue_priority &&

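The optimisation above is the generic "remember the last key" pattern: when
walking a batch of items that mostly share the same key, reuse the previous
lookup instead of searching again for every item. A tiny standalone C
illustration (expensive_lookup() is a placeholder, not a driver function):

#include <stdio.h>

struct bucket { const char *name; };

static struct bucket buckets[] = { { "prio-0" }, { "prio-1" }, { "prio-2" } };

static struct bucket *expensive_lookup(int prio)
{
	printf("  lookup(%d)\n", prio);	/* pretend this walks an rbtree */
	return &buckets[prio];
}

int main(void)
{
	int prios[] = { 1, 1, 1, 2, 2, 0 };
	int last_prio = -1;
	struct bucket *b = NULL;

	for (unsigned int i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		if (prios[i] != last_prio) {	/* only re-lookup on change */
			b = expensive_lookup(prios[i]);
			last_prio = prios[i];
		}
		printf("item %u -> %s\n", i, b->name);
	}
	return 0;
}
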
From e30ca4bcf0d9ad2c6f5716d6098b935f0d584c76 Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Tue, 8 May 2018 12:26:50 +0300
Subject: [PATCH 0640/1286] drm/xen-front: checking for NULL instead of IS_ERR

drm_dev_alloc() returns error pointers, it never returns NULL.

Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508092650.GA661@mwanda
---
 drivers/gpu/drm/xen/xen_drm_front.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 1b0ea9ac330e..8615e8522c7a 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -543,8 +543,8 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
 	front_info->drm_info = drm_info;
 
 	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
-	if (!drm_dev) {
-		ret = -ENOMEM;
+	if (IS_ERR(drm_dev)) {
+		ret = PTR_ERR(drm_dev);
 		goto fail;
 	}
 

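For readers less familiar with the kernel's error-pointer convention, a
user-space model of ERR_PTR/IS_ERR/PTR_ERR shows why a NULL check can never
catch a failure from drm_dev_alloc(): errors come back as small negative
values encoded into the pointer, never as NULL. The helpers below are
simplified re-implementations, not the kernel's:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static void *err_ptr(long error)	{ return (void *)error; }
static long ptr_err(const void *ptr)	{ return (long)ptr; }
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in allocator that fails the way drm_dev_alloc() does. */
static void *alloc_device(int fail)
{
	static int dummy;

	return fail ? err_ptr(-ENOMEM) : &dummy;
}

int main(void)
{
	void *dev = alloc_device(1);

	if (!dev)
		printf("NULL check: missed the error\n");
	if (is_err(dev))
		printf("IS_ERR check: caught error %ld\n", ptr_err(dev));
	return 0;
}
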
From 18f20bc5303cf6276cce9ae1742f3835244ad087 Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Tue, 8 May 2018 12:27:39 +0300
Subject: [PATCH 0641/1286] drm/xen-front: fix xen_drm_front_shbuf_alloc()
 error handling

The xen_drm_front_shbuf_alloc() function was returning a mix of error
pointers and NULL, and the caller wasn't checking correctly.  I've
changed it to always return error pointers consistently.

Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508092739.GB661@mwanda
---
 drivers/gpu/drm/xen/xen_drm_front.c       | 4 ++--
 drivers/gpu/drm/xen/xen_drm_front_shbuf.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 8615e8522c7a..378cb7ce0db5 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -188,8 +188,8 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
 	buf_cfg.be_alloc = front_info->cfg.be_alloc;
 
 	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
-	if (!shbuf)
-		return -ENOMEM;
+	if (IS_ERR(shbuf))
+		return PTR_ERR(shbuf);
 
 	ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
index d5705251a0d6..8099cb343ae3 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
@@ -383,7 +383,7 @@ xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
 
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 	if (!buf)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	if (cfg->be_alloc)
 		buf->ops = &backend_ops;

From f45140df31717bfab0974a722800d3ac0587b3f0 Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Tue, 8 May 2018 12:28:29 +0300
Subject: [PATCH 0642/1286] drm/xen-front: Fix loop timeout

If the loop times out then we want to exit with "to" set to zero, but in
the current code it's set to -1.

Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508092829.GC661@mwanda
---
 drivers/gpu/drm/xen/xen_drm_front.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 378cb7ce0db5..3345ac71b391 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -778,7 +778,7 @@ static int xen_drv_remove(struct xenbus_device *dev)
 	 */
 	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
 				     XenbusStateUnknown) != XenbusStateInitWait) &&
-				     to--)
+				     --to)
 		msleep(10);
 
 	if (!to) {

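The off-by-one is easy to reproduce outside the kernel; a short C
demonstration of post- versus pre-decrement in the loop condition (the
condition function is a stand-in for the xenbus state never reaching
InitWait):

#include <stdio.h>

static int condition_never_met(void)
{
	return 1;	/* the remote state never changes */
}

int main(void)
{
	int to;

	/* With "to--" the last test sees 0 (false) but still decrements. */
	to = 3;
	while (condition_never_met() && to--)
		;
	printf("post-decrement: to = %d (timeout not caught by !to)\n", to);

	/* With "--to" the loop exits exactly when the counter reaches 0. */
	to = 3;
	while (condition_never_met() && --to)
		;
	printf("pre-decrement:  to = %d (timeout caught by !to)\n", to);
	return 0;
}
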
From c11c7bfd213495784b22ef82a69b6489f8d0092f Mon Sep 17 00:00:00 2001
From: Matthew Auld <matthew.auld@intel.com>
Date: Wed, 2 May 2018 20:50:21 +0100
Subject: [PATCH 0643/1286] drm/i915/userptr: reject zero user_size

Operating on a zero sized GEM userptr object will lead to explosions.

Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl")
Testcase: igt/gem_userptr_blits/input-checking
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502195021.30900-1-matthew.auld@intel.com
---
 drivers/gpu/drm/i915/i915_gem_userptr.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d596a8302ca3..854bd51b9478 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 			    I915_USERPTR_UNSYNCHRONIZED))
 		return -EINVAL;
 
+	if (!args->user_size)
+		return -EINVAL;
+
 	if (offset_in_page(args->user_ptr | args->user_size))
 		return -EINVAL;
 

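Why the extra check is needed: the existing alignment test ORs the pointer
and size together and looks only at the low page bits, so a zero size is
"page aligned" and sails straight through. A standalone C sketch of the
check order (PAGE_SIZE is assumed to be 4096 here, purely for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL	/* assumed page size for this example */

static uint64_t offset_in_page(uint64_t v)
{
	return v & (PAGE_SIZE - 1);
}

/* Same shape as the ioctl checks: alignment alone lets size == 0 through. */
static int check_userptr(uint64_t user_ptr, uint64_t user_size)
{
	if (!user_size)			/* the new check from the patch */
		return -EINVAL;
	if (offset_in_page(user_ptr | user_size))
		return -EINVAL;		/* misaligned pointer or size */
	return 0;
}

int main(void)
{
	printf("ptr=0x10000 size=0      -> %d\n",
	       check_userptr(0x10000, 0));
	printf("ptr=0x10000 size=0x2000 -> %d\n",
	       check_userptr(0x10000, 0x2000));
	printf("ptr=0x10001 size=0x2000 -> %d\n",
	       check_userptr(0x10001, 0x2000));
	return 0;
}
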
From 98dc0454c023985cb31de2578c941391a900e941 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 5 May 2018 10:10:13 +0100
Subject: [PATCH 0644/1286] drm/i915/selftests: Refactor common flush_test()

Pull igt_flush_test() out into its own library before copying and
pasting the code for a third time.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180505091014.26126-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/Makefile                 |  3 +-
 .../gpu/drm/i915/selftests/igt_flush_test.c   | 64 +++++++++++++++++
 .../gpu/drm/i915/selftests/igt_flush_test.h   | 14 ++++
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 66 ++----------------
 drivers/gpu/drm/i915/selftests/intel_lrc.c    | 68 ++-----------------
 5 files changed, 93 insertions(+), 122 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/selftests/igt_flush_test.c
 create mode 100644 drivers/gpu/drm/i915/selftests/igt_flush_test.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 00c13382b008..4c6adae23e18 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -158,7 +158,8 @@ i915-y += dvo_ch7017.o \
 i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 i915-$(CONFIG_DRM_I915_SELFTEST) += \
 	selftests/i915_random.o \
-	selftests/i915_selftest.o
+	selftests/i915_selftest.o \
+	selftests/igt_flush_test.o
 
 # virtual gpu code
 i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
new file mode 100644
index 000000000000..abff2f04ea84
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -0,0 +1,64 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_drv.h"
+
+#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+
+struct wedge_me {
+	struct delayed_work work;
+	struct drm_i915_private *i915;
+	const void *symbol;
+};
+
+static void wedge_me(struct work_struct *work)
+{
+	struct wedge_me *w = container_of(work, typeof(*w), work.work);
+
+	pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
+
+	GEM_TRACE("%pS timed out.\n", w->symbol);
+	GEM_TRACE_DUMP();
+
+	i915_gem_set_wedged(w->i915);
+}
+
+static void __init_wedge(struct wedge_me *w,
+			 struct drm_i915_private *i915,
+			 long timeout,
+			 const void *symbol)
+{
+	w->i915 = i915;
+	w->symbol = symbol;
+
+	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
+	schedule_delayed_work(&w->work, timeout);
+}
+
+static void __fini_wedge(struct wedge_me *w)
+{
+	cancel_delayed_work_sync(&w->work);
+	destroy_delayed_work_on_stack(&w->work);
+	w->i915 = NULL;
+}
+
+#define wedge_on_timeout(W, DEV, TIMEOUT)				\
+	for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
+	     (W)->i915;							\
+	     __fini_wedge((W)))
+
+int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
+{
+	struct wedge_me w;
+
+	cond_resched();
+
+	wedge_on_timeout(&w, i915, HZ)
+		i915_gem_wait_for_idle(i915, flags);
+
+	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
new file mode 100644
index 000000000000..63e009927c43
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_FLUSH_TEST_H
+#define IGT_FLUSH_TEST_H
+
+struct drm_i915_private;
+
+int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
+
+#endif /* IGT_FLUSH_TEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index c61bf65454a9..438e0b045a2c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -26,6 +26,7 @@
 
 #include "../i915_selftest.h"
 #include "i915_random.h"
+#include "igt_flush_test.h"
 
 #include "mock_context.h"
 #include "mock_drm.h"
@@ -253,61 +254,6 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
 	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
 }
 
-struct wedge_me {
-	struct delayed_work work;
-	struct drm_i915_private *i915;
-	const void *symbol;
-};
-
-static void wedge_me(struct work_struct *work)
-{
-	struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
-	pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
-
-	GEM_TRACE("%pS timed out.\n", w->symbol);
-	GEM_TRACE_DUMP();
-
-	i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
-			 struct drm_i915_private *i915,
-			 long timeout,
-			 const void *symbol)
-{
-	w->i915 = i915;
-	w->symbol = symbol;
-
-	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
-	schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
-	cancel_delayed_work_sync(&w->work);
-	destroy_delayed_work_on_stack(&w->work);
-	w->i915 = NULL;
-}
-
-#define wedge_on_timeout(W, DEV, TIMEOUT)				\
-	for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
-	     (W)->i915;							\
-	     __fini_wedge((W)))
-
-static noinline int
-flush_test(struct drm_i915_private *i915, unsigned int flags)
-{
-	struct wedge_me w;
-
-	cond_resched();
-
-	wedge_on_timeout(&w, i915, HZ)
-		i915_gem_wait_for_idle(i915, flags);
-
-	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
-}
-
 static void hang_fini(struct hang *h)
 {
 	*h->batch = MI_BATCH_BUFFER_END;
@@ -321,7 +267,7 @@ static void hang_fini(struct hang *h)
 
 	kernel_context_close(h->ctx);
 
-	flush_test(h->i915, I915_WAIT_LOCKED);
+	igt_flush_test(h->i915, I915_WAIT_LOCKED);
 }
 
 static bool wait_until_running(struct hang *h, struct i915_request *rq)
@@ -575,7 +521,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 		if (err)
 			break;
 
-		err = flush_test(i915, 0);
+		err = igt_flush_test(i915, 0);
 		if (err)
 			break;
 	}
@@ -874,7 +820,7 @@ unwind:
 		if (err)
 			break;
 
-		err = flush_test(i915, 0);
+		err = igt_flush_test(i915, 0);
 		if (err)
 			break;
 	}
@@ -1168,7 +1114,7 @@ static int igt_reset_queue(void *arg)
 
 		i915_request_put(prev);
 
-		err = flush_test(i915, I915_WAIT_LOCKED);
+		err = igt_flush_test(i915, I915_WAIT_LOCKED);
 		if (err)
 			break;
 	}
@@ -1280,7 +1226,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	err = i915_subtests(tests, i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	i915_modparams.enable_hangcheck = saved_hangcheck;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index b7460b5dd4f7..1b8a07125150 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -5,6 +5,7 @@
  */
 
 #include "../i915_selftest.h"
+#include "igt_flush_test.h"
 
 #include "mock_context.h"
 
@@ -168,61 +169,6 @@ static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
 	return READ_ONCE(*seqno);
 }
 
-struct wedge_me {
-	struct delayed_work work;
-	struct drm_i915_private *i915;
-	const void *symbol;
-};
-
-static void wedge_me(struct work_struct *work)
-{
-	struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
-	pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
-
-	GEM_TRACE("%pS timed out.\n", w->symbol);
-	GEM_TRACE_DUMP();
-
-	i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
-			 struct drm_i915_private *i915,
-			 long timeout,
-			 const void *symbol)
-{
-	w->i915 = i915;
-	w->symbol = symbol;
-
-	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
-	schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
-	cancel_delayed_work_sync(&w->work);
-	destroy_delayed_work_on_stack(&w->work);
-	w->i915 = NULL;
-}
-
-#define wedge_on_timeout(W, DEV, TIMEOUT)				\
-	for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
-	     (W)->i915;							\
-	     __fini_wedge((W)))
-
-static noinline int
-flush_test(struct drm_i915_private *i915, unsigned int flags)
-{
-	struct wedge_me w;
-
-	cond_resched();
-
-	wedge_on_timeout(&w, i915, HZ)
-		i915_gem_wait_for_idle(i915, flags);
-
-	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
-}
-
 static void spinner_end(struct spinner *spin)
 {
 	*spin->batch = MI_BATCH_BUFFER_END;
@@ -295,7 +241,7 @@ static int live_sanitycheck(void *arg)
 		}
 
 		spinner_end(&spin);
-		if (flush_test(i915, I915_WAIT_LOCKED)) {
+		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx;
 		}
@@ -307,7 +253,7 @@ err_ctx:
 err_spin:
 	spinner_fini(&spin);
 err_unlock:
-	flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
@@ -380,7 +326,7 @@ static int live_preempt(void *arg)
 
 		spinner_end(&spin_hi);
 		spinner_end(&spin_lo);
-		if (flush_test(i915, I915_WAIT_LOCKED)) {
+		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
 		}
@@ -396,7 +342,7 @@ err_spin_lo:
 err_spin_hi:
 	spinner_fini(&spin_hi);
 err_unlock:
-	flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
@@ -470,7 +416,7 @@ static int live_late_preempt(void *arg)
 
 		spinner_end(&spin_hi);
 		spinner_end(&spin_lo);
-		if (flush_test(i915, I915_WAIT_LOCKED)) {
+		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
 		}
@@ -486,7 +432,7 @@ err_spin_lo:
 err_spin_hi:
 	spinner_fini(&spin_hi);
 err_unlock:
-	flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 

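The wedge_on_timeout() macro being moved here relies on the for-loop-scope
idiom: the init expression arms the watchdog, the condition limits the body
to a single pass, and the increment expression runs the cleanup. A generic
user-space version of the same trick, with purely illustrative names:

#include <stdio.h>

struct guard { int active; };

static void guard_init(struct guard *g) { g->active = 1; printf("setup\n"); }
static void guard_fini(struct guard *g) { g->active = 0; printf("cleanup\n"); }

/* Body runs once; cleanup runs when the loop's "increment" fires. */
#define with_guard(g) \
	for (guard_init(g); (g)->active; guard_fini(g))

int main(void)
{
	struct guard g;

	with_guard(&g)
		printf("body runs exactly once, cleanup runs afterwards\n");
	return 0;
}
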
From 7c2f5bc5f0f41a3e294f5fa3b010a10f47512706 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 5 May 2018 10:10:14 +0100
Subject: [PATCH 0645/1286] drm/i915/selftests: Flush GPU activity before
 completing live_contexts

igt_ctx_exec() expects that we retire all active requests/objects before
completing, so that when we clean up the files afterwards they are ready
to be freed. Before we do so, it is then prudent to ensure that we have
indeed retired the GPU activity, raising an error if it fails. If we do
not, we run the risk of triggering an assertion when freeing the object:

  __i915_gem_free_objects:4793 GEM_BUG_ON(i915_gem_object_is_active(obj))

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180505091014.26126-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/i915_gem_context.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7ecaed50d0b9..ddb03f009232 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -23,6 +23,7 @@
  */
 
 #include "../i915_selftest.h"
+#include "igt_flush_test.h"
 
 #include "mock_drm.h"
 #include "huge_gem_object.h"
@@ -411,6 +412,8 @@ static int igt_ctx_exec(void *arg)
 	}
 
 out_unlock:
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	mock_file_free(i915, file);

From aaefa06a0ea845a7088585ca42259515769ea496 Mon Sep 17 00:00:00 2001
From: Matthew Auld <matthew.auld@intel.com>
Date: Thu, 1 Mar 2018 11:46:39 +0000
Subject: [PATCH 0646/1286] drm/i915: don't leak the pin_map on error

Add some onion to populate_lr_context.

v2: prefer err_unpin_ctx
    drop the fixes tag, worst case we just spew a warn before everything
    is cleaned up and balance is restored

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180301114639.510-1-matthew.auld@intel.com
---
 drivers/gpu/drm/i915/intel_lrc.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 046adf397a71..da08225fc482 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2574,8 +2574,10 @@ populate_lr_context(struct i915_gem_context *ctx,
 
 		defaults = i915_gem_object_pin_map(engine->default_state,
 						   I915_MAP_WB);
-		if (IS_ERR(defaults))
-			return PTR_ERR(defaults);
+		if (IS_ERR(defaults)) {
+			ret = PTR_ERR(defaults);
+			goto err_unpin_ctx;
+		}
 
 		memcpy(vaddr + start, defaults + start, engine->context_size);
 		i915_gem_object_unpin_map(engine->default_state);
@@ -2593,9 +2595,9 @@ populate_lr_context(struct i915_gem_context *ctx,
 			_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
 					   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
 
+err_unpin_ctx:
 	i915_gem_object_unpin_map(ctx_obj);
-
-	return 0;
+	return ret;
 }
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,

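The "onion" here is the usual goto-based unwinding: each acquisition gets a
label that releases everything taken so far, and the success path falls
through the same label. A self-contained C sketch of the shape
populate_lr_context() now has (malloc/free stand in for the pin/unpin
calls; the function and label names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int do_setup(int fail_second_step)
{
	char *ctx_map, *defaults;
	int ret = 0;

	ctx_map = malloc(64);		/* step 1: "pin" the context object */
	if (!ctx_map)
		return -1;

	defaults = fail_second_step ? NULL : malloc(64);	/* step 2 */
	if (!defaults) {
		ret = -1;
		goto err_unpin_ctx;	/* unwind step 1, don't leak ctx_map */
	}

	/* ... use ctx_map and defaults ... */
	free(defaults);

err_unpin_ctx:
	free(ctx_map);			/* success path falls through here too */
	return ret;
}

int main(void)
{
	printf("success path: %d\n", do_setup(0));
	printf("failure path: %d (no leak)\n", do_setup(1));
	return 0;
}
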
From 4cdf65ce8cc28e72089605250b887ab70e10f750 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 8 May 2018 12:53:12 +0100
Subject: [PATCH 0647/1286] drm/i915/selftests: Return to kernel context after
 each test

As we flush each test and wait for idle before the next, also switch
back to the kernel context. This helps limit the amount of collateral
damage a test may cause by resetting to the default state each time (and
also helps clean up temporaries used by the test).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508115312.12628-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/igt_flush_test.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index abff2f04ea84..7f35bddc2e95 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -57,6 +57,11 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 
 	cond_resched();
 
+	if (i915_gem_switch_to_kernel_context(i915)) {
+		pr_err("Failed to switch back to kernel context; declaring wedged\n");
+		i915_gem_set_wedged(i915);
+	}
+
 	wedge_on_timeout(&w, i915, HZ)
 		i915_gem_wait_for_idle(i915, flags);
 

From 4f6d8fcf1a53d4d544d9a13f70cb2669572b7ecb Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 7 May 2018 14:57:25 +0100
Subject: [PATCH 0648/1286] drm/i915: Flush submission tasklet after bumping
 priority

When called from process context, tasklet_schedule() defers itself to
ksoftirqd. From experience this may cause unacceptable latencies of over
200ms in executing the submission tasklet. Our goal is to reprioritise
the HW execution queue and trigger HW preemption immediately, so disable
bh over the call to schedule() and force the tasklet to run afterwards
if it was scheduled.

v2: Keep rcu_read_lock() around for PREEMPT_RCU

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180507135731.10587-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5ece6ae4bdff..89bf5d67cb74 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -578,10 +578,12 @@ static void __fence_set_priority(struct dma_fence *fence,
 	rq = to_request(fence);
 	engine = rq->engine;
 
-	rcu_read_lock();
+	local_bh_disable();
+	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
 	if (engine->schedule)
 		engine->schedule(rq, attr);
 	rcu_read_unlock();
+	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
 }
 
 static void fence_set_priority(struct dma_fence *fence,

From 71ace7ca2545d7cd7522988c16ad6c94e6169366 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 7 May 2018 14:57:26 +0100
Subject: [PATCH 0649/1286] drm/i915: Disable tasklet scheduling across initial
 scheduling

During request submission, we call the engine->schedule() function so
that we may reorder the active requests as required for inheriting the
new request's priority. This may schedule several tasklets to run on the
local CPU, but we will need to schedule the tasklets again for the new
request. Delay all the local tasklets until the end, so that we only
have to process the queue once.

v2: Beware PREEMPT_RCU, as local_bh_disable() is then not a
superset of rcu_read_lock().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180507135731.10587-2-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_request.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index e4cf76ec14a6..f336942229cf 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1110,12 +1110,11 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 * decide whether to preempt the entire chain so that it is ready to
 	 * run at the earliest possible convenience.
 	 */
-	rcu_read_lock();
+	local_bh_disable();
+	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
 	if (engine->schedule)
 		engine->schedule(request, &request->ctx->sched);
 	rcu_read_unlock();
-
-	local_bh_disable();
 	i915_sw_fence_commit(&request->submit);
 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
 

From 0597017cd18dc973ec6c80e55abfa36df05665d6 Mon Sep 17 00:00:00 2001
From: Matt Atwood <matthew.s.atwood@intel.com>
Date: Fri, 4 May 2018 15:17:59 -0700
Subject: [PATCH 0650/1286] drm/dp: Add DP_DPCD_REV_XX to drm_dp_helper

As more differentiation occurs between DP spec revisions, it's useful to
have these as macros in drm_dp_helper.

v2: DPCD_REV_XX to DP_DPCD_REV_XX

Signed-off-by: Matt Atwood <matthew.s.atwood@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180504221800.17830-1-matthew.s.atwood@intel.com
---
 include/drm/drm_dp_helper.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 930919f74af5..fc01341a46fa 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -64,6 +64,11 @@
 /* AUX CH addresses */
 /* DPCD */
 #define DP_DPCD_REV                         0x000
+# define DP_DPCD_REV_10                     0x10
+# define DP_DPCD_REV_11                     0x11
+# define DP_DPCD_REV_12                     0x12
+# define DP_DPCD_REV_13                     0x13
+# define DP_DPCD_REV_14                     0x14
 
 #define DP_MAX_LINK_RATE                    0x001
 

From 2f065d8ae918159791474049ab67a0cb85723b81 Mon Sep 17 00:00:00 2001
From: Matt Atwood <matthew.s.atwood@intel.com>
Date: Fri, 4 May 2018 15:18:00 -0700
Subject: [PATCH 0651/1286] drm/dp: Correctly mask DP_TRAINING_AUX_RD_INTERVAL
 values for DP 1.4

The DP 1.3 spec changed the bit scheme of DP_TRAINING_AUX_RD_INTERVAL
(DPCD 0x000e) from 8 bits to 7; the 8th bit is now used to identify
extended receiver capabilities. For panels that use this new feature the
wait interval would be inflated by 512 ms, whereas the spec maximum is
16 ms. This behavior is described in table 2-158 of the DP 1.4 spec,
address 0000eh.

With the introduction of DP 1.4 spec main link clock recovery was
standardized to 100 us regardless of TRAINING_AUX_RD_INTERVAL value.

To avoid breaking panels that are not spec compliant, we now warn on
invalid values.

V2: commit title/message, masking all 7 bits, warn on out of spec values.
V3: commit message, make link train clock recovery follow DP 1.4 spec.
V4: style changes
V5: typo
V6: print statement revisions, DP_REV to DPCD_REV, comment correction
V7: typo
V8: Style
V9: Strip out DPCD_REV_XX into separate patch
v10: DPCD_REV_XX to DP_DPCD_REV_XX

Signed-off-by: Matt Atwood <matthew.s.atwood@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180504221800.17830-2-matthew.s.atwood@intel.com
---
 drivers/gpu/drm/drm_dp_helper.c | 22 ++++++++++++++++++----
 include/drm/drm_dp_helper.h     |  1 +
 2 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ffe14ec3e7f2..36c7609a4bd5 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -119,18 +119,32 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
 void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
-	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+	int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+			  DP_TRAINING_AUX_RD_MASK;
+
+	if (rd_interval > 4)
+		DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+			      rd_interval);
+
+	if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
 		udelay(100);
 	else
-		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+		mdelay(rd_interval * 4);
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
 void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
-	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+	int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+			  DP_TRAINING_AUX_RD_MASK;
+
+	if (rd_interval > 4)
+		DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+			      rd_interval);
+
+	if (rd_interval == 0)
 		udelay(400);
 	else
-		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+		mdelay(rd_interval * 4);
 }
 EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
 
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index fc01341a46fa..c7b285637f86 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -124,6 +124,7 @@
 # define DP_DPCD_DISPLAY_CONTROL_CAPABLE     (1 << 3) /* edp v1.2 or higher */
 
 #define DP_TRAINING_AUX_RD_INTERVAL         0x00e   /* XXX 1.2? */
+# define DP_TRAINING_AUX_RD_MASK            0x7F    /* XXX 1.2? */
 
 #define DP_ADAPTER_CAP			    0x00f   /* 1.2 */
 # define DP_FORCE_LOAD_SENSE_CAP	    (1 << 0)

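A minimal sketch (not part of the patch; the helper name is made up) of how
the new DP_TRAINING_AUX_RD_MASK separates the training wait interval from the
extended-receiver-capability flag that DP 1.3/1.4 added in DPCD 0x00e:

#include <linux/bitops.h>
#include <drm/drm_dp_helper.h>

/* Sketch only: split DPCD 0x00e into the rd interval (bits 6:0) and the
 * extended receiver capability flag (bit 7).
 */
static void dp_parse_aux_rd_interval(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
				     int *rd_interval, bool *extended_caps)
{
	u8 raw = dpcd[DP_TRAINING_AUX_RD_INTERVAL];

	*rd_interval = raw & DP_TRAINING_AUX_RD_MASK;
	*extended_caps = raw & BIT(7);
}
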
From a33f084cc5eaa3a71d872baad83d6738cbf0f783 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 8 May 2018 16:15:52 +0100
Subject: [PATCH 0652/1286] drm/i915: Remove unused i915_flip tracepoints
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The i915_flip* tracepoints are no longer in use since the removal of CS
flip in commit 8b5d27b911d7 ("drm/i915: Remove intel_flip_work
infrastructure")

References: 8b5d27b911d7 ("drm/i915: Remove intel_flip_work infrastructure")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508151552.31024-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_trace.h | 36 -------------------------------
 1 file changed, 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 77ee5e53eb32..8cc3a256f29d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -834,42 +834,6 @@ DEFINE_EVENT(i915_request, i915_request_wait_end,
 	    TP_ARGS(rq)
 );
 
-TRACE_EVENT(i915_flip_request,
-	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
-
-	    TP_ARGS(plane, obj),
-
-	    TP_STRUCT__entry(
-		    __field(int, plane)
-		    __field(struct drm_i915_gem_object *, obj)
-		    ),
-
-	    TP_fast_assign(
-		    __entry->plane = plane;
-		    __entry->obj = obj;
-		    ),
-
-	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
-);
-
-TRACE_EVENT(i915_flip_complete,
-	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
-
-	    TP_ARGS(plane, obj),
-
-	    TP_STRUCT__entry(
-		    __field(int, plane)
-		    __field(struct drm_i915_gem_object *, obj)
-		    ),
-
-	    TP_fast_assign(
-		    __entry->plane = plane;
-		    __entry->obj = obj;
-		    ),
-
-	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
-);
-
 TRACE_EVENT_CONDITION(i915_reg_rw,
 	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
 

From 0adb90d330bb5f0d7fba511af5af3fc1ba93fb7a Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 8 May 2018 16:35:14 +0100
Subject: [PATCH 0653/1286] drm/i915: Annotate timeline lock nesting

CI noticed

<4>[   23.430701] ============================================
<4>[   23.430706] WARNING: possible recursive locking detected
<4>[   23.430713] 4.17.0-rc4-CI-CI_DRM_4156+ #1 Not tainted
<4>[   23.430720] --------------------------------------------
<4>[   23.430725] systemd-udevd/169 is trying to acquire lock:
<4>[   23.430732]         (ptrval) (&(&timeline->lock)->rlock){....}, at: move_to_timeline+0x48/0x12c [i915]
<4>[   23.430888]
                  but task is already holding lock:
<4>[   23.430894]         (ptrval) (&(&timeline->lock)->rlock){....}, at: i915_request_submit+0x1a/0x40 [i915]
<4>[   23.430995]
                  other info that might help us debug this:
<4>[   23.431002]  Possible unsafe locking scenario:

<4>[   23.431007]        CPU0
<4>[   23.431010]        ----
<4>[   23.431013]   lock(&(&timeline->lock)->rlock);
<4>[   23.431021]   lock(&(&timeline->lock)->rlock);
<4>[   23.431028]
                   *** DEADLOCK ***

<4>[   23.431036]  May be due to missing lock nesting notation

<4>[   23.431044] 5 locks held by systemd-udevd/169:
<4>[   23.431049]  #0:         (ptrval) (&dev->mutex){....}, at: __driver_attach+0x42/0xe0
<4>[   23.431065]  #1:         (ptrval) (&dev->mutex){....}, at: __driver_attach+0x50/0xe0
<4>[   23.431078]  #2:         (ptrval) (&dev->struct_mutex){+.+.}, at: i915_gem_init+0xca/0x630 [i915]
<4>[   23.431174]  #3:         (ptrval) (rcu_read_lock){....}, at: submit_notify+0x35/0x124 [i915]
<4>[   23.431271]  #4:         (ptrval) (&(&timeline->lock)->rlock){....}, at: i915_request_submit+0x1a/0x40 [i915]
<4>[   23.431369]
                  stack backtrace:
<4>[   23.431377] CPU: 0 PID: 169 Comm: systemd-udevd Not tainted 4.17.0-rc4-CI-CI_DRM_4156+ #1
<4>[   23.431385] Hardware name: Dell Inc.                 OptiPlex GX280               /0G8310, BIOS A04 02/09/2005
<4>[   23.431394] Call Trace:
<4>[   23.431403]  dump_stack+0x67/0x9b
<4>[   23.431411]  __lock_acquire+0xc67/0x1b50
<4>[   23.431421]  ? ring_buffer_lock_reserve+0x154/0x3f0
<4>[   23.431429]  ? lock_acquire+0xa6/0x210
<4>[   23.431435]  lock_acquire+0xa6/0x210
<4>[   23.431530]  ? move_to_timeline+0x48/0x12c [i915]
<4>[   23.431540]  _raw_spin_lock+0x2a/0x40
<4>[   23.431634]  ? move_to_timeline+0x48/0x12c [i915]
<4>[   23.431730]  move_to_timeline+0x48/0x12c [i915]
<4>[   23.431826]  __i915_request_submit+0xfa/0x280 [i915]
<4>[   23.431923]  i915_request_submit+0x25/0x40 [i915]
<4>[   23.432024]  i9xx_submit_request+0x11/0x140 [i915]
<4>[   23.432120]  submit_notify+0x8d/0x124 [i915]
<4>[   23.432202]  __i915_sw_fence_complete+0x81/0x250 [i915]
<4>[   23.432300]  __i915_request_add+0x31c/0x7c0 [i915]
<4>[   23.432395]  i915_gem_init+0x621/0x630 [i915]
<4>[   23.432476]  i915_driver_load+0xbee/0x10b0 [i915]
<4>[   23.432485]  ? trace_hardirqs_on_caller+0xe0/0x1b0
<4>[   23.432566]  i915_pci_probe+0x29/0x90 [i915]
<4>[   23.432574]  pci_device_probe+0xa1/0x130
<4>[   23.432582]  driver_probe_device+0x306/0x480
<4>[   23.432589]  __driver_attach+0xb7/0xe0
<4>[   23.432596]  ? driver_probe_device+0x480/0x480
<4>[   23.432602]  ? driver_probe_device+0x480/0x480
<4>[   23.432609]  bus_for_each_dev+0x74/0xc0
<4>[   23.432616]  bus_add_driver+0x15f/0x250
<4>[   23.432623]  ? 0xffffffffa02d7000
<4>[   23.432629]  driver_register+0x52/0xc0
<4>[   23.432635]  ? 0xffffffffa02d7000
<4>[   23.432642]  do_one_initcall+0x58/0x370
<4>[   23.432653]  ? do_init_module+0x1d/0x1ea
<4>[   23.432660]  ? rcu_read_lock_sched_held+0x6f/0x80
<4>[   23.432667]  ? kmem_cache_alloc_trace+0x282/0x2e0
<4>[   23.432675]  do_init_module+0x56/0x1ea
<4>[   23.432682]  load_module+0x2435/0x2b20
<4>[   23.432694]  ? __se_sys_finit_module+0xd3/0xf0
<4>[   23.432701]  __se_sys_finit_module+0xd3/0xf0
<4>[   23.432710]  do_syscall_64+0x55/0x190
<4>[   23.432717]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
<4>[   23.432724] RIP: 0033:0x7fa780782839
<4>[   23.432729] RSP: 002b:00007ffcea73e668 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
<4>[   23.432738] RAX: ffffffffffffffda RBX: 0000561a472a4b30 RCX: 00007fa780782839
<4>[   23.432745] RDX: 0000000000000000 RSI: 00007fa7804610e5 RDI: 000000000000000e
<4>[   23.432752] RBP: 00007fa7804610e5 R08: 0000000000000000 R09: 00007ffcea73e780
<4>[   23.432758] R10: 000000000000000e R11: 0000000000000246 R12: 0000000000000000
<4>[   23.432765] R13: 0000561a47296450 R14: 0000000000020000 R15: 0000561a472a4b30

but did not report it as an issue as it only occurred during the first
module load on boot. This is due to the removal of the distinct global
timeline, and with it its separate lock class. So instead mark up the
expected nesting. An alternative would be to define a separate lock class
for the engine, but since we only expect a single point of nesting, we
can avoid having multiple lock classes for the struct.

Fixes: a89d1f921c15 ("drm/i915: Split i915_gem_timeline into individual timelines")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Tested-by: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508153514.20251-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_request.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f336942229cf..8928894dd9c7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -502,7 +502,7 @@ static void move_to_timeline(struct i915_request *request,
 	GEM_BUG_ON(request->timeline == &request->engine->timeline);
 	lockdep_assert_held(&request->engine->timeline.lock);
 
-	spin_lock(&request->timeline->lock);
+	spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
 	list_move_tail(&request->link, &timeline->requests);
 	spin_unlock(&request->timeline->lock);
 }

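A minimal sketch (hypothetical helper, not part of the patch) of the lockdep
annotation pattern used here: when two locks of the same lock class are taken
in a well-defined order, the inner acquisition is marked with
SINGLE_DEPTH_NESTING so lockdep does not report it as recursive locking:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch only: move an item between two lists, each protected by a lock of
 * the same class; the caller guarantees the locking order.
 */
static void move_between_timelines(struct list_head *item,
				   spinlock_t *outer, spinlock_t *inner,
				   struct list_head *dst)
{
	spin_lock(outer);
	spin_lock_nested(inner, SINGLE_DEPTH_NESTING);
	list_move_tail(item, dst);
	spin_unlock(inner);
	spin_unlock(outer);
}
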
From 8a8d9b2c38d9e050bec8d203ba2fb40c663c1b9c Mon Sep 17 00:00:00 2001
From: Souptick Joarder <jrdr.linux@gmail.com>
Date: Sat, 14 Apr 2018 21:34:29 +0530
Subject: [PATCH 0654/1286] gpu: drm: exynos: Change return type to vm_fault_t

Use the new return type vm_fault_t for the fault handler
in struct vm_operations_struct.

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_gem.c | 21 ++++-----------------
 drivers/gpu/drm/exynos/exynos_drm_gem.h |  3 ++-
 2 files changed, 6 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 11cc01b47bc0..6e1494fa71b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -431,37 +431,24 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 	return 0;
 }
 
-int exynos_drm_gem_fault(struct vm_fault *vmf)
+vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
 	unsigned long pfn;
 	pgoff_t page_offset;
-	int ret;
 
 	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
 		DRM_ERROR("invalid page offset\n");
-		ret = -EINVAL;
-		goto out;
+		return VM_FAULT_SIGBUS;
 	}
 
 	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
-out:
-	switch (ret) {
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	default:
-		return VM_FAULT_SIGBUS;
-	}
+	return vmf_insert_mixed(vma, vmf->address,
+			__pfn_to_pfn_t(pfn, PFN_DEV));
 }
 
 static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 5a4c7de80f65..9057d7f1d6ed 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -13,6 +13,7 @@
 #define _EXYNOS_DRM_GEM_H_
 
 #include <drm/drm_gem.h>
+#include <linux/mm_types.h>
 
 #define to_exynos_gem(x)	container_of(x, struct exynos_drm_gem, base)
 
@@ -111,7 +112,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 			       struct drm_mode_create_dumb *args);
 
 /* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_fault *vmf);
+vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
 
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

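A minimal sketch (hypothetical handler, not part of the patch) of the pattern
this conversion enables: vmf_insert_mixed() already returns a vm_fault_t, so a
fault handler can return its result directly instead of translating errno
values by hand:

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/pfn_t.h>

/* Sketch only: map a single pfn at the faulting address. */
static vm_fault_t example_gem_fault(struct vm_fault *vmf, unsigned long pfn)
{
	return vmf_insert_mixed(vmf->vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
}
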
From fa50b7b4ba50f015acd0a6ca505582851e316d2a Mon Sep 17 00:00:00 2001
From: Tomasz Figa <tomasz.figa@gmail.com>
Date: Sat, 21 Apr 2018 19:26:10 +0200
Subject: [PATCH 0655/1286] drm/exynos: fimd: Add support for S5PV210 FIMD
 variant
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch adds support for the FIMD variant found on the S5PV210 SoC.
Except for CLKSEL bit availability, it is identical to the Exynos4210 variant.

Tested-by: Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>
Signed-off-by: Tomasz Figa <tomasz.figa@gmail.com>
Signed-off-by: Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_fimd.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index d42ae2bc3e56..01b1570d0c3a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -121,6 +121,12 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
 	.has_limited_fmt = 1,
 };
 
+static struct fimd_driver_data s5pv210_fimd_driver_data = {
+	.timing_base = 0x0,
+	.has_shadowcon = 1,
+	.has_clksel = 1,
+};
+
 static struct fimd_driver_data exynos3_fimd_driver_data = {
 	.timing_base = 0x20000,
 	.lcdblk_offset = 0x210,
@@ -193,6 +199,8 @@ struct fimd_context {
 static const struct of_device_id fimd_driver_dt_match[] = {
 	{ .compatible = "samsung,s3c6400-fimd",
 	  .data = &s3c64xx_fimd_driver_data },
+	{ .compatible = "samsung,s5pv210-fimd",
+	  .data = &s5pv210_fimd_driver_data },
 	{ .compatible = "samsung,exynos3250-fimd",
 	  .data = &exynos3_fimd_driver_data },
 	{ .compatible = "samsung,exynos4210-fimd",

From 5fae288d8ddd5b75d38d323cb4aa51ed2190ce17 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20Chmiel?= <pawel.mikolaj.chmiel@gmail.com>
Date: Sat, 21 Apr 2018 19:26:11 +0200
Subject: [PATCH 0656/1286] drm/exynos: Allow DRM_EXYNOS on s5pv210.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch brings back the possibility to use drivers that depend on
DRM_EXYNOS on Samsung S5PV210/S5PC110 series based systems.

Fixes: dbbc925bb83a ("drm/exynos: depend on ARCH_EXYNOS for DRM_EXYNOS")
Signed-off-by: Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 735ce47688f9..1548a784ef71 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
 config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
-	depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
+	depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
 	select VIDEOMODE_HELPERS
 	select SND_SOC_HDMI_CODEC if SND_SOC

From 1d7a99f5148fdcdb9d40367d6d0668a34df161d4 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 8 May 2018 22:10:56 +0100
Subject: [PATCH 0657/1286] drm/i915/selftests: Create mock_engine() under
 struct_mutex

mock_engine() calls i915_timeline_init(), and that requires struct_mutex
to be held as the timeline adds itself to the global list of timelines.
This error was introduced by commit a89d1f921c15 ("drm/i915: Split
i915_gem_timeline into individual timelines"), but the issue was masked
in CI by the earlier lockdep spam.

Fixes: a89d1f921c15 ("drm/i915: Split i915_gem_timeline into individual timelines")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508211056.17151-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/mock_gem_device.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 4b6622c6986a..94baedfa0f74 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -229,18 +229,20 @@ struct drm_i915_private *mock_gem_device(void)
 	INIT_LIST_HEAD(&i915->gt.closed_vma);
 
 	mutex_lock(&i915->drm.struct_mutex);
+
 	mock_init_ggtt(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	mkwrite_device_info(i915)->ring_mask = BIT(0);
 	i915->engine[RCS] = mock_engine(i915, "mock", RCS);
 	if (!i915->engine[RCS])
-		goto err_priorities;
+		goto err_unlock;
 
 	i915->kernel_context = mock_context(i915, NULL);
 	if (!i915->kernel_context)
 		goto err_engine;
 
+	mutex_unlock(&i915->drm.struct_mutex);
+
 	WARN_ON(i915_gemfs_init(i915));
 
 	return i915;
@@ -248,7 +250,8 @@ struct drm_i915_private *mock_gem_device(void)
 err_engine:
 	for_each_engine(engine, i915, id)
 		mock_engine_free(engine);
-err_priorities:
+err_unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
 	kmem_cache_destroy(i915->priorities);
 err_dependencies:
 	kmem_cache_destroy(i915->dependencies);

From b9777c6f86ac8c21f82211ab982ca48302042ede Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 9 May 2018 07:59:26 +0100
Subject: [PATCH 0658/1286] drm/i915/selftests: Only switch to kernel context
 when locked

In igt_flush_test() we try to switch back to the kernel context, but we
are only able to do so when we are called with struct_mutex held.

More of my CI fallout from lockdep being temporarily suppressed :(

Fixes: 4cdf65ce8cc2 ("drm/i915/selftests: Return to kernel context after each test")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180509065926.19207-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/selftests/igt_flush_test.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 7f35bddc2e95..0d06f559243f 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -57,7 +57,8 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 
 	cond_resched();
 
-	if (i915_gem_switch_to_kernel_context(i915)) {
+	if (flags & I915_WAIT_LOCKED &&
+	    i915_gem_switch_to_kernel_context(i915)) {
 		pr_err("Failed to switch back to kernel context; declaring wedged\n");
 		i915_gem_set_wedged(i915);
 	}

From 4413c474b18f116f96157599b7cc418a6471ed31 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 8 May 2018 22:03:17 +0100
Subject: [PATCH 0659/1286] drm/i915/execlists: Make submission tasklet hardirq
 safe

Prepare to allow the execlists submission to be run from underneath a
hardirq timer context (and not just the current softirq context) as is
required for fast preemption resets and context switches.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508210318.10274-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 42 ++++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index da08225fc482..d3c00f60c1b0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -356,10 +356,13 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 {
 	struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
-	spin_lock_irq(&engine->timeline.lock);
 	__unwind_incomplete_requests(engine);
-	spin_unlock_irq(&engine->timeline.lock);
+
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static inline void
@@ -553,7 +556,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 	execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
 }
 
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool __execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct execlist_port *port = execlists->port;
@@ -563,6 +566,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	bool submit = false;
 
+	lockdep_assert_held(&engine->timeline.lock);
+
 	/* Hardware submission is through 2 ports. Conceptually each port
 	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
 	 * static for a context, and unique to each, so we only execute
@@ -584,7 +589,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock_irq(&engine->timeline.lock);
 	rb = execlists->first;
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
@@ -599,7 +603,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 						EXECLISTS_ACTIVE_USER));
 		GEM_BUG_ON(!port_count(&port[0]));
 		if (port_count(&port[0]) > 1)
-			goto unlock;
+			return false;
 
 		/*
 		 * If we write to ELSP a second time before the HW has had
@@ -609,11 +613,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * the HW to indicate that it has had a chance to respond.
 		 */
 		if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
-			goto unlock;
+			return false;
 
 		if (need_preempt(engine, last, execlists->queue_priority)) {
 			inject_preempt_context(engine);
-			goto unlock;
+			return false;
 		}
 
 		/*
@@ -638,7 +642,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * priorities of the ports haven't been switch.
 		 */
 		if (port_count(&port[1]))
-			goto unlock;
+			return false;
 
 		/*
 		 * WaIdleLiteRestore:bdw,skl
@@ -743,13 +747,25 @@ done:
 	/* We must always keep the beast fed if we have work piled up */
 	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
 
-unlock:
-	spin_unlock_irq(&engine->timeline.lock);
-
-	if (submit) {
+	/* Re-evaluate the executing context setup after each preemptive kick */
+	if (last)
 		execlists_user_begin(execlists, execlists->port);
+
+	return submit;
+}
+
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	unsigned long flags;
+	bool submit;
+
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+	submit = __execlists_dequeue(engine);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+	if (submit)
 		execlists_submit_ports(engine);
-	}
 
 	GEM_BUG_ON(port_isset(execlists->port) &&
 		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

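A minimal sketch (hypothetical names, not part of the patch) of the
hardirq-safe pattern applied here: the queue manipulation is split into a
__helper that merely asserts the lock is held, while the wrapper takes the
lock with irqsave/irqrestore so it can be entered from softirq and hardirq
(timer) context alike, and the actual submission happens outside the lock:

#include <linux/spinlock.h>

static bool __pick_work(spinlock_t *lock)
{
	lockdep_assert_held(lock);
	/* ... inspect the queue and decide whether to submit ... */
	return true;
}

static void dequeue_hardirq_safe(spinlock_t *lock)
{
	unsigned long flags;
	bool submit;

	spin_lock_irqsave(lock, flags);
	submit = __pick_work(lock);
	spin_unlock_irqrestore(lock, flags);

	if (submit) {
		/* submit to hardware outside the lock, as in
		 * execlists_dequeue() above
		 */
	}
}
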
From 6486d84b1cca1b4374286ba2685161824f0bfa7d Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 8 May 2018 22:03:18 +0100
Subject: [PATCH 0660/1286] drm/i915/guc: Make submission tasklet hardirq safe

Prepare to allow the GuC submission to be run from underneath a
hardirq timer context (and not just the current softirq context) as is
required for fast preemption resets and context switches.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508210318.10274-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_guc_submission.c | 34 +++++++++++++++------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 62828e39ee26..2feb65096966 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -669,7 +669,7 @@ static inline int port_prio(const struct execlist_port *port)
 	return rq_prio(port_request(port));
 }
 
-static void guc_dequeue(struct intel_engine_cs *engine)
+static bool __guc_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct execlist_port *port = execlists->port;
@@ -679,7 +679,8 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 	bool submit = false;
 	struct rb_node *rb;
 
-	spin_lock_irq(&engine->timeline.lock);
+	lockdep_assert_held(&engine->timeline.lock);
+
 	rb = execlists->first;
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
@@ -694,13 +695,13 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 						     EXECLISTS_ACTIVE_PREEMPT);
 				queue_work(engine->i915->guc.preempt_wq,
 					   &preempt_work->work);
-				goto unlock;
+				return false;
 			}
 		}
 
 		port++;
 		if (port_isset(port))
-			goto unlock;
+			return false;
 	}
 	GEM_BUG_ON(port_isset(port));
 
@@ -738,19 +739,34 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 done:
 	execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
 	execlists->first = rb;
-	if (submit) {
+	if (submit)
 		port_assign(port, last);
+	if (last)
 		execlists_user_begin(execlists, execlists->port);
-		guc_submit(engine);
-	}
 
 	/* We must always keep the beast fed if we have work piled up */
 	GEM_BUG_ON(port_isset(execlists->port) &&
 		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
 	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
 
-unlock:
-	spin_unlock_irq(&engine->timeline.lock);
+	return submit;
+}
+
+static void guc_dequeue(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+	bool submit;
+
+	local_irq_save(flags);
+
+	spin_lock(&engine->timeline.lock);
+	submit = __guc_dequeue(engine);
+	spin_unlock(&engine->timeline.lock);
+
+	if (submit)
+		guc_submit(engine);
+
+	local_irq_restore(flags);
 }
 
 static void guc_submission_tasklet(unsigned long data)

From 2bdd045e3a30f7ddda1604f835df9c8d14d6d048 Mon Sep 17 00:00:00 2001
From: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Date: Tue, 8 May 2018 17:35:24 -0700
Subject: [PATCH 0661/1286] drm/i915/psr: Check if VBT says PSR can be enabled.

The driver features data block has a boolean flag for PSR; use it to decide
whether PSR should be enabled on a platform. The module parameter can
still be used to override this.

Note: The feature currently remains disabled by default for all platforms
irrespective of what VBT says.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180509003524.3199-1-dhinakaran.pandiyan@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h   | 1 +
 drivers/gpu/drm/i915/intel_bios.c | 1 +
 drivers/gpu/drm/i915/intel_psr.c  | 7 +++++--
 3 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 04e27806e581..24c5e4765afd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1073,6 +1073,7 @@ struct intel_vbt_data {
 	} edp;
 
 	struct {
+		bool enable;
 		bool full_link;
 		bool require_aux_wakeup;
 		int idle_frames;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 702d3fab97fc..54270bdde100 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -530,6 +530,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 	 */
 	if (!driver->drrs_enabled)
 		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+	dev_priv->vbt.psr.enable = driver->psr_enabled;
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 6233a322aac5..db27f2faa1de 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -1173,9 +1173,12 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	if (!dev_priv->psr.sink_support)
 		return;
 
-	/* Per platform default: all disabled. */
-	if (i915_modparams.enable_psr == -1)
+	if (i915_modparams.enable_psr == -1) {
+		i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
+
+		/* Per platform default: all disabled. */
 		i915_modparams.enable_psr = 0;
+	}
 
 	/* Set link_standby x link_off defaults */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))

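A minimal sketch (hypothetical helper, not part of the patch) of the intended
precedence once the per-platform default is lifted: -1 in the module parameter
means "use the platform/VBT default", while any other value is an explicit
user override. Note that, per the commit message, the patch still forces the
default to 0 for now:

#include <linux/types.h>

/* Sketch only: resolve the effective PSR enable setting. */
static bool psr_enable_requested(int enable_psr_modparam, bool vbt_psr_enable)
{
	if (enable_psr_modparam == -1)
		return vbt_psr_enable;

	return enable_psr_modparam > 0;
}
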
From a88ad3ded15daa0389106779c60b8a5e76d4b20a Mon Sep 17 00:00:00 2001
From: David Lechner <david@lechnology.com>
Date: Wed, 14 Mar 2018 17:58:45 -0500
Subject: [PATCH 0662/1286] drm/tilcdc: Fix setting clock divider for omap-l138

This fixes setting the clock divider on the TI OMAP-L138 LCDK board.

The clock drivers for the OMAP-L138 are being converted to the common clock
framework. When this happens, clk_set_rate() will no longer return an
error. However, on this SoC, the clock rate cannot actually be changed
because the clock has to maintain a fixed ratio to the ARM clock. So
after attempting to set the clock rate, we need to check whether the
new rate is actually close enough. If not, follow the previous error
path to adjust the divider in the LCDC IP block to compensate for not
being able to change the parent clock rate.

Tested working on a TI OMAP-L138 LCDK board.

Signed-off-by: David Lechner <david@lechnology.com>
Signed-off-by: Jyri Sarha <jsarha@ti.com>
---
 drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 1b278a22c8b7..1067e702c22c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -224,7 +224,7 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
 
 	ret = clk_set_rate(priv->clk, req_rate * clkdiv);
 	clk_rate = clk_get_rate(priv->clk);
-	if (ret < 0) {
+	if (ret < 0 || tilcdc_pclk_diff(req_rate, clk_rate) > 5) {
 		/*
 		 * If we fail to set the clock rate (some architectures don't
 		 * use the common clock framework yet and may not implement

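A minimal sketch (assumed semantics, not the actual tilcdc helper) of the kind
of percentage check that tilcdc_pclk_diff() performs: compare the achieved
clock rate against the requested one and report the deviation as a percentage,
so the caller above can fall back to the internal LCDC divider when the
deviation exceeds 5%:

/* Sketch only: deviation of real_rate from req_rate, in whole percent.
 * Assumes req_rate >= 100 Hz, which any pixel clock satisfies.
 */
static unsigned int pclk_diff_percent(unsigned long req_rate,
				      unsigned long real_rate)
{
	long delta = (long)real_rate - (long)req_rate;

	if (delta < 0)
		delta = -delta;

	return delta / (req_rate / 100);
}
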
From 9913f74fe15705acd5163551ddf449568cf0048d Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski@samsung.com>
Date: Thu, 10 May 2018 08:46:36 +0900
Subject: [PATCH 0663/1286] drm/exynos: ipp: Add IPP v2 framework

This patch adds the Exynos IPP v2 subsystem and userspace API.

The new userspace API is focused ONLY on memory-to-memory image processing.
The two remaining operation modes of the obsolete IPP v1 API (framebuffer
writeback and local-path output with image processing) can be implemented
using standard DRM features: writeback connectors and additional DRM planes
with scaling features.

The IPP v2 userspace API is based on a stateless approach, which fits the
memory-to-memory image processing model much better. It also provides support
for all image formats that are both already defined in the DRM API and
supported by the existing IPP hardware modules.

The API consists of the following ioctls:
- DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES: to enumerate all available image
  processing modules,
- DRM_IOCTL_EXYNOS_IPP_GET_CAPS: to query capabilities and supported image
  formats of given IPP module,
- DRM_IOCTL_EXYNOS_IPP_GET_LIMITS: to query hardware limitations for
  selected image format of given IPP module,
- DRM_IOCTL_EXYNOS_IPP_COMMIT: to perform operation described by the
  provided structures (source and destination buffers, operation rectangle,
  transformation, etc).

The proposed userspace API is extensible. In the future, more advanced image
processing operations can be defined, for example to support blending.

The userspace API is also fully functional on DRM render nodes, so it is not
limited to the root/privileged client.

The internal driver API has also been completely rewritten. The new IPP core
performs all possible input validation, checks and object life-time control.
The drivers can focus only on writing the configuration to hardware registers.
The stateless nature of the DRM_IOCTL_EXYNOS_IPP_COMMIT ioctl simplifies the
driver API: a minimal driver only needs to provide a single callback for
starting processing and an array of supported image formats.
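For illustration, a hedged userspace sketch (not part of the patch; header
paths and error handling are assumptions) of the two-call pattern used by the
enumeration ioctls: the first DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES call with a
zeroed structure returns the number of IPP modules, and the second call fills
the caller-provided array:

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <drm/exynos_drm.h>

/* Sketch only: returns a malloc'ed array of ipp ids, count in *count. */
static uint32_t *get_ipp_ids(int fd, uint32_t *count)
{
	struct drm_exynos_ioctl_ipp_get_res res = { 0 };
	uint32_t *ids;

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res))
		return NULL;

	ids = calloc(res.count_ipps, sizeof(*ids));
	if (!ids)
		return NULL;

	res.ipp_id_ptr = (uintptr_t)ids;
	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res)) {
		free(ids);
		return NULL;
	}

	*count = res.count_ipps;
	return ids;
}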

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Hoegeun Kwon <hoegeun.kwon@samsung.com>
Merge conflict so merged manually.
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/Kconfig          |   3 +
 drivers/gpu/drm/exynos/Makefile         |   1 +
 drivers/gpu/drm/exynos/exynos_drm_drv.c |  22 +-
 drivers/gpu/drm/exynos/exynos_drm_ipp.c | 916 ++++++++++++++++++++++++
 drivers/gpu/drm/exynos/exynos_drm_ipp.h | 175 +++++
 include/uapi/drm/exynos_drm.h           | 240 +++++++
 6 files changed, 1355 insertions(+), 2 deletions(-)
 create mode 100644 drivers/gpu/drm/exynos/exynos_drm_ipp.c
 create mode 100644 drivers/gpu/drm/exynos/exynos_drm_ipp.h

diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1548a784ef71..9e914655c430 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -95,6 +95,9 @@ config DRM_EXYNOS_G2D
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
 
+config DRM_EXYNOS_IPP
+	bool
+
 config DRM_EXYNOS_FIMC
 	bool "FIMC"
 	depends on BROKEN && MFD_SYSCON
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index a51c5459bb13..bdf4212dde7b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -18,6 +18,7 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER)	+= exynos_mixer.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)	+= exynos_drm_vidi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)	+= exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)	+= exynos_drm_ipp.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC)	+= exynos_drm_fimc.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR)	+= exynos_drm_rotator.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_GSC)	+= exynos_drm_gsc.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a518e9c6d6cc..37c0db759674 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,15 +27,23 @@
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_plane.h"
+#include "exynos_drm_ipp.h"
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_g2d.h"
 #include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
-#define DRIVER_DATE	"20110530"
+#define DRIVER_DATE	"20180330"
+
+/*
+ * Interface history:
+ *
+ * 1.0 - Original version
+ * 1.1 - Upgrade IPP driver to version 2.0
+ */
 #define DRIVER_MAJOR	1
-#define DRIVER_MINOR	0
+#define DRIVER_MINOR	1
 
 int exynos_atomic_check(struct drm_device *dev,
 			struct drm_atomic_state *state)
@@ -108,6 +116,16 @@ static const struct drm_ioctl_desc exynos_ioctls[] = {
 			DRM_AUTH | DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
 			DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES,
+			exynos_drm_ipp_get_res_ioctl,
+			DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl,
+			DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS,
+			exynos_drm_ipp_get_limits_ioctl,
+			DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl,
+			DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..26374e58c557
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,916 @@
+/*
+ * Copyright (C) 2017 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Exynos DRM Image Post Processing (IPP) related functions
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ */
+
+
+#include <drm/drmP.h>
+#include <drm/drm_mode.h>
+#include <uapi/drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+
+static int num_ipp;
+static LIST_HEAD(ipp_list);
+
+/**
+ * exynos_drm_ipp_register - Register a new picture processor hardware module
+ * @dev: DRM device
+ * @ipp: ipp module to init
+ * @funcs: callbacks for the new ipp object
+ * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
+ * @formats: array of supported formats
+ * @num_formats: size of the supported formats array
+ * @name: name (for debugging purposes)
+ *
+ * Initializes an ipp module.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
+		const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
+		const struct exynos_drm_ipp_formats *formats,
+		unsigned int num_formats, const char *name)
+{
+	WARN_ON(!ipp);
+	WARN_ON(!funcs);
+	WARN_ON(!formats);
+	WARN_ON(!num_formats);
+
+	spin_lock_init(&ipp->lock);
+	INIT_LIST_HEAD(&ipp->todo_list);
+	init_waitqueue_head(&ipp->done_wq);
+	ipp->dev = dev;
+	ipp->funcs = funcs;
+	ipp->capabilities = caps;
+	ipp->name = name;
+	ipp->formats = formats;
+	ipp->num_formats = num_formats;
+
+	/* ipp_list modification is serialized by component framework */
+	list_add_tail(&ipp->head, &ipp_list);
+	ipp->id = num_ipp++;
+
+	DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id);
+
+	return 0;
+}
+
+/**
+ * exynos_drm_ipp_unregister - Unregister the picture processor module
+ * @dev: DRM device
+ * @ipp: ipp module
+ */
+void exynos_drm_ipp_unregister(struct drm_device *dev,
+			       struct exynos_drm_ipp *ipp)
+{
+	WARN_ON(ipp->task);
+	WARN_ON(!list_empty(&ipp->todo_list));
+	list_del(&ipp->head);
+}
+
+/**
+ * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Construct a list of ipp ids.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_exynos_ioctl_ipp_get_res *resp = data;
+	struct exynos_drm_ipp *ipp;
+	uint32_t __user *ipp_ptr = (uint32_t __user *)
+						(unsigned long)resp->ipp_id_ptr;
+	unsigned int count = num_ipp, copied = 0;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (count && resp->count_ipps >= count) {
+		list_for_each_entry(ipp, &ipp_list, head) {
+			if (put_user(ipp->id, ipp_ptr + copied))
+				return -EFAULT;
+			copied++;
+		}
+	}
+	resp->count_ipps = count;
+
+	return 0;
+}
+
+static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
+{
+	struct exynos_drm_ipp *ipp;
+
+	list_for_each_entry(ipp, &ipp_list, head)
+		if (ipp->id == id)
+			return ipp;
+	return NULL;
+}
+
+/**
+ * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Construct a structure describing ipp module capabilities.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_exynos_ioctl_ipp_get_caps *resp = data;
+	void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
+	struct exynos_drm_ipp *ipp;
+	int i;
+
+	ipp = __ipp_get(resp->ipp_id);
+	if (!ipp)
+		return -ENOENT;
+
+	resp->ipp_id = ipp->id;
+	resp->capabilities = ipp->capabilities;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (resp->formats_count >= ipp->num_formats) {
+		for (i = 0; i < ipp->num_formats; i++) {
+			struct drm_exynos_ipp_format tmp = {
+				.fourcc = ipp->formats[i].fourcc,
+				.type = ipp->formats[i].type,
+				.modifier = ipp->formats[i].modifier,
+			};
+
+			if (copy_to_user(ptr, &tmp, sizeof(tmp)))
+				return -EFAULT;
+			ptr += sizeof(tmp);
+		}
+	}
+	resp->formats_count = ipp->num_formats;
+
+	return 0;
+}
+
+static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
+				struct exynos_drm_ipp *ipp, uint32_t fourcc,
+				uint64_t mod, unsigned int type)
+{
+	int i;
+
+	for (i = 0; i < ipp->num_formats; i++) {
+		if ((ipp->formats[i].type & type) &&
+		    ipp->formats[i].fourcc == fourcc &&
+		    ipp->formats[i].modifier == mod)
+			return &ipp->formats[i];
+	}
+	return NULL;
+}
+
+/**
+ * exynos_drm_ipp_get_limits_ioctl - get ipp module limits
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Construct a structure describing ipp module limitations for provided
+ * picture format.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_exynos_ioctl_ipp_get_limits *resp = data;
+	void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
+	const struct exynos_drm_ipp_formats *format;
+	struct exynos_drm_ipp *ipp;
+
+	if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
+	    resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
+		return -EINVAL;
+
+	ipp = __ipp_get(resp->ipp_id);
+	if (!ipp)
+		return -ENOENT;
+
+	format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
+				  resp->type);
+	if (!format)
+		return -EINVAL;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (format->num_limits && resp->limits_count >= format->num_limits)
+		if (copy_to_user((void __user *)ptr, format->limits,
+				 sizeof(*format->limits) * format->num_limits))
+			return -EFAULT;
+	resp->limits_count = format->num_limits;
+
+	return 0;
+}
+
+struct drm_pending_exynos_ipp_event {
+	struct drm_pending_event base;
+	struct drm_exynos_ipp_event event;
+};
+
+static inline struct exynos_drm_ipp_task *
+			exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
+{
+	struct exynos_drm_ipp_task *task;
+
+	task = kzalloc(sizeof(*task), GFP_KERNEL);
+	if (!task)
+		return NULL;
+
+	task->dev = ipp->dev;
+	task->ipp = ipp;
+
+	/* some defaults */
+	task->src.rect.w = task->dst.rect.w = UINT_MAX;
+	task->src.rect.h = task->dst.rect.h = UINT_MAX;
+	task->transform.rotation = DRM_MODE_ROTATE_0;
+
+	DRM_DEBUG_DRIVER("Allocated task %pK\n", task);
+
+	return task;
+}
+
+static const struct exynos_drm_param_map {
+	unsigned int id;
+	unsigned int size;
+	unsigned int offset;
+} exynos_drm_ipp_params_maps[] = {
+	{
+		DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
+		sizeof(struct drm_exynos_ipp_task_buffer),
+		offsetof(struct exynos_drm_ipp_task, src.buf),
+	}, {
+		DRM_EXYNOS_IPP_TASK_BUFFER |
+			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
+		sizeof(struct drm_exynos_ipp_task_buffer),
+		offsetof(struct exynos_drm_ipp_task, dst.buf),
+	}, {
+		DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
+		sizeof(struct drm_exynos_ipp_task_rect),
+		offsetof(struct exynos_drm_ipp_task, src.rect),
+	}, {
+		DRM_EXYNOS_IPP_TASK_RECTANGLE |
+			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
+		sizeof(struct drm_exynos_ipp_task_rect),
+		offsetof(struct exynos_drm_ipp_task, dst.rect),
+	}, {
+		DRM_EXYNOS_IPP_TASK_TRANSFORM,
+		sizeof(struct drm_exynos_ipp_task_transform),
+		offsetof(struct exynos_drm_ipp_task, transform),
+	}, {
+		DRM_EXYNOS_IPP_TASK_ALPHA,
+		sizeof(struct drm_exynos_ipp_task_alpha),
+		offsetof(struct exynos_drm_ipp_task, alpha),
+	},
+};
+
+static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
+				   struct drm_exynos_ioctl_ipp_commit *arg)
+{
+	const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
+	void __user *params = (void __user *)(unsigned long)arg->params_ptr;
+	unsigned int size = arg->params_size;
+	uint32_t id;
+	int i;
+
+	while (size) {
+		if (get_user(id, (uint32_t __user *)params))
+			return -EFAULT;
+
+		for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
+			if (map[i].id == id)
+				break;
+		if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
+		    map[i].size > size)
+			return -EINVAL;
+
+		if (copy_from_user((void *)task + map[i].offset, params,
+				   map[i].size))
+			return -EFAULT;
+
+		params += map[i].size;
+		size -= map[i].size;
+	}
+
+	DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task);
+	return 0;
+}
+
+static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
+					    struct drm_file *filp)
+{
+	int ret = 0;
+	int i;
+
+	/* basic checks */
+	if (buf->buf.width == 0 || buf->buf.height == 0)
+		return -EINVAL;
+	buf->format = drm_format_info(buf->buf.fourcc);
+	for (i = 0; i < buf->format->num_planes; i++) {
+		unsigned int width = (i == 0) ? buf->buf.width :
+			     DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
+
+		if (buf->buf.pitch[i] == 0)
+			buf->buf.pitch[i] = width * buf->format->cpp[i];
+		if (buf->buf.pitch[i] < width * buf->format->cpp[i])
+			return -EINVAL;
+		if (!buf->buf.gem_id[i])
+			return -ENOENT;
+	}
+
+	/* pitch for additional planes must match */
+	if (buf->format->num_planes > 2 &&
+	    buf->buf.pitch[1] != buf->buf.pitch[2])
+		return -EINVAL;
+
+	/* get GEM buffers and check their size */
+	for (i = 0; i < buf->format->num_planes; i++) {
+		unsigned int height = (i == 0) ? buf->buf.height :
+			     DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
+		unsigned long size = height * buf->buf.pitch[i];
+		struct drm_gem_object *obj = drm_gem_object_lookup(filp,
+							    buf->buf.gem_id[i]);
+		if (!obj) {
+			ret = -ENOENT;
+			goto gem_free;
+		}
+		buf->exynos_gem[i] = to_exynos_gem(obj);
+
+		if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
+			i++;
+			ret = -EINVAL;
+			goto gem_free;
+		}
+		buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
+				   buf->buf.offset[i];
+	}
+
+	return 0;
+gem_free:
+	while (i--) {
+		drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+		buf->exynos_gem[i] = NULL;
+	}
+	return ret;
+}
+
+static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
+{
+	int i;
+
+	if (!buf->exynos_gem[0])
+		return;
+	for (i = 0; i < buf->format->num_planes; i++)
+		drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+}
+
+static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
+				 struct exynos_drm_ipp_task *task)
+{
+	DRM_DEBUG_DRIVER("Freeing task %pK\n", task);
+
+	exynos_drm_ipp_task_release_buf(&task->src);
+	exynos_drm_ipp_task_release_buf(&task->dst);
+	if (task->event)
+		drm_event_cancel_free(ipp->dev, &task->event->base);
+	kfree(task);
+}
+
+struct drm_ipp_limit {
+	struct drm_exynos_ipp_limit_val h;
+	struct drm_exynos_ipp_limit_val v;
+};
+
+enum drm_ipp_size_id {
+	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
+};
+
+static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
+	[IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
+	[IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
+				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
+	[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
+				DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
+				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
+};
+
+static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
+{
+	if (!*ptr)
+		*ptr = val;
+}
+
+static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
+			     unsigned int num_limits, enum drm_ipp_size_id id,
+			     struct drm_ipp_limit *res)
+{
+	const struct drm_exynos_ipp_limit *l = limits;
+	int i = 0;
+
+	memset(res, 0, sizeof(*res));
+	for (i = 0; limit_id_fallback[id][i]; i++)
+		for (l = limits; l - limits < num_limits; l++) {
+			if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
+			      DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
+			    ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
+						     limit_id_fallback[id][i]))
+				continue;
+			__limit_set_val(&res->h.min, l->h.min);
+			__limit_set_val(&res->h.max, l->h.max);
+			__limit_set_val(&res->h.align, l->h.align);
+			__limit_set_val(&res->v.min, l->v.min);
+			__limit_set_val(&res->v.max, l->v.max);
+			__limit_set_val(&res->v.align, l->v.align);
+		}
+}
+
+static inline bool __align_check(unsigned int val, unsigned int align)
+{
+	if (align && (val & (align - 1))) {
+		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
+				 val, align);
+		return false;
+	}
+	return true;
+}
+
+static inline bool __size_limit_check(unsigned int val,
+				 struct drm_exynos_ipp_limit_val *l)
+{
+	if ((l->min && val < l->min) || (l->max && val > l->max)) {
+		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
+				 val, l->min, l->max);
+		return false;
+	}
+	return __align_check(val, l->align);
+}
+
+static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
+	const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
+	bool rotate, bool swap)
+{
+	enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
+	struct drm_ipp_limit l;
+	struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
+
+	if (!limits)
+		return 0;
+
+	__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
+	if (!__size_limit_check(buf->buf.width, &l.h) ||
+	    !__size_limit_check(buf->buf.height, &l.v))
+		return -EINVAL;
+
+	if (swap) {
+		lv = &l.h;
+		lh = &l.v;
+	}
+	__get_size_limit(limits, num_limits, id, &l);
+	if (!__size_limit_check(buf->rect.w, lh) ||
+	    !__align_check(buf->rect.x, lh->align) ||
+	    !__size_limit_check(buf->rect.h, lv) ||
+	    !__align_check(buf->rect.y, lv->align))
+		return -EINVAL;
+
+	return 0;
+}
+
+static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
+				       unsigned int min, unsigned int max)
+{
+	if ((max && (dst << 16) > src * max) ||
+	    (min && (dst << 16) < src * min)) {
+		DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
+			 src, dst,
+			 min >> 16, 100000 * (min & 0xffff) / (1 << 16),
+			 max >> 16, 100000 * (max & 0xffff) / (1 << 16));
+		return false;
+	}
+	return true;
+}
+
+static int exynos_drm_ipp_check_scale_limits(
+				struct drm_exynos_ipp_task_rect *src,
+				struct drm_exynos_ipp_task_rect *dst,
+				const struct drm_exynos_ipp_limit *limits,
+				unsigned int num_limits, bool swap)
+{
+	const struct drm_exynos_ipp_limit_val *lh, *lv;
+	int dw, dh;
+
+	for (; num_limits; limits++, num_limits--)
+		if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
+		    DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
+			break;
+	if (!num_limits)
+		return 0;
+
+	lh = (!swap) ? &limits->h : &limits->v;
+	lv = (!swap) ? &limits->v : &limits->h;
+	dw = (!swap) ? dst->w : dst->h;
+	dh = (!swap) ? dst->h : dst->w;
+
+	if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
+	    !__scale_limit_check(src->h, dh, lv->min, lv->max))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
+{
+	struct exynos_drm_ipp *ipp = task->ipp;
+	const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
+	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
+	unsigned int rotation = task->transform.rotation;
+	int ret = 0;
+	bool swap = drm_rotation_90_or_270(rotation);
+	bool rotate = (rotation != DRM_MODE_ROTATE_0);
+	bool scale = false;
+
+	DRM_DEBUG_DRIVER("Checking task %pK\n", task);
+
+	if (src->rect.w == UINT_MAX)
+		src->rect.w = src->buf.width;
+	if (src->rect.h == UINT_MAX)
+		src->rect.h = src->buf.height;
+	if (dst->rect.w == UINT_MAX)
+		dst->rect.w = dst->buf.width;
+	if (dst->rect.h == UINT_MAX)
+		dst->rect.h = dst->buf.height;
+
+	if (src->rect.x + src->rect.w > (src->buf.width) ||
+	    src->rect.y + src->rect.h > (src->buf.height) ||
+	    dst->rect.x + dst->rect.w > (dst->buf.width) ||
+	    dst->rect.y + dst->rect.h > (dst->buf.height)) {
+		DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n",
+				 task);
+		return -EINVAL;
+	}
+
+	if ((!swap && (src->rect.w != dst->rect.w ||
+		       src->rect.h != dst->rect.h)) ||
+	    (swap && (src->rect.w != dst->rect.h ||
+		      src->rect.h != dst->rect.w)))
+		scale = true;
+
+	if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
+	     (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
+	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
+	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
+	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
+	     src->buf.fourcc != dst->buf.fourcc)) {
+		DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task);
+		return -EINVAL;
+	}
+
+	src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
+				   DRM_EXYNOS_IPP_FORMAT_SOURCE);
+	if (!src_fmt) {
+		DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
+		return -EINVAL;
+	}
+	ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
+					       src_fmt->num_limits,
+					       rotate, false);
+	if (ret)
+		return ret;
+	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+						src_fmt->limits,
+						src_fmt->num_limits, swap);
+	if (ret)
+		return ret;
+
+	dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
+				   DRM_EXYNOS_IPP_FORMAT_DESTINATION);
+	if (!dst_fmt) {
+		DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
+		return -EINVAL;
+	}
+	ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
+					       dst_fmt->num_limits,
+					       false, swap);
+	if (ret)
+		return ret;
+	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+						dst_fmt->limits,
+						dst_fmt->num_limits, swap);
+	if (ret)
+		return ret;
+
+	DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task);
+
+	return ret;
+}
+
+static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
+				     struct drm_file *filp)
+{
+	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
+	int ret = 0;
+
+	DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task);
+
+	ret = exynos_drm_ipp_task_setup_buffer(src, filp);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task);
+		return ret;
+	}
+	ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task);
+		return ret;
+	}
+
+	DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task);
+
+	return ret;
+}
+
+
+static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
+				 struct drm_file *file_priv, uint64_t user_data)
+{
+	struct drm_pending_exynos_ipp_event *e = NULL;
+	int ret;
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+	e->event.base.length = sizeof(e->event);
+	e->event.user_data = user_data;
+
+	ret = drm_event_reserve_init(task->dev, file_priv, &e->base,
+				     &e->event.base);
+	if (ret)
+		goto free;
+
+	task->event = e;
+	return 0;
+free:
+	kfree(e);
+	return ret;
+}
+
+static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
+{
+	struct timespec64 now;
+
+	ktime_get_ts64(&now);
+	task->event->event.tv_sec = now.tv_sec;
+	task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
+	task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
+
+	drm_send_event(task->dev, &task->event->base);
+}
+
+static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
+{
+	int ret = task->ret;
+
+	if (ret == 0 && task->event) {
+		exynos_drm_ipp_event_send(task);
+		/* ensure event won't be canceled on task free */
+		task->event = NULL;
+	}
+
+	exynos_drm_ipp_task_free(task->ipp, task);
+	return ret;
+}
+
+static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
+{
+	struct exynos_drm_ipp_task *task = container_of(work,
+				      struct exynos_drm_ipp_task, cleanup_work);
+
+	exynos_drm_ipp_task_cleanup(task);
+}
+
+static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);
+
+/**
+ * exynos_drm_ipp_task_done - finish given task and set return code
+ * @task: ipp task to finish
+ * @ret: error code or 0 if operation has been performed successfully
+ */
+void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
+{
+	struct exynos_drm_ipp *ipp = task->ipp;
+	unsigned long flags;
+
+	DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);
+
+	spin_lock_irqsave(&ipp->lock, flags);
+	if (ipp->task == task)
+		ipp->task = NULL;
+	task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
+	task->ret = ret;
+	spin_unlock_irqrestore(&ipp->lock, flags);
+
+	exynos_drm_ipp_next_task(ipp);
+	wake_up(&ipp->done_wq);
+
+	if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
+		INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
+		schedule_work(&task->cleanup_work);
+	}
+}
+
+static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
+{
+	struct exynos_drm_ipp_task *task;
+	unsigned long flags;
+	int ret;
+
+	DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id);
+
+	spin_lock_irqsave(&ipp->lock, flags);
+
+	if (ipp->task || list_empty(&ipp->todo_list)) {
+		spin_unlock_irqrestore(&ipp->lock, flags);
+		return;
+	}
+
+	task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
+				head);
+	list_del_init(&task->head);
+	ipp->task = task;
+
+	spin_unlock_irqrestore(&ipp->lock, flags);
+
+	DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task);
+
+	ret = ipp->funcs->commit(ipp, task);
+	if (ret)
+		exynos_drm_ipp_task_done(task, ret);
+}
+
+static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
+					 struct exynos_drm_ipp_task *task)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipp->lock, flags);
+	list_add(&task->head, &ipp->todo_list);
+	spin_unlock_irqrestore(&ipp->lock, flags);
+
+	exynos_drm_ipp_next_task(ipp);
+}
+
+static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
+				      struct exynos_drm_ipp_task *task)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipp->lock, flags);
+	if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
+		/* already completed task */
+		exynos_drm_ipp_task_cleanup(task);
+	} else if (ipp->task != task) {
+		/* task has not been scheduled for execution yet */
+		list_del_init(&task->head);
+		exynos_drm_ipp_task_cleanup(task);
+	} else {
+		/*
+		 * currently processed task, call abort() and perform
+		 * cleanup with async worker
+		 */
+		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
+		spin_unlock_irqrestore(&ipp->lock, flags);
+		if (ipp->funcs->abort)
+			ipp->funcs->abort(ipp, task);
+		return;
+	}
+	spin_unlock_irqrestore(&ipp->lock, flags);
+}
+
+/**
+ * exynos_drm_ipp_commit_ioctl - perform image processing operation
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Construct an ipp task from the set of properties provided by the user
+ * and try to schedule it to the framebuffer processor hardware.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct drm_exynos_ioctl_ipp_commit *arg = data;
+	struct exynos_drm_ipp *ipp;
+	struct exynos_drm_ipp_task *task;
+	int ret = 0;
+
+	if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
+		return -EINVAL;
+
+	/* can't test and expect an event at the same time */
+	if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
+			(arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
+		return -EINVAL;
+
+	ipp = __ipp_get(arg->ipp_id);
+	if (!ipp)
+		return -ENOENT;
+
+	task = exynos_drm_ipp_task_alloc(ipp);
+	if (!task)
+		return -ENOMEM;
+
+	ret = exynos_drm_ipp_task_set(task, arg);
+	if (ret)
+		goto free;
+
+	ret = exynos_drm_ipp_task_check(task);
+	if (ret)
+		goto free;
+
+	ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
+	if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
+		goto free;
+
+	if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
+		ret = exynos_drm_ipp_event_create(task, file_priv,
+						 arg->user_data);
+		if (ret)
+			goto free;
+	}
+
+	/*
+	 * Queue task for processing on the hardware. The task object will
+	 * then be freed after exynos_drm_ipp_task_done().
+	 */
+	if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
+		DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n",
+				 ipp->id, task);
+
+		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
+		exynos_drm_ipp_schedule_task(task->ipp, task);
+		ret = 0;
+	} else {
+		DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id,
+				 task);
+		exynos_drm_ipp_schedule_task(ipp, task);
+		ret = wait_event_interruptible(ipp->done_wq,
+					task->flags & DRM_EXYNOS_IPP_TASK_DONE);
+		if (ret)
+			exynos_drm_ipp_task_abort(ipp, task);
+		else
+			ret = exynos_drm_ipp_task_cleanup(task);
+	}
+	return ret;
+free:
+	exynos_drm_ipp_task_free(ipp, task);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..0b27d4a9bf94
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#include <drm/drmP.h>
+
+struct exynos_drm_ipp;
+struct exynos_drm_ipp_task;
+
+/**
+ * struct exynos_drm_ipp_funcs - exynos_drm_ipp control functions
+ */
+struct exynos_drm_ipp_funcs {
+	/**
+	 * @commit:
+	 *
+	 * This is the main entry point to start framebuffer processing
+	 * in the hardware. The exynos_drm_ipp_task has been already validated.
+	 * This function must not wait until the device finishes processing.
+	 * When the driver finishes processing, it has to call the
+	 * exynos_drm_ipp_task_done() function.
+	 *
+	 * RETURNS:
+	 *
+	 * 0 on success or negative error codes in case of failure.
+	 */
+	int (*commit)(struct exynos_drm_ipp *ipp,
+		      struct exynos_drm_ipp_task *task);
+
+	/**
+	 * @abort:
+	 *
+	 * Informs the driver that it has to abort the currently running
+	 * task as soon as possible (i.e. as soon as it can stop the device
+	 * safely), even if the task would not have been finished by then.
+	 * After the driver performs the necessary steps, it has to call
+	 * exynos_drm_ipp_task_done() (as if the task ended normally).
+	 * This function does not have to (and will usually not) wait
+	 * until the device enters a state in which it can be stopped.
+	 */
+	void (*abort)(struct exynos_drm_ipp *ipp,
+		      struct exynos_drm_ipp_task *task);
+};
+
+/**
+ * struct exynos_drm_ipp - central picture processor module structure
+ */
+struct exynos_drm_ipp {
+	struct drm_device *dev;
+	struct list_head head;
+	unsigned int id;
+
+	const char *name;
+	const struct exynos_drm_ipp_funcs *funcs;
+	unsigned int capabilities;
+	const struct exynos_drm_ipp_formats *formats;
+	unsigned int num_formats;
+	atomic_t sequence;
+
+	spinlock_t lock;
+	struct exynos_drm_ipp_task *task;
+	struct list_head todo_list;
+	wait_queue_head_t done_wq;
+};
+
+struct exynos_drm_ipp_buffer {
+	struct drm_exynos_ipp_task_buffer buf;
+	struct drm_exynos_ipp_task_rect rect;
+
+	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
+	const struct drm_format_info *format;
+	dma_addr_t dma_addr[MAX_FB_BUFFER];
+};
+
+/**
+ * struct exynos_drm_ipp_task - a structure describing a transformation that
+ * has to be performed by the picture processor hardware module
+ */
+struct exynos_drm_ipp_task {
+	struct drm_device *dev;
+	struct exynos_drm_ipp *ipp;
+	struct list_head head;
+
+	struct exynos_drm_ipp_buffer src;
+	struct exynos_drm_ipp_buffer dst;
+
+	struct drm_exynos_ipp_task_transform transform;
+	struct drm_exynos_ipp_task_alpha alpha;
+
+	struct work_struct cleanup_work;
+	unsigned int flags;
+	int ret;
+
+	struct drm_pending_exynos_ipp_event *event;
+};
+
+#define DRM_EXYNOS_IPP_TASK_DONE	(1 << 0)
+#define DRM_EXYNOS_IPP_TASK_ASYNC	(1 << 1)
+
+struct exynos_drm_ipp_formats {
+	uint32_t fourcc;
+	uint32_t type;
+	uint64_t modifier;
+	const struct drm_exynos_ipp_limit *limits;
+	unsigned int num_limits;
+};
+
+/* helper macros to set exynos_drm_ipp_formats structure and limits */
+#define IPP_SRCDST_MFORMAT(f, m, l) \
+	.fourcc = DRM_FORMAT_##f, .modifier = m, .limits = l, \
+	.num_limits = ARRAY_SIZE(l), \
+	.type = (DRM_EXYNOS_IPP_FORMAT_SOURCE | \
+		 DRM_EXYNOS_IPP_FORMAT_DESTINATION)
+
+#define IPP_SRCDST_FORMAT(f, l) IPP_SRCDST_MFORMAT(f, 0, l)
+
+#define IPP_SIZE_LIMIT(l, val...)	\
+	.type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE | \
+		 DRM_EXYNOS_IPP_LIMIT_SIZE_##l), val
+
+#define IPP_SCALE_LIMIT(val...)		\
+	.type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE), val
+
+int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
+		const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
+		const struct exynos_drm_ipp_formats *formats,
+		unsigned int num_formats, const char *name);
+void exynos_drm_ipp_unregister(struct drm_device *dev,
+			       struct exynos_drm_ipp *ipp);
+
+void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret);
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+int exynos_drm_ipp_commit_ioctl(struct drm_device *dev,
+				void *data, struct drm_file *file_priv);
+#else
+static inline int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev,
+	 void *data, struct drm_file *file_priv)
+{
+	struct drm_exynos_ioctl_ipp_get_res *resp = data;
+
+	resp->count_ipps = 0;
+	return 0;
+}
+static inline int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev,
+	 void *data, struct drm_file *file_priv)
+{
+	return -ENODEV;
+}
+static inline int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev,
+	 void *data, struct drm_file *file_priv)
+{
+	return -ENODEV;
+}
+static inline int exynos_drm_ipp_commit_ioctl(struct drm_device *dev,
+	 void *data, struct drm_file *file_priv)
+{
+	return -ENODEV;
+}
+#endif
+#endif
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 4a54305120e0..3e59b8382dd8 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -135,6 +135,219 @@ struct drm_exynos_g2d_exec {
 	__u64					async;
 };
 
+/* Exynos DRM IPP v2 API */
+
+/**
+ * Enumerate available IPP hardware modules.
+ *
+ * @count_ipps: size of ipp_id array / number of ipp modules (set by driver)
+ * @reserved: padding
+ * @ipp_id_ptr: pointer to ipp_id array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_res {
+	__u32 count_ipps;
+	__u32 reserved;
+	__u64 ipp_id_ptr;
+};
+
+enum drm_exynos_ipp_format_type {
+	DRM_EXYNOS_IPP_FORMAT_SOURCE		= 0x01,
+	DRM_EXYNOS_IPP_FORMAT_DESTINATION	= 0x02,
+};
+
+struct drm_exynos_ipp_format {
+	__u32 fourcc;
+	__u32 type;
+	__u64 modifier;
+};
+
+enum drm_exynos_ipp_capability {
+	DRM_EXYNOS_IPP_CAP_CROP		= 0x01,
+	DRM_EXYNOS_IPP_CAP_ROTATE	= 0x02,
+	DRM_EXYNOS_IPP_CAP_SCALE	= 0x04,
+	DRM_EXYNOS_IPP_CAP_CONVERT	= 0x08,
+};
+
+/**
+ * Get IPP hardware capabilities and supported image formats.
+ *
+ * @ipp_id: id of IPP module to query
+ * @capabilities: bitmask of drm_exynos_ipp_capability (set by driver)
+ * @reserved: padding
+ * @formats_count: size of formats array (in entries) / number of filled
+ *		   formats (set by driver)
+ * @formats_ptr: pointer to formats array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_caps {
+	__u32 ipp_id;
+	__u32 capabilities;
+	__u32 reserved;
+	__u32 formats_count;
+	__u64 formats_ptr;
+};
+
+enum drm_exynos_ipp_limit_type {
+	/* size (horizontal/vertical) limits, in pixels (min, max, alignment) */
+	DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE		= 0x0001,
+	/* scale ratio (horizontal/vertical), 16.16 fixed point (min, max) */
+	DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE		= 0x0002,
+
+	/* image buffer area */
+	DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER	= 0x0001 << 16,
+	/* src/dst rectangle area */
+	DRM_EXYNOS_IPP_LIMIT_SIZE_AREA		= 0x0002 << 16,
+	/* src/dst rectangle area when rotation enabled */
+	DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED	= 0x0003 << 16,
+
+	DRM_EXYNOS_IPP_LIMIT_TYPE_MASK		= 0x000f,
+	DRM_EXYNOS_IPP_LIMIT_SIZE_MASK		= 0x000f << 16,
+};
+
+struct drm_exynos_ipp_limit_val {
+	__u32 min;
+	__u32 max;
+	__u32 align;
+	__u32 reserved;
+};
+
+/**
+ * IPP module limitation.
+ *
+ * @type: limit type (see drm_exynos_ipp_limit_type enum)
+ * @reserved: padding
+ * @h: horizontal limits
+ * @v: vertical limits
+ */
+struct drm_exynos_ipp_limit {
+	__u32 type;
+	__u32 reserved;
+	struct drm_exynos_ipp_limit_val h;
+	struct drm_exynos_ipp_limit_val v;
+};
+
+/**
+ * Get IPP limits for given image format.
+ *
+ * @ipp_id: id of IPP module to query
+ * @fourcc: image format code (see DRM_FORMAT_* in drm_fourcc.h)
+ * @modifier: image format modifier (see DRM_FORMAT_MOD_* in drm_fourcc.h)
+ * @type: source/destination identifier (see enum drm_exynos_ipp_format_type)
+ * @limits_count: size of limits array (in entries) / number of filled entries
+ *		 (set by driver)
+ * @limits_ptr: pointer to limits array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_limits {
+	__u32 ipp_id;
+	__u32 fourcc;
+	__u64 modifier;
+	__u32 type;
+	__u32 limits_count;
+	__u64 limits_ptr;
+};
+
+enum drm_exynos_ipp_task_id {
+	/* buffer described by struct drm_exynos_ipp_task_buffer */
+	DRM_EXYNOS_IPP_TASK_BUFFER		= 0x0001,
+	/* rectangle described by struct drm_exynos_ipp_task_rect */
+	DRM_EXYNOS_IPP_TASK_RECTANGLE		= 0x0002,
+	/* transformation described by struct drm_exynos_ipp_task_transform */
+	DRM_EXYNOS_IPP_TASK_TRANSFORM		= 0x0003,
+	/* alpha configuration described by struct drm_exynos_ipp_task_alpha */
+	DRM_EXYNOS_IPP_TASK_ALPHA		= 0x0004,
+
+	/* source image data (for buffer and rectangle chunks) */
+	DRM_EXYNOS_IPP_TASK_TYPE_SOURCE		= 0x0001 << 16,
+	/* destination image data (for buffer and rectangle chunks) */
+	DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION	= 0x0002 << 16,
+};
+
+/**
+ * Memory buffer with image data.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_BUFFER
+ * other parameters are the same as for the AddFB2 generic DRM ioctl
+ */
+struct drm_exynos_ipp_task_buffer {
+	__u32	id;
+	__u32	fourcc;
+	__u32	width, height;
+	__u32	gem_id[4];
+	__u32	offset[4];
+	__u32	pitch[4];
+	__u64	modifier;
+};
+
+/**
+ * Rectangle for processing.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_RECTANGLE
+ * @reserved: padding
+ * @x,@y: top-left corner position in pixels
+ * @w,@h: width/height in pixels
+ */
+struct drm_exynos_ipp_task_rect {
+	__u32	id;
+	__u32	reserved;
+	__u32	x;
+	__u32	y;
+	__u32	w;
+	__u32	h;
+};
+
+/**
+ * Image transformation description.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_TRANSFORM
+ * @rotation: DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* values
+ */
+struct drm_exynos_ipp_task_transform {
+	__u32	id;
+	__u32	rotation;
+};
+
+/**
+ * Image global alpha configuration for formats without alpha values.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_ALPHA
+ * @value: global alpha value (0-255)
+ */
+struct drm_exynos_ipp_task_alpha {
+	__u32	id;
+	__u32	value;
+};
+
+enum drm_exynos_ipp_flag {
+	/* generate DRM event after processing */
+	DRM_EXYNOS_IPP_FLAG_EVENT	= 0x01,
+	/* dry run, only check task parameters */
+	DRM_EXYNOS_IPP_FLAG_TEST_ONLY	= 0x02,
+	/* non-blocking processing */
+	DRM_EXYNOS_IPP_FLAG_NONBLOCK	= 0x04,
+};
+
+#define DRM_EXYNOS_IPP_FLAGS (DRM_EXYNOS_IPP_FLAG_EVENT |\
+		DRM_EXYNOS_IPP_FLAG_TEST_ONLY | DRM_EXYNOS_IPP_FLAG_NONBLOCK)
+
+/**
+ * Perform image processing described by an array of drm_exynos_ipp_task_*
+ * structures (parameters array).
+ *
+ * @ipp_id: id of IPP module to run the task
+ * @flags: bitmask of drm_exynos_ipp_flag values
+ * @reserved: padding
+ * @params_size: size of parameters array (in bytes)
+ * @params_ptr: pointer to parameters array or NULL
+ * @user_data: (optional) data for drm event
+ */
+struct drm_exynos_ioctl_ipp_commit {
+	__u32 ipp_id;
+	__u32 flags;
+	__u32 reserved;
+	__u32 params_size;
+	__u64 params_ptr;
+	__u64 user_data;
+};
+
 #define DRM_EXYNOS_GEM_CREATE		0x00
 #define DRM_EXYNOS_GEM_MAP		0x01
 /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
@@ -147,6 +360,11 @@ struct drm_exynos_g2d_exec {
 #define DRM_EXYNOS_G2D_EXEC		0x22
 
 /* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_RESOURCES	0x40
+#define DRM_EXYNOS_IPP_GET_CAPS		0x41
+#define DRM_EXYNOS_IPP_GET_LIMITS	0x42
+#define DRM_EXYNOS_IPP_COMMIT		0x43
 
 #define DRM_IOCTL_EXYNOS_GEM_CREATE		DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -165,8 +383,20 @@ struct drm_exynos_g2d_exec {
 #define DRM_IOCTL_EXYNOS_G2D_EXEC		DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
 
+#define DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_RESOURCES, \
+		struct drm_exynos_ioctl_ipp_get_res)
+#define DRM_IOCTL_EXYNOS_IPP_GET_CAPS		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_CAPS, struct drm_exynos_ioctl_ipp_get_caps)
+#define DRM_IOCTL_EXYNOS_IPP_GET_LIMITS		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_LIMITS, \
+		struct drm_exynos_ioctl_ipp_get_limits)
+#define DRM_IOCTL_EXYNOS_IPP_COMMIT		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
+
 /* EXYNOS specific events */
 #define DRM_EXYNOS_G2D_EVENT		0x80000000
+#define DRM_EXYNOS_IPP_EVENT		0x80000002
 
 struct drm_exynos_g2d_event {
 	struct drm_event	base;
@@ -177,6 +407,16 @@ struct drm_exynos_g2d_event {
 	__u32			reserved;
 };
 
+struct drm_exynos_ipp_event {
+	struct drm_event	base;
+	__u64			user_data;
+	__u32			tv_sec;
+	__u32			tv_usec;
+	__u32			ipp_id;
+	__u32			sequence;
+	__u64			reserved;
+};
+
 #if defined(__cplusplus)
 }
 #endif

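For reference, a minimal userspace sketch (not part of the patch) of driving the
IPP v2 uAPI defined above. It assumes the chunk packing implied by the
enum drm_exynos_ipp_task_id comments (each drm_exynos_ipp_task_* structure is
appended to one contiguous parameters array), and everything else is a
placeholder: drm_fd, ipp_id, the GEM handles and the 640x480 XRGB8888 geometry
are hypothetical, include paths depend on where the uapi headers were
installed, and the transform/alpha chunks are simply omitted.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm_fourcc.h>
	#include <drm/exynos_drm.h>

	/* Submit a blocking 1:1 copy task; src_gem/dst_gem are placeholder GEM handles. */
	static int ipp_copy(int drm_fd, uint32_t ipp_id,
			    uint32_t src_gem, uint32_t dst_gem)
	{
		struct drm_exynos_ipp_task_buffer buf[2] = {
			{
				.id = DRM_EXYNOS_IPP_TASK_BUFFER |
				      DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
				.fourcc = DRM_FORMAT_XRGB8888,
				.width = 640, .height = 480,
				.gem_id = { src_gem },
				.pitch = { 640 * 4 },
			}, {
				.id = DRM_EXYNOS_IPP_TASK_BUFFER |
				      DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
				.fourcc = DRM_FORMAT_XRGB8888,
				.width = 640, .height = 480,
				.gem_id = { dst_gem },
				.pitch = { 640 * 4 },
			}
		};
		struct drm_exynos_ipp_task_rect rect[2] = {
			{
				.id = DRM_EXYNOS_IPP_TASK_RECTANGLE |
				      DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
				.w = 640, .h = 480,
			}, {
				.id = DRM_EXYNOS_IPP_TASK_RECTANGLE |
				      DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
				.w = 640, .h = 480,
			}
		};
		/* chunks are packed one after another into the parameters array */
		uint8_t params[sizeof(buf) + sizeof(rect)];
		struct drm_exynos_ioctl_ipp_commit arg = {
			.ipp_id = ipp_id,
			.params_size = sizeof(params),
			.params_ptr = (uintptr_t)params,
		};

		memcpy(params, buf, sizeof(buf));
		memcpy(params + sizeof(buf), rect, sizeof(rect));

		/* without DRM_EXYNOS_IPP_FLAG_NONBLOCK the ioctl waits for completion */
		return ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_COMMIT, &arg);
	}
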
From d8cb9eeaa79fce028982589da8696df6bb10b903 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski@samsung.com>
Date: Wed, 9 May 2018 10:59:23 +0200
Subject: [PATCH 0664/1286] drm/exynos: rotator: Convert driver to IPP v2 core
 API

This patch adapts the Exynos DRM rotator driver to the new IPP v2 core API.
A side effect of this conversion is a switch to the driver component API,
which is needed to register properly with the Exynos DRM core.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/Kconfig              |   2 +-
 drivers/gpu/drm/exynos/exynos_drm_drv.c     |   1 +
 drivers/gpu/drm/exynos/exynos_drm_rotator.c | 772 +++++---------------
 3 files changed, 197 insertions(+), 578 deletions(-)

diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 9e914655c430..63a27c14b133 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -106,7 +106,7 @@ config DRM_EXYNOS_FIMC
 
 config DRM_EXYNOS_ROTATOR
 	bool "Rotator"
-	depends on BROKEN
+	select DRM_EXYNOS_IPP
 	help
 	  Choose this option if you want to use Exynos Rotator for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 37c0db759674..537a588ef370 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -263,6 +263,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
 		DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
 	}, {
 		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
 	}, {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 79282a820ecc..1a76dd3d52e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/component.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -22,29 +23,18 @@
 #include <drm/exynos_drm.h>
 #include "regs-rotator.h"
 #include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
 
 /*
  * Rotator supports image crop/rotator and input/output DMA operations.
  * input DMA reads image data from the memory.
  * output DMA writes image data to memory.
- *
- * M2M operation : supports crop/scale/rotation/csc so on.
- * Memory ----> Rotator H/W ----> Memory.
  */
 
-/*
- * TODO
- * 1. check suspend/resume api if needed.
- * 2. need to check use case platform_device_id.
- * 3. check src/dst size with, height.
- * 4. need to add supported list in prop_list.
- */
+#define ROTATOR_AUTOSUSPEND_DELAY	2000
 
-#define get_rot_context(dev)	platform_get_drvdata(to_platform_device(dev))
-#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
-					struct rot_context, ippdrv);
-#define rot_read(offset)		readl(rot->regs + (offset))
+#define rot_read(offset)	readl(rot->regs + (offset))
 #define rot_write(cfg, offset)	writel(cfg, rot->regs + (offset))
 
 enum rot_irq_status {
@@ -52,54 +42,28 @@ enum rot_irq_status {
 	ROT_IRQ_STATUS_ILLEGAL	= 9,
 };
 
-/*
- * A structure of limitation.
- *
- * @min_w: minimum width.
- * @min_h: minimum height.
- * @max_w: maximum width.
- * @max_h: maximum height.
- * @align: align size.
- */
-struct rot_limit {
-	u32	min_w;
-	u32	min_h;
-	u32	max_w;
-	u32	max_h;
-	u32	align;
-};
-
-/*
- * A structure of limitation table.
- *
- * @ycbcr420_2p: case of YUV.
- * @rgb888: case of RGB.
- */
-struct rot_limit_table {
-	struct rot_limit	ycbcr420_2p;
-	struct rot_limit	rgb888;
+struct rot_variant {
+	const struct exynos_drm_ipp_formats *formats;
+	unsigned int	num_formats;
 };
 
 /*
  * A structure of rotator context.
  * @ippdrv: prepare initialization using ippdrv.
- * @regs_res: register resources.
  * @regs: memory mapped io registers.
  * @clock: rotator gate clock.
  * @limit_tbl: limitation of rotator.
  * @irq: irq number.
- * @cur_buf_id: current operation buffer id.
- * @suspended: suspended state.
  */
 struct rot_context {
-	struct exynos_drm_ippdrv	ippdrv;
-	struct resource	*regs_res;
+	struct exynos_drm_ipp ipp;
+	struct drm_device *drm_dev;
+	struct device	*dev;
 	void __iomem	*regs;
 	struct clk	*clock;
-	struct rot_limit_table	*limit_tbl;
-	int	irq;
-	int	cur_buf_id[EXYNOS_DRM_OPS_MAX];
-	bool	suspended;
+	const struct exynos_drm_ipp_formats *formats;
+	unsigned int	num_formats;
+	struct exynos_drm_ipp_task	*task;
 };
 
 static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
@@ -114,15 +78,6 @@ static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
 	rot_write(val, ROT_CONFIG);
 }
 
-static u32 rotator_reg_get_fmt(struct rot_context *rot)
-{
-	u32 val = rot_read(ROT_CONTROL);
-
-	val &= ROT_CONTROL_FMT_MASK;
-
-	return val;
-}
-
 static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
 {
 	u32 val = rot_read(ROT_STATUS);
@@ -138,9 +93,6 @@ static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
 static irqreturn_t rotator_irq_handler(int irq, void *arg)
 {
 	struct rot_context *rot = arg;
-	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
 	enum rot_irq_status irq_status;
 	u32 val;
 
@@ -152,56 +104,21 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
 	val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
 	rot_write(val, ROT_STATUS);
 
-	if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
-		event_work->ippdrv = ippdrv;
-		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
-			rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
-		queue_work(ippdrv->event_workq, &event_work->work);
-	} else {
-		DRM_ERROR("the SFR is set illegally\n");
+	if (rot->task) {
+		struct exynos_drm_ipp_task *task = rot->task;
+
+		rot->task = NULL;
+		pm_runtime_mark_last_busy(rot->dev);
+		pm_runtime_put_autosuspend(rot->dev);
+		exynos_drm_ipp_task_done(task,
+			irq_status == ROT_IRQ_STATUS_COMPLETE ? 0 : -EINVAL);
 	}
 
 	return IRQ_HANDLED;
 }
 
-static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
-		u32 *vsize)
+static void rotator_src_set_fmt(struct rot_context *rot, u32 fmt)
 {
-	struct rot_limit_table *limit_tbl = rot->limit_tbl;
-	struct rot_limit *limit;
-	u32 mask, val;
-
-	/* Get size limit */
-	if (fmt == ROT_CONTROL_FMT_RGB888)
-		limit = &limit_tbl->rgb888;
-	else
-		limit = &limit_tbl->ycbcr420_2p;
-
-	/* Get mask for rounding to nearest aligned val */
-	mask = ~((1 << limit->align) - 1);
-
-	/* Set aligned width */
-	val = ROT_ALIGN(*hsize, limit->align, mask);
-	if (val < limit->min_w)
-		*hsize = ROT_MIN(limit->min_w, mask);
-	else if (val > limit->max_w)
-		*hsize = ROT_MAX(limit->max_w, mask);
-	else
-		*hsize = val;
-
-	/* Set aligned height */
-	val = ROT_ALIGN(*vsize, limit->align, mask);
-	if (val < limit->min_h)
-		*vsize = ROT_MIN(limit->min_h, mask);
-	else if (val > limit->max_h)
-		*vsize = ROT_MAX(limit->max_h, mask);
-	else
-		*vsize = val;
-}
-
-static int rotator_src_set_fmt(struct device *dev, u32 fmt)
-{
-	struct rot_context *rot = dev_get_drvdata(dev);
 	u32 val;
 
 	val = rot_read(ROT_CONTROL);
@@ -214,515 +131,176 @@ static int rotator_src_set_fmt(struct device *dev, u32 fmt)
 	case DRM_FORMAT_XRGB8888:
 		val |= ROT_CONTROL_FMT_RGB888;
 		break;
-	default:
-		DRM_ERROR("invalid image format\n");
-		return -EINVAL;
 	}
 
 	rot_write(val, ROT_CONTROL);
-
-	return 0;
 }
 
-static inline bool rotator_check_reg_fmt(u32 fmt)
+static void rotator_src_set_buf(struct rot_context *rot,
+				struct exynos_drm_ipp_buffer *buf)
 {
-	if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
-	    (fmt == ROT_CONTROL_FMT_RGB888))
-		return true;
-
-	return false;
-}
-
-static int rotator_src_set_size(struct device *dev, int swap,
-		struct drm_exynos_pos *pos,
-		struct drm_exynos_sz *sz)
-{
-	struct rot_context *rot = dev_get_drvdata(dev);
-	u32 fmt, hsize, vsize;
 	u32 val;
 
-	/* Get format */
-	fmt = rotator_reg_get_fmt(rot);
-	if (!rotator_check_reg_fmt(fmt)) {
-		DRM_ERROR("invalid format.\n");
-		return -EINVAL;
-	}
-
-	/* Align buffer size */
-	hsize = sz->hsize;
-	vsize = sz->vsize;
-	rotator_align_size(rot, fmt, &hsize, &vsize);
-
 	/* Set buffer size configuration */
-	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+	val = ROT_SET_BUF_SIZE_H(buf->buf.height) |
+	      ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]);
 	rot_write(val, ROT_SRC_BUF_SIZE);
 
 	/* Set crop image position configuration */
-	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+	val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x);
 	rot_write(val, ROT_SRC_CROP_POS);
-	val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+	val = ROT_SRC_CROP_SIZE_H(buf->rect.h) |
+	      ROT_SRC_CROP_SIZE_W(buf->rect.w);
 	rot_write(val, ROT_SRC_CROP_SIZE);
 
-	return 0;
+	/* Set buffer DMA address */
+	rot_write(buf->dma_addr[0], ROT_SRC_BUF_ADDR(0));
+	rot_write(buf->dma_addr[1], ROT_SRC_BUF_ADDR(1));
 }
 
-static int rotator_src_set_addr(struct device *dev,
-		struct drm_exynos_ipp_buf_info *buf_info,
-		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+static void rotator_dst_set_transf(struct rot_context *rot,
+				   unsigned int rotation)
 {
-	struct rot_context *rot = dev_get_drvdata(dev);
-	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
-	u32 val, fmt, hsize, vsize;
-	int i;
-
-	/* Set current buf_id */
-	rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
-
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		/* Set address configuration */
-		for_each_ipp_planar(i)
-			addr[i] = buf_info->base[i];
-
-		/* Get format */
-		fmt = rotator_reg_get_fmt(rot);
-		if (!rotator_check_reg_fmt(fmt)) {
-			DRM_ERROR("invalid format.\n");
-			return -EINVAL;
-		}
-
-		/* Re-set cb planar for NV12 format */
-		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
-		    !addr[EXYNOS_DRM_PLANAR_CB]) {
-
-			val = rot_read(ROT_SRC_BUF_SIZE);
-			hsize = ROT_GET_BUF_SIZE_W(val);
-			vsize = ROT_GET_BUF_SIZE_H(val);
-
-			/* Set cb planar */
-			addr[EXYNOS_DRM_PLANAR_CB] =
-				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
-		}
-
-		for_each_ipp_planar(i)
-			rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
-		break;
-	case IPP_BUF_DEQUEUE:
-		for_each_ipp_planar(i)
-			rot_write(0x0, ROT_SRC_BUF_ADDR(i));
-		break;
-	default:
-		/* Nothing to do */
-		break;
-	}
-
-	return 0;
-}
-
-static int rotator_dst_set_transf(struct device *dev,
-		enum drm_exynos_degree degree,
-		enum drm_exynos_flip flip, bool *swap)
-{
-	struct rot_context *rot = dev_get_drvdata(dev);
 	u32 val;
 
 	/* Set transform configuration */
 	val = rot_read(ROT_CONTROL);
 	val &= ~ROT_CONTROL_FLIP_MASK;
 
-	switch (flip) {
-	case EXYNOS_DRM_FLIP_VERTICAL:
-		val |= ROT_CONTROL_FLIP_VERTICAL;
-		break;
-	case EXYNOS_DRM_FLIP_HORIZONTAL:
+	if (rotation & DRM_MODE_REFLECT_X)
 		val |= ROT_CONTROL_FLIP_HORIZONTAL;
-		break;
-	default:
-		/* Flip None */
-		break;
-	}
+	if (rotation & DRM_MODE_REFLECT_Y)
+		val |= ROT_CONTROL_FLIP_VERTICAL;
 
 	val &= ~ROT_CONTROL_ROT_MASK;
 
-	switch (degree) {
-	case EXYNOS_DRM_DEGREE_90:
+	if (rotation & DRM_MODE_ROTATE_90)
 		val |= ROT_CONTROL_ROT_90;
-		break;
-	case EXYNOS_DRM_DEGREE_180:
+	else if (rotation & DRM_MODE_ROTATE_180)
 		val |= ROT_CONTROL_ROT_180;
-		break;
-	case EXYNOS_DRM_DEGREE_270:
+	else if (rotation & DRM_MODE_ROTATE_270)
 		val |= ROT_CONTROL_ROT_270;
-		break;
-	default:
-		/* Rotation 0 Degree */
-		break;
-	}
 
 	rot_write(val, ROT_CONTROL);
-
-	/* Check degree for setting buffer size swap */
-	if ((degree == EXYNOS_DRM_DEGREE_90) ||
-	    (degree == EXYNOS_DRM_DEGREE_270))
-		*swap = true;
-	else
-		*swap = false;
-
-	return 0;
 }
 
-static int rotator_dst_set_size(struct device *dev, int swap,
-		struct drm_exynos_pos *pos,
-		struct drm_exynos_sz *sz)
+static void rotator_dst_set_buf(struct rot_context *rot,
+				struct exynos_drm_ipp_buffer *buf)
 {
-	struct rot_context *rot = dev_get_drvdata(dev);
-	u32 val, fmt, hsize, vsize;
-
-	/* Get format */
-	fmt = rotator_reg_get_fmt(rot);
-	if (!rotator_check_reg_fmt(fmt)) {
-		DRM_ERROR("invalid format.\n");
-		return -EINVAL;
-	}
-
-	/* Align buffer size */
-	hsize = sz->hsize;
-	vsize = sz->vsize;
-	rotator_align_size(rot, fmt, &hsize, &vsize);
+	u32 val;
 
 	/* Set buffer size configuration */
-	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+	val = ROT_SET_BUF_SIZE_H(buf->buf.height) |
+	      ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]);
 	rot_write(val, ROT_DST_BUF_SIZE);
 
 	/* Set crop image position configuration */
-	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+	val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x);
 	rot_write(val, ROT_DST_CROP_POS);
 
-	return 0;
+	/* Set buffer DMA address */
+	rot_write(buf->dma_addr[0], ROT_DST_BUF_ADDR(0));
+	rot_write(buf->dma_addr[1], ROT_DST_BUF_ADDR(1));
 }
 
-static int rotator_dst_set_addr(struct device *dev,
-		struct drm_exynos_ipp_buf_info *buf_info,
-		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+static void rotator_start(struct rot_context *rot)
 {
-	struct rot_context *rot = dev_get_drvdata(dev);
-	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
-	u32 val, fmt, hsize, vsize;
-	int i;
-
-	/* Set current buf_id */
-	rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
-
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		/* Set address configuration */
-		for_each_ipp_planar(i)
-			addr[i] = buf_info->base[i];
-
-		/* Get format */
-		fmt = rotator_reg_get_fmt(rot);
-		if (!rotator_check_reg_fmt(fmt)) {
-			DRM_ERROR("invalid format.\n");
-			return -EINVAL;
-		}
-
-		/* Re-set cb planar for NV12 format */
-		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
-		    !addr[EXYNOS_DRM_PLANAR_CB]) {
-			/* Get buf size */
-			val = rot_read(ROT_DST_BUF_SIZE);
-
-			hsize = ROT_GET_BUF_SIZE_W(val);
-			vsize = ROT_GET_BUF_SIZE_H(val);
-
-			/* Set cb planar */
-			addr[EXYNOS_DRM_PLANAR_CB] =
-				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
-		}
-
-		for_each_ipp_planar(i)
-			rot_write(addr[i], ROT_DST_BUF_ADDR(i));
-		break;
-	case IPP_BUF_DEQUEUE:
-		for_each_ipp_planar(i)
-			rot_write(0x0, ROT_DST_BUF_ADDR(i));
-		break;
-	default:
-		/* Nothing to do */
-		break;
-	}
-
-	return 0;
-}
-
-static struct exynos_drm_ipp_ops rot_src_ops = {
-	.set_fmt	=	rotator_src_set_fmt,
-	.set_size	=	rotator_src_set_size,
-	.set_addr	=	rotator_src_set_addr,
-};
-
-static struct exynos_drm_ipp_ops rot_dst_ops = {
-	.set_transf	=	rotator_dst_set_transf,
-	.set_size	=	rotator_dst_set_size,
-	.set_addr	=	rotator_dst_set_addr,
-};
-
-static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
-{
-	struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
-
-	prop_list->version = 1;
-	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
-				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
-	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
-				(1 << EXYNOS_DRM_DEGREE_90) |
-				(1 << EXYNOS_DRM_DEGREE_180) |
-				(1 << EXYNOS_DRM_DEGREE_270);
-	prop_list->csc = 0;
-	prop_list->crop = 0;
-	prop_list->scale = 0;
-
-	return 0;
-}
-
-static inline bool rotator_check_drm_fmt(u32 fmt)
-{
-	switch (fmt) {
-	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_NV12:
-		return true;
-	default:
-		DRM_DEBUG_KMS("not support format\n");
-		return false;
-	}
-}
-
-static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
-{
-	switch (flip) {
-	case EXYNOS_DRM_FLIP_NONE:
-	case EXYNOS_DRM_FLIP_VERTICAL:
-	case EXYNOS_DRM_FLIP_HORIZONTAL:
-	case EXYNOS_DRM_FLIP_BOTH:
-		return true;
-	default:
-		DRM_DEBUG_KMS("invalid flip\n");
-		return false;
-	}
-}
-
-static int rotator_ippdrv_check_property(struct device *dev,
-		struct drm_exynos_ipp_property *property)
-{
-	struct drm_exynos_ipp_config *src_config =
-					&property->config[EXYNOS_DRM_OPS_SRC];
-	struct drm_exynos_ipp_config *dst_config =
-					&property->config[EXYNOS_DRM_OPS_DST];
-	struct drm_exynos_pos *src_pos = &src_config->pos;
-	struct drm_exynos_pos *dst_pos = &dst_config->pos;
-	struct drm_exynos_sz *src_sz = &src_config->sz;
-	struct drm_exynos_sz *dst_sz = &dst_config->sz;
-	bool swap = false;
-
-	/* Check format configuration */
-	if (src_config->fmt != dst_config->fmt) {
-		DRM_DEBUG_KMS("not support csc feature\n");
-		return -EINVAL;
-	}
-
-	if (!rotator_check_drm_fmt(dst_config->fmt)) {
-		DRM_DEBUG_KMS("invalid format\n");
-		return -EINVAL;
-	}
-
-	/* Check transform configuration */
-	if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
-		DRM_DEBUG_KMS("not support source-side rotation\n");
-		return -EINVAL;
-	}
-
-	switch (dst_config->degree) {
-	case EXYNOS_DRM_DEGREE_90:
-	case EXYNOS_DRM_DEGREE_270:
-		swap = true;
-	case EXYNOS_DRM_DEGREE_0:
-	case EXYNOS_DRM_DEGREE_180:
-		/* No problem */
-		break;
-	default:
-		DRM_DEBUG_KMS("invalid degree\n");
-		return -EINVAL;
-	}
-
-	if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
-		DRM_DEBUG_KMS("not support source-side flip\n");
-		return -EINVAL;
-	}
-
-	if (!rotator_check_drm_flip(dst_config->flip)) {
-		DRM_DEBUG_KMS("invalid flip\n");
-		return -EINVAL;
-	}
-
-	/* Check size configuration */
-	if ((src_pos->x + src_pos->w > src_sz->hsize) ||
-		(src_pos->y + src_pos->h > src_sz->vsize)) {
-		DRM_DEBUG_KMS("out of source buffer bound\n");
-		return -EINVAL;
-	}
-
-	if (swap) {
-		if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
-			(dst_pos->y + dst_pos->w > dst_sz->hsize)) {
-			DRM_DEBUG_KMS("out of destination buffer bound\n");
-			return -EINVAL;
-		}
-
-		if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
-			DRM_DEBUG_KMS("not support scale feature\n");
-			return -EINVAL;
-		}
-	} else {
-		if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
-			(dst_pos->y + dst_pos->h > dst_sz->vsize)) {
-			DRM_DEBUG_KMS("out of destination buffer bound\n");
-			return -EINVAL;
-		}
-
-		if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
-			DRM_DEBUG_KMS("not support scale feature\n");
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
-{
-	struct rot_context *rot = dev_get_drvdata(dev);
 	u32 val;
 
-	if (rot->suspended) {
-		DRM_ERROR("suspended state\n");
-		return -EPERM;
-	}
-
-	if (cmd != IPP_CMD_M2M) {
-		DRM_ERROR("not support cmd: %d\n", cmd);
-		return -EINVAL;
-	}
-
 	/* Set interrupt enable */
 	rotator_reg_set_irq(rot, true);
 
 	val = rot_read(ROT_CONTROL);
 	val |= ROT_CONTROL_START;
-
 	rot_write(val, ROT_CONTROL);
+}
+
+static int rotator_commit(struct exynos_drm_ipp *ipp,
+			  struct exynos_drm_ipp_task *task)
+{
+	struct rot_context *rot =
+			container_of(ipp, struct rot_context, ipp);
+
+	pm_runtime_get_sync(rot->dev);
+	rot->task = task;
+
+	rotator_src_set_fmt(rot, task->src.buf.fourcc);
+	rotator_src_set_buf(rot, &task->src);
+	rotator_dst_set_transf(rot, task->transform.rotation);
+	rotator_dst_set_buf(rot, &task->dst);
+	rotator_start(rot);
 
 	return 0;
 }
 
-static struct rot_limit_table rot_limit_tbl_4210 = {
-	.ycbcr420_2p = {
-		.min_w = 32,
-		.min_h = 32,
-		.max_w = SZ_64K,
-		.max_h = SZ_64K,
-		.align = 3,
-	},
-	.rgb888 = {
-		.min_w = 8,
-		.min_h = 8,
-		.max_w = SZ_16K,
-		.max_h = SZ_16K,
-		.align = 2,
-	},
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
+	.commit = rotator_commit,
 };
 
-static struct rot_limit_table rot_limit_tbl_4x12 = {
-	.ycbcr420_2p = {
-		.min_w = 32,
-		.min_h = 32,
-		.max_w = SZ_32K,
-		.max_h = SZ_32K,
-		.align = 3,
-	},
-	.rgb888 = {
-		.min_w = 8,
-		.min_h = 8,
-		.max_w = SZ_8K,
-		.max_h = SZ_8K,
-		.align = 2,
-	},
-};
+static int rotator_bind(struct device *dev, struct device *master, void *data)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct exynos_drm_ipp *ipp = &rot->ipp;
 
-static struct rot_limit_table rot_limit_tbl_5250 = {
-	.ycbcr420_2p = {
-		.min_w = 32,
-		.min_h = 32,
-		.max_w = SZ_32K,
-		.max_h = SZ_32K,
-		.align = 3,
-	},
-	.rgb888 = {
-		.min_w = 8,
-		.min_h = 8,
-		.max_w = SZ_8K,
-		.max_h = SZ_8K,
-		.align = 1,
-	},
-};
+	rot->drm_dev = drm_dev;
+	drm_iommu_attach_device(drm_dev, dev);
 
-static const struct of_device_id exynos_rotator_match[] = {
-	{
-		.compatible = "samsung,exynos4210-rotator",
-		.data = &rot_limit_tbl_4210,
-	},
-	{
-		.compatible = "samsung,exynos4212-rotator",
-		.data = &rot_limit_tbl_4x12,
-	},
-	{
-		.compatible = "samsung,exynos5250-rotator",
-		.data = &rot_limit_tbl_5250,
-	},
-	{},
+	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+			   DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
+			   rot->formats, rot->num_formats, "rotator");
+
+	dev_info(dev, "The exynos rotator has been probed successfully\n");
+
+	return 0;
+}
+
+static void rotator_unbind(struct device *dev, struct device *master,
+			void *data)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct exynos_drm_ipp *ipp = &rot->ipp;
+
+	exynos_drm_ipp_unregister(drm_dev, ipp);
+	drm_iommu_detach_device(rot->drm_dev, rot->dev);
+}
+
+static const struct component_ops rotator_component_ops = {
+	.bind	= rotator_bind,
+	.unbind = rotator_unbind,
 };
-MODULE_DEVICE_TABLE(of, exynos_rotator_match);
 
 static int rotator_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct resource	*regs_res;
 	struct rot_context *rot;
-	struct exynos_drm_ippdrv *ippdrv;
+	const struct rot_variant *variant;
+	int irq;
 	int ret;
 
-	if (!dev->of_node) {
-		dev_err(dev, "cannot find of_node.\n");
-		return -ENODEV;
-	}
-
 	rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
 	if (!rot)
 		return -ENOMEM;
 
-	rot->limit_tbl = (struct rot_limit_table *)
-				of_device_get_match_data(dev);
-	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	rot->regs = devm_ioremap_resource(dev, rot->regs_res);
+	variant = of_device_get_match_data(dev);
+	rot->formats = variant->formats;
+	rot->num_formats = variant->num_formats;
+	rot->dev = dev;
+	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rot->regs = devm_ioremap_resource(dev, regs_res);
 	if (IS_ERR(rot->regs))
 		return PTR_ERR(rot->regs);
 
-	rot->irq = platform_get_irq(pdev, 0);
-	if (rot->irq < 0) {
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
 		dev_err(dev, "failed to get irq\n");
-		return rot->irq;
+		return irq;
 	}
 
-	ret = devm_request_threaded_irq(dev, rot->irq, NULL,
-			rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot);
+	ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
+			       rot);
 	if (ret < 0) {
 		dev_err(dev, "failed to request irq\n");
 		return ret;
@@ -734,35 +312,19 @@ static int rotator_probe(struct platform_device *pdev)
 		return PTR_ERR(rot->clock);
 	}
 
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, ROTATOR_AUTOSUSPEND_DELAY);
 	pm_runtime_enable(dev);
-
-	ippdrv = &rot->ippdrv;
-	ippdrv->dev = dev;
-	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
-	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
-	ippdrv->check_property = rotator_ippdrv_check_property;
-	ippdrv->start = rotator_ippdrv_start;
-	ret = rotator_init_prop_list(ippdrv);
-	if (ret < 0) {
-		dev_err(dev, "failed to init property list.\n");
-		goto err_ippdrv_register;
-	}
-
-	DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
-
 	platform_set_drvdata(pdev, rot);
 
-	ret = exynos_drm_ippdrv_register(ippdrv);
-	if (ret < 0) {
-		dev_err(dev, "failed to register drm rotator device\n");
-		goto err_ippdrv_register;
-	}
-
-	dev_info(dev, "The exynos rotator is probed successfully\n");
+	ret = component_add(dev, &rotator_component_ops);
+	if (ret)
+		goto err_component;
 
 	return 0;
 
-err_ippdrv_register:
+err_component:
+	pm_runtime_dont_use_autosuspend(dev);
 	pm_runtime_disable(dev);
 	return ret;
 }
@@ -770,45 +332,101 @@ err_ippdrv_register:
 static int rotator_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct rot_context *rot = dev_get_drvdata(dev);
-	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
-
-	exynos_drm_ippdrv_unregister(ippdrv);
 
+	component_del(dev, &rotator_component_ops);
+	pm_runtime_dont_use_autosuspend(dev);
 	pm_runtime_disable(dev);
 
 	return 0;
 }
 
 #ifdef CONFIG_PM
-static int rotator_clk_crtl(struct rot_context *rot, bool enable)
-{
-	if (enable) {
-		clk_prepare_enable(rot->clock);
-		rot->suspended = false;
-	} else {
-		clk_disable_unprepare(rot->clock);
-		rot->suspended = true;
-	}
-
-	return 0;
-}
-
 static int rotator_runtime_suspend(struct device *dev)
 {
 	struct rot_context *rot = dev_get_drvdata(dev);
 
-	return  rotator_clk_crtl(rot, false);
+	clk_disable_unprepare(rot->clock);
+	return 0;
 }
 
 static int rotator_runtime_resume(struct device *dev)
 {
 	struct rot_context *rot = dev_get_drvdata(dev);
 
-	return  rotator_clk_crtl(rot, true);
+	return clk_prepare_enable(rot->clock);
 }
 #endif
 
+static const struct drm_exynos_ipp_limit rotator_4210_rbg888_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
+	{ IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
+};
+
+static const struct drm_exynos_ipp_limit rotator_4412_rbg888_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
+	{ IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
+};
+
+static const struct drm_exynos_ipp_limit rotator_5250_rbg888_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
+	{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
+};
+
+static const struct drm_exynos_ipp_limit rotator_4210_yuv_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) },
+	{ IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
+};
+
+static const struct drm_exynos_ipp_limit rotator_4412_yuv_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_32K }, .v = { 32, SZ_32K }) },
+	{ IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
+};
+
+static const struct exynos_drm_ipp_formats rotator_4210_formats[] = {
+	{ IPP_SRCDST_FORMAT(XRGB8888, rotator_4210_rbg888_limits) },
+	{ IPP_SRCDST_FORMAT(NV12, rotator_4210_yuv_limits) },
+};
+
+static const struct exynos_drm_ipp_formats rotator_4412_formats[] = {
+	{ IPP_SRCDST_FORMAT(XRGB8888, rotator_4412_rbg888_limits) },
+	{ IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
+};
+
+static const struct exynos_drm_ipp_formats rotator_5250_formats[] = {
+	{ IPP_SRCDST_FORMAT(XRGB8888, rotator_5250_rbg888_limits) },
+	{ IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
+};
+
+static const struct rot_variant rotator_4210_data = {
+	.formats = rotator_4210_formats,
+	.num_formats = ARRAY_SIZE(rotator_4210_formats),
+};
+
+static const struct rot_variant rotator_4412_data = {
+	.formats = rotator_4412_formats,
+	.num_formats = ARRAY_SIZE(rotator_4412_formats),
+};
+
+static const struct rot_variant rotator_5250_data = {
+	.formats = rotator_5250_formats,
+	.num_formats = ARRAY_SIZE(rotator_5250_formats),
+};
+
+static const struct of_device_id exynos_rotator_match[] = {
+	{
+		.compatible = "samsung,exynos4210-rotator",
+		.data = &rotator_4210_data,
+	}, {
+		.compatible = "samsung,exynos4212-rotator",
+		.data = &rotator_4412_data,
+	}, {
+		.compatible = "samsung,exynos5250-rotator",
+		.data = &rotator_5250_data,
+	}, {
+	},
+};
+MODULE_DEVICE_TABLE(of, exynos_rotator_match);
+
 static const struct dev_pm_ops rotator_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 				pm_runtime_force_resume)
@@ -820,7 +438,7 @@ struct platform_driver rotator_driver = {
 	.probe		= rotator_probe,
 	.remove		= rotator_remove,
 	.driver		= {
-		.name	= "exynos-rot",
+		.name	= "exynos-rotator",
 		.owner	= THIS_MODULE,
 		.pm	= &rotator_pm_ops,
 		.of_match_table = exynos_rotator_match,

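For reference, the driver-side contract that the conversion above follows,
reduced to a skeleton (not part of the patch): commit() programs the hardware
without waiting, and the interrupt handler reports completion through
exynos_drm_ipp_task_done(). struct my_ctx, my_commit() and my_irq() are
hypothetical names that mirror the rotator code above; the actual register
programming is left as a comment.

	#include <linux/kernel.h>
	#include <linux/interrupt.h>
	#include <linux/pm_runtime.h>

	#include "exynos_drm_ipp.h"

	struct my_ctx {
		struct device *dev;
		struct exynos_drm_ipp ipp;
		struct exynos_drm_ipp_task *task;
	};

	static int my_commit(struct exynos_drm_ipp *ipp,
			     struct exynos_drm_ipp_task *task)
	{
		struct my_ctx *ctx = container_of(ipp, struct my_ctx, ipp);

		pm_runtime_get_sync(ctx->dev);
		ctx->task = task;
		/*
		 * Program task->src, task->dst and task->transform into the
		 * hardware and start it here; do not wait for completion.
		 */
		return 0;
	}

	static irqreturn_t my_irq(int irq, void *arg)
	{
		struct my_ctx *ctx = arg;
		struct exynos_drm_ipp_task *task = ctx->task;

		ctx->task = NULL;
		pm_runtime_mark_last_busy(ctx->dev);
		pm_runtime_put_autosuspend(ctx->dev);
		if (task)
			exynos_drm_ipp_task_done(task, 0); /* or a negative errno */
		return IRQ_HANDLED;
	}

	static const struct exynos_drm_ipp_funcs my_ipp_funcs = {
		.commit = my_commit,
	};
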
From 8b7d3ec83aba6381bfc123c7aebcd78199635c3a Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski@samsung.com>
Date: Wed, 9 May 2018 10:59:24 +0200
Subject: [PATCH 0665/1286] drm/exynos: gsc: Convert driver to IPP v2 core API

This patch adapts the Exynos DRM GScaler driver to the new IPP v2 core API.
A side effect of this conversion is a switch to the driver component API,
which is needed to register properly with the Exynos DRM core. During the
conversion the driver has been adapted to support more specific compatible
strings to distinguish between Exynos5250 and Exynos5420 (different hardware
limits). Support for the Exynos5433 variant has been added too (different
limits table, removed dependency on ARCH_EXYNOS5).

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Hoegeun Kwon <hoegeun.kwon@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/Kconfig          |    3 +-
 drivers/gpu/drm/exynos/exynos_drm_drv.c |    1 +
 drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1083 +++++++----------------
 drivers/gpu/drm/exynos/exynos_drm_gsc.h |   24 -
 4 files changed, 342 insertions(+), 769 deletions(-)
 delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_gsc.h

diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 63a27c14b133..5c216548ea18 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -112,7 +112,8 @@ config DRM_EXYNOS_ROTATOR
 
 config DRM_EXYNOS_GSC
 	bool "GScaler"
-	depends on BROKEN && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n
+	depends on VIDEO_SAMSUNG_EXYNOS_GSC=n
+	select DRM_EXYNOS_IPP
 	help
 	  Choose this option if you want to use Exynos GSC for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 537a588ef370..2dcb94034716 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -266,6 +266,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
 		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
+		DRM_COMPONENT_DRIVER
 	}, {
 		&exynos_drm_platform_driver,
 		DRM_VIRTUAL_DEVICE
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 0506b2b17ac1..e99dd1e4ba65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,18 +12,20 @@
  *
  */
 #include <linux/kernel.h>
+#include <linux/component.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
 #include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
 #include <linux/regmap.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "regs-gsc.h"
 #include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
-#include "exynos_drm_gsc.h"
 
 /*
  * GSC stands for General SCaler and
@@ -31,26 +33,10 @@
  * input DMA reads image data from the memory.
  * output DMA writes image data to memory.
  * GSC supports image rotation and image effect functions.
- *
- * M2M operation : supports crop/scale/rotation/csc so on.
- * Memory ----> GSC H/W ----> Memory.
- * Writeback operation : supports cloned screen with FIMD.
- * FIMD ----> GSC H/W ----> Memory.
- * Output operation : supports direct display using local path.
- * Memory ----> GSC H/W ----> FIMD, Mixer.
  */
 
-/*
- * TODO
- * 1. check suspend/resume api if needed.
- * 2. need to check use case platform_device_id.
- * 3. check src/dst size with, height.
- * 4. added check_prepare api for right register.
- * 5. need to add supported list in prop_list.
- * 6. check prescaler/scaler optimization.
- */
 
-#define GSC_MAX_DEVS	4
+#define GSC_MAX_CLOCKS	8
 #define GSC_MAX_SRC		4
 #define GSC_MAX_DST		16
 #define GSC_RESET_TIMEOUT	50
@@ -65,8 +51,6 @@
 #define GSC_SC_DOWN_RATIO_4_8		131072
 #define GSC_SC_DOWN_RATIO_3_8		174762
 #define GSC_SC_DOWN_RATIO_2_8		262144
-#define GSC_REFRESH_MIN	12
-#define GSC_REFRESH_MAX	60
 #define GSC_CROP_MAX	8192
 #define GSC_CROP_MIN	32
 #define GSC_SCALE_MAX	4224
@@ -77,10 +61,9 @@
 #define GSC_COEF_H_8T	8
 #define GSC_COEF_V_4T	4
 #define GSC_COEF_DEPTH	3
+#define GSC_AUTOSUSPEND_DELAY		2000
 
 #define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev))
-#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
-					struct gsc_context, ippdrv);
 #define gsc_read(offset)		readl(ctx->regs + (offset))
 #define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
 
@@ -103,51 +86,48 @@ struct gsc_scaler {
 	unsigned long main_vratio;
 };
 
-/*
- * A structure of scaler capability.
- *
- * find user manual 49.2 features.
- * @tile_w: tile mode or rotation width.
- * @tile_h: tile mode or rotation height.
- * @w: other cases width.
- * @h: other cases height.
- */
-struct gsc_capability {
-	/* tile or rotation */
-	u32	tile_w;
-	u32	tile_h;
-	/* other cases */
-	u32	w;
-	u32	h;
-};
-
 /*
  * A structure of gsc context.
  *
- * @ippdrv: prepare initialization using ippdrv.
  * @regs_res: register resources.
  * @regs: memory mapped io registers.
- * @sysreg: handle to SYSREG block regmap.
- * @lock: locking of operations.
  * @gsc_clk: gsc gate clock.
  * @sc: scaler infomations.
  * @id: gsc id.
  * @irq: irq number.
  * @rotation: supports rotation of src.
- * @suspended: qos operations.
  */
 struct gsc_context {
-	struct exynos_drm_ippdrv	ippdrv;
+	struct exynos_drm_ipp ipp;
+	struct drm_device *drm_dev;
+	struct device	*dev;
+	struct exynos_drm_ipp_task	*task;
+	struct exynos_drm_ipp_formats	*formats;
+	unsigned int			num_formats;
+
 	struct resource	*regs_res;
 	void __iomem	*regs;
-	struct regmap	*sysreg;
-	struct mutex	lock;
-	struct clk	*gsc_clk;
+	const char	**clk_names;
+	struct clk	*clocks[GSC_MAX_CLOCKS];
+	int		num_clocks;
 	struct gsc_scaler	sc;
 	int	id;
 	int	irq;
 	bool	rotation;
-	bool	suspended;
+};
+
+/**
+ * struct gsc_driverdata - per device type driver data for init time.
+ *
+ * @limits: picture size limits array
+ * @num_limits: the number of entries in the limits array
+ * @clk_names: names of clocks needed by this variant
+ * @num_clocks: the number of clocks needed by this variant
+ */
+struct gsc_driverdata {
+	const struct drm_exynos_ipp_limit *limits;
+	int		num_limits;
+	const char	*clk_names[GSC_MAX_CLOCKS];
+	int		num_clocks;
 };
 
 /* 8-tap Filter Coefficient */
@@ -438,25 +418,6 @@ static int gsc_sw_reset(struct gsc_context *ctx)
 	return 0;
 }
 
-static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
-{
-	unsigned int gscblk_cfg;
-
-	if (!ctx->sysreg)
-		return;
-
-	regmap_read(ctx->sysreg, SYSREG_GSCBLK_CFG1, &gscblk_cfg);
-
-	if (enable)
-		gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
-				GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
-				GSC_BLK_SW_RESET_WB_DEST(ctx->id);
-	else
-		gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
-
-	regmap_write(ctx->sysreg, SYSREG_GSCBLK_CFG1, gscblk_cfg);
-}
-
 static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
 		bool overflow, bool done)
 {
@@ -487,10 +448,8 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
 }
 
 
-static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 cfg;
 
 	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -506,6 +465,7 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
 		cfg |= GSC_IN_RGB565;
 		break;
 	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
 		cfg |= GSC_IN_XRGB8888;
 		break;
 	case DRM_FORMAT_BGRX8888:
@@ -548,115 +508,84 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
 		cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
 			GSC_IN_YUV420_2P);
 		break;
-	default:
-		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
-		return -EINVAL;
 	}
 
 	gsc_write(cfg, GSC_IN_CON);
-
-	return 0;
 }
 
-static int gsc_src_set_transf(struct device *dev,
-		enum drm_exynos_degree degree,
-		enum drm_exynos_flip flip, bool *swap)
+static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
 	u32 cfg;
 
-	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
-
 	cfg = gsc_read(GSC_IN_CON);
 	cfg &= ~GSC_IN_ROT_MASK;
 
 	switch (degree) {
-	case EXYNOS_DRM_DEGREE_0:
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+	case DRM_MODE_ROTATE_0:
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg |= GSC_IN_ROT_XFLIP;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg |= GSC_IN_ROT_YFLIP;
 		break;
-	case EXYNOS_DRM_DEGREE_90:
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
-			cfg |= GSC_IN_ROT_90_XFLIP;
-		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
-			cfg |= GSC_IN_ROT_90_YFLIP;
-		else
-			cfg |= GSC_IN_ROT_90;
+	case DRM_MODE_ROTATE_90:
+		cfg |= GSC_IN_ROT_90;
+		if (rotation & DRM_MODE_REFLECT_Y)
+			cfg |= GSC_IN_ROT_XFLIP;
+		if (rotation & DRM_MODE_REFLECT_X)
+			cfg |= GSC_IN_ROT_YFLIP;
 		break;
-	case EXYNOS_DRM_DEGREE_180:
+	case DRM_MODE_ROTATE_180:
 		cfg |= GSC_IN_ROT_180;
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg &= ~GSC_IN_ROT_XFLIP;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
-	case EXYNOS_DRM_DEGREE_270:
+	case DRM_MODE_ROTATE_270:
 		cfg |= GSC_IN_ROT_270;
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg &= ~GSC_IN_ROT_XFLIP;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
-	default:
-		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
-		return -EINVAL;
 	}
 
 	gsc_write(cfg, GSC_IN_CON);
 
 	ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
-	*swap = ctx->rotation;
-
-	return 0;
 }
 
-static int gsc_src_set_size(struct device *dev, int swap,
-		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+static void gsc_src_set_size(struct gsc_context *ctx,
+			     struct exynos_drm_ipp_buffer *buf)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct drm_exynos_pos img_pos = *pos;
 	struct gsc_scaler *sc = &ctx->sc;
 	u32 cfg;
 
-	DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
-		swap, pos->x, pos->y, pos->w, pos->h);
-
-	if (swap) {
-		img_pos.w = pos->h;
-		img_pos.h = pos->w;
-	}
-
 	/* pixel offset */
-	cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
-		GSC_SRCIMG_OFFSET_Y(img_pos.y));
+	cfg = (GSC_SRCIMG_OFFSET_X(buf->rect.x) |
+		GSC_SRCIMG_OFFSET_Y(buf->rect.y));
 	gsc_write(cfg, GSC_SRCIMG_OFFSET);
 
 	/* cropped size */
-	cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
-		GSC_CROPPED_HEIGHT(img_pos.h));
+	cfg = (GSC_CROPPED_WIDTH(buf->rect.w) |
+		GSC_CROPPED_HEIGHT(buf->rect.h));
 	gsc_write(cfg, GSC_CROPPED_SIZE);
 
-	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
-
 	/* original size */
 	cfg = gsc_read(GSC_SRCIMG_SIZE);
 	cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
 		GSC_SRCIMG_WIDTH_MASK);
 
-	cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
-		GSC_SRCIMG_HEIGHT(sz->vsize));
+	cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
+		GSC_SRCIMG_HEIGHT(buf->buf.height));
 
 	gsc_write(cfg, GSC_SRCIMG_SIZE);
 
 	cfg = gsc_read(GSC_IN_CON);
 	cfg &= ~GSC_IN_RGB_TYPE_MASK;
 
-	DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range);
-
-	if (pos->w >= GSC_WIDTH_ITU_709)
+	if (buf->rect.w >= GSC_WIDTH_ITU_709)
 		if (sc->range)
 			cfg |= GSC_IN_RGB_HD_WIDE;
 		else
@@ -668,103 +597,39 @@ static int gsc_src_set_size(struct device *dev, int swap,
 			cfg |= GSC_IN_RGB_SD_NARROW;
 
 	gsc_write(cfg, GSC_IN_CON);
-
-	return 0;
 }
 
-static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
-		enum drm_exynos_ipp_buf_type buf_type)
+static void gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+			       bool enqueue)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	bool masked;
+	bool masked = !enqueue;
 	u32 cfg;
 	u32 mask = 0x00000001 << buf_id;
 
-	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
-
 	/* mask register set */
 	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
 
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		masked = false;
-		break;
-	case IPP_BUF_DEQUEUE:
-		masked = true;
-		break;
-	default:
-		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
-		return -EINVAL;
-	}
-
 	/* sequence id */
 	cfg &= ~mask;
 	cfg |= masked << buf_id;
 	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
 	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
 	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
-
-	return 0;
 }
 
-static int gsc_src_set_addr(struct device *dev,
-		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
-		enum drm_exynos_ipp_buf_type buf_type)
+static void gsc_src_set_addr(struct gsc_context *ctx, u32 buf_id,
+			    struct exynos_drm_ipp_buffer *buf)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_property *property;
-
-	if (!c_node) {
-		DRM_ERROR("failed to get c_node.\n");
-		return -EFAULT;
-	}
-
-	property = &c_node->property;
-
-	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
-		property->prop_id, buf_id, buf_type);
-
-	if (buf_id > GSC_MAX_SRC) {
-		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
-		return -EINVAL;
-	}
-
 	/* address register set */
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
-			GSC_IN_BASE_ADDR_Y(buf_id));
-		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
-			GSC_IN_BASE_ADDR_CB(buf_id));
-		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
-			GSC_IN_BASE_ADDR_CR(buf_id));
-		break;
-	case IPP_BUF_DEQUEUE:
-		gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
-		gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
-		gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
-		break;
-	default:
-		/* bypass */
-		break;
-	}
+	gsc_write(buf->dma_addr[0], GSC_IN_BASE_ADDR_Y(buf_id));
+	gsc_write(buf->dma_addr[1], GSC_IN_BASE_ADDR_CB(buf_id));
+	gsc_write(buf->dma_addr[2], GSC_IN_BASE_ADDR_CR(buf_id));
 
-	return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+	gsc_src_set_buf_seq(ctx, buf_id, true);
 }
 
-static struct exynos_drm_ipp_ops gsc_src_ops = {
-	.set_fmt = gsc_src_set_fmt,
-	.set_transf = gsc_src_set_transf,
-	.set_size = gsc_src_set_size,
-	.set_addr = gsc_src_set_addr,
-};
-
-static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 cfg;
 
 	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -779,8 +644,9 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
 	case DRM_FORMAT_RGB565:
 		cfg |= GSC_OUT_RGB565;
 		break;
+	case DRM_FORMAT_ARGB8888:
 	case DRM_FORMAT_XRGB8888:
-		cfg |= GSC_OUT_XRGB8888;
+		cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_GLOBAL_ALPHA(0xff));
 		break;
 	case DRM_FORMAT_BGRX8888:
 		cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
@@ -819,69 +685,9 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
 		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
 			GSC_OUT_YUV420_2P);
 		break;
-	default:
-		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
-		return -EINVAL;
 	}
 
 	gsc_write(cfg, GSC_OUT_CON);
-
-	return 0;
-}
-
-static int gsc_dst_set_transf(struct device *dev,
-		enum drm_exynos_degree degree,
-		enum drm_exynos_flip flip, bool *swap)
-{
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	u32 cfg;
-
-	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
-
-	cfg = gsc_read(GSC_IN_CON);
-	cfg &= ~GSC_IN_ROT_MASK;
-
-	switch (degree) {
-	case EXYNOS_DRM_DEGREE_0:
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
-			cfg |= GSC_IN_ROT_XFLIP;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
-			cfg |= GSC_IN_ROT_YFLIP;
-		break;
-	case EXYNOS_DRM_DEGREE_90:
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
-			cfg |= GSC_IN_ROT_90_XFLIP;
-		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
-			cfg |= GSC_IN_ROT_90_YFLIP;
-		else
-			cfg |= GSC_IN_ROT_90;
-		break;
-	case EXYNOS_DRM_DEGREE_180:
-		cfg |= GSC_IN_ROT_180;
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
-			cfg &= ~GSC_IN_ROT_XFLIP;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
-			cfg &= ~GSC_IN_ROT_YFLIP;
-		break;
-	case EXYNOS_DRM_DEGREE_270:
-		cfg |= GSC_IN_ROT_270;
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
-			cfg &= ~GSC_IN_ROT_XFLIP;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
-			cfg &= ~GSC_IN_ROT_YFLIP;
-		break;
-	default:
-		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
-		return -EINVAL;
-	}
-
-	gsc_write(cfg, GSC_IN_CON);
-
-	ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
-	*swap = ctx->rotation;
-
-	return 0;
 }
 
 static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
@@ -919,9 +725,9 @@ static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
 }
 
 static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
-		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+			     struct drm_exynos_ipp_task_rect *src,
+			     struct drm_exynos_ipp_task_rect *dst)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 cfg;
 	u32 src_w, src_h, dst_w, dst_h;
 	int ret = 0;
@@ -939,13 +745,13 @@ static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
 
 	ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
 	if (ret) {
-		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+		dev_err(ctx->dev, "failed to get ratio horizontal.\n");
 		return ret;
 	}
 
 	ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
 	if (ret) {
-		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+		dev_err(ctx->dev, "failed to get ratio vertical.\n");
 		return ret;
 	}
 
@@ -1039,47 +845,37 @@ static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
 	gsc_write(cfg, GSC_MAIN_V_RATIO);
 }
 
-static int gsc_dst_set_size(struct device *dev, int swap,
-		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+static void gsc_dst_set_size(struct gsc_context *ctx,
+			     struct exynos_drm_ipp_buffer *buf)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct drm_exynos_pos img_pos = *pos;
 	struct gsc_scaler *sc = &ctx->sc;
 	u32 cfg;
 
-	DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
-		swap, pos->x, pos->y, pos->w, pos->h);
-
-	if (swap) {
-		img_pos.w = pos->h;
-		img_pos.h = pos->w;
-	}
-
 	/* pixel offset */
-	cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
-		GSC_DSTIMG_OFFSET_Y(pos->y));
+	cfg = (GSC_DSTIMG_OFFSET_X(buf->rect.x) |
+		GSC_DSTIMG_OFFSET_Y(buf->rect.y));
 	gsc_write(cfg, GSC_DSTIMG_OFFSET);
 
 	/* scaled size */
-	cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+	if (ctx->rotation)
+		cfg = (GSC_SCALED_WIDTH(buf->rect.h) |
+		       GSC_SCALED_HEIGHT(buf->rect.w));
+	else
+		cfg = (GSC_SCALED_WIDTH(buf->rect.w) |
+		       GSC_SCALED_HEIGHT(buf->rect.h));
 	gsc_write(cfg, GSC_SCALED_SIZE);
 
-	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize);
-
 	/* original size */
 	cfg = gsc_read(GSC_DSTIMG_SIZE);
-	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
-		GSC_DSTIMG_WIDTH_MASK);
-	cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
-		GSC_DSTIMG_HEIGHT(sz->vsize));
+	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
+	cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
+	       GSC_DSTIMG_HEIGHT(buf->buf.height);
 	gsc_write(cfg, GSC_DSTIMG_SIZE);
 
 	cfg = gsc_read(GSC_OUT_CON);
 	cfg &= ~GSC_OUT_RGB_TYPE_MASK;
 
-	DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range);
-
-	if (pos->w >= GSC_WIDTH_ITU_709)
+	if (buf->rect.w >= GSC_WIDTH_ITU_709)
 		if (sc->range)
 			cfg |= GSC_OUT_RGB_HD_WIDE;
 		else
@@ -1091,8 +887,6 @@ static int gsc_dst_set_size(struct device *dev, int swap,
 			cfg |= GSC_OUT_RGB_SD_NARROW;
 
 	gsc_write(cfg, GSC_OUT_CON);
-
-	return 0;
 }
 
 static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
@@ -1111,35 +905,16 @@ static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
 	return buf_num;
 }
 
-static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
-		enum drm_exynos_ipp_buf_type buf_type)
+static void gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+				bool enqueue)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	bool masked;
+	bool masked = !enqueue;
 	u32 cfg;
 	u32 mask = 0x00000001 << buf_id;
-	int ret = 0;
-
-	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
-
-	mutex_lock(&ctx->lock);
 
 	/* mask register set */
 	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
 
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		masked = false;
-		break;
-	case IPP_BUF_DEQUEUE:
-		masked = true;
-		break;
-	default:
-		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
-		ret =  -EINVAL;
-		goto err_unlock;
-	}
-
 	/* sequence id */
 	cfg &= ~mask;
 	cfg |= masked << buf_id;
@@ -1148,94 +923,29 @@ static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
 	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
 
 	/* interrupt enable */
-	if (buf_type == IPP_BUF_ENQUEUE &&
-	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+	if (enqueue && gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
 		gsc_handle_irq(ctx, true, false, true);
 
 	/* interrupt disable */
-	if (buf_type == IPP_BUF_DEQUEUE &&
-	    gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+	if (!enqueue && gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
 		gsc_handle_irq(ctx, false, false, true);
-
-err_unlock:
-	mutex_unlock(&ctx->lock);
-	return ret;
 }
 
-static int gsc_dst_set_addr(struct device *dev,
-		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
-		enum drm_exynos_ipp_buf_type buf_type)
+static void gsc_dst_set_addr(struct gsc_context *ctx,
+			     u32 buf_id, struct exynos_drm_ipp_buffer *buf)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_property *property;
-
-	if (!c_node) {
-		DRM_ERROR("failed to get c_node.\n");
-		return -EFAULT;
-	}
-
-	property = &c_node->property;
-
-	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
-		property->prop_id, buf_id, buf_type);
-
-	if (buf_id > GSC_MAX_DST) {
-		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
-		return -EINVAL;
-	}
-
 	/* address register set */
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
-			GSC_OUT_BASE_ADDR_Y(buf_id));
-		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
-			GSC_OUT_BASE_ADDR_CB(buf_id));
-		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
-			GSC_OUT_BASE_ADDR_CR(buf_id));
-		break;
-	case IPP_BUF_DEQUEUE:
-		gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
-		gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
-		gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
-		break;
-	default:
-		/* bypass */
-		break;
-	}
+	gsc_write(buf->dma_addr[0], GSC_OUT_BASE_ADDR_Y(buf_id));
+	gsc_write(buf->dma_addr[1], GSC_OUT_BASE_ADDR_CB(buf_id));
+	gsc_write(buf->dma_addr[2], GSC_OUT_BASE_ADDR_CR(buf_id));
 
-	return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
-}
-
-static struct exynos_drm_ipp_ops gsc_dst_ops = {
-	.set_fmt = gsc_dst_set_fmt,
-	.set_transf = gsc_dst_set_transf,
-	.set_size = gsc_dst_set_size,
-	.set_addr = gsc_dst_set_addr,
-};
-
-static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
-{
-	DRM_DEBUG_KMS("enable[%d]\n", enable);
-
-	if (enable) {
-		clk_prepare_enable(ctx->gsc_clk);
-		ctx->suspended = false;
-	} else {
-		clk_disable_unprepare(ctx->gsc_clk);
-		ctx->suspended = true;
-	}
-
-	return 0;
+	gsc_dst_set_buf_seq(ctx, buf_id, true);
 }
 
 static int gsc_get_src_buf_index(struct gsc_context *ctx)
 {
 	u32 cfg, curr_index, i;
 	u32 buf_id = GSC_MAX_SRC;
-	int ret;
 
 	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
 
@@ -1249,19 +959,15 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
 		}
 	}
 
+	DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+		curr_index, buf_id);
+
 	if (buf_id == GSC_MAX_SRC) {
 		DRM_ERROR("failed to get in buffer index.\n");
 		return -EINVAL;
 	}
 
-	ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
-	if (ret < 0) {
-		DRM_ERROR("failed to dequeue.\n");
-		return ret;
-	}
-
-	DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
-		curr_index, buf_id);
+	gsc_src_set_buf_seq(ctx, buf_id, false);
 
 	return buf_id;
 }
@@ -1270,7 +976,6 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
 {
 	u32 cfg, curr_index, i;
 	u32 buf_id = GSC_MAX_DST;
-	int ret;
 
 	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
 
@@ -1289,11 +994,7 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
 		return -EINVAL;
 	}
 
-	ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
-	if (ret < 0) {
-		DRM_ERROR("failed to dequeue.\n");
-		return ret;
-	}
+	gsc_dst_set_buf_seq(ctx, buf_id, false);
 
 	DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
 		curr_index, buf_id);
@@ -1304,215 +1005,55 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
 static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
 {
 	struct gsc_context *ctx = dev_id;
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_event_work *event_work =
-		c_node->event_work;
 	u32 status;
-	int buf_id[EXYNOS_DRM_OPS_MAX];
+	int err = 0;
 
 	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
 
 	status = gsc_read(GSC_IRQ);
 	if (status & GSC_IRQ_STATUS_OR_IRQ) {
-		dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
+		dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n",
 			ctx->id, status);
-		return IRQ_NONE;
+		err = -EINVAL;
 	}
 
 	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
-		dev_dbg(ippdrv->dev, "occurred frame done at %d, status 0x%x.\n",
+		int src_buf_id, dst_buf_id;
+
+		dev_dbg(ctx->dev, "occurred frame done at %d, status 0x%x.\n",
 			ctx->id, status);
 
-		buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
-		if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
-			return IRQ_HANDLED;
+		src_buf_id = gsc_get_src_buf_index(ctx);
+		dst_buf_id = gsc_get_dst_buf_index(ctx);
 
-		buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
-		if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
-			return IRQ_HANDLED;
+		DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n",	src_buf_id,
+			      dst_buf_id);
 
-		DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n",
-			buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+		if (src_buf_id < 0 || dst_buf_id < 0)
+			err = -EINVAL;
+	}
 
-		event_work->ippdrv = ippdrv;
-		event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
-			buf_id[EXYNOS_DRM_OPS_SRC];
-		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
-			buf_id[EXYNOS_DRM_OPS_DST];
-		queue_work(ippdrv->event_workq, &event_work->work);
+	if (ctx->task) {
+		struct exynos_drm_ipp_task *task = ctx->task;
+
+		ctx->task = NULL;
+		pm_runtime_mark_last_busy(ctx->dev);
+		pm_runtime_put_autosuspend(ctx->dev);
+		exynos_drm_ipp_task_done(task, err);
 	}
 
 	return IRQ_HANDLED;
 }
 
-static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+static int gsc_reset(struct gsc_context *ctx)
 {
-	struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
-
-	prop_list->version = 1;
-	prop_list->writeback = 1;
-	prop_list->refresh_min = GSC_REFRESH_MIN;
-	prop_list->refresh_max = GSC_REFRESH_MAX;
-	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
-				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
-	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
-				(1 << EXYNOS_DRM_DEGREE_90) |
-				(1 << EXYNOS_DRM_DEGREE_180) |
-				(1 << EXYNOS_DRM_DEGREE_270);
-	prop_list->csc = 1;
-	prop_list->crop = 1;
-	prop_list->crop_max.hsize = GSC_CROP_MAX;
-	prop_list->crop_max.vsize = GSC_CROP_MAX;
-	prop_list->crop_min.hsize = GSC_CROP_MIN;
-	prop_list->crop_min.vsize = GSC_CROP_MIN;
-	prop_list->scale = 1;
-	prop_list->scale_max.hsize = GSC_SCALE_MAX;
-	prop_list->scale_max.vsize = GSC_SCALE_MAX;
-	prop_list->scale_min.hsize = GSC_SCALE_MIN;
-	prop_list->scale_min.vsize = GSC_SCALE_MIN;
-
-	return 0;
-}
-
-static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
-{
-	switch (flip) {
-	case EXYNOS_DRM_FLIP_NONE:
-	case EXYNOS_DRM_FLIP_VERTICAL:
-	case EXYNOS_DRM_FLIP_HORIZONTAL:
-	case EXYNOS_DRM_FLIP_BOTH:
-		return true;
-	default:
-		DRM_DEBUG_KMS("invalid flip\n");
-		return false;
-	}
-}
-
-static int gsc_ippdrv_check_property(struct device *dev,
-		struct drm_exynos_ipp_property *property)
-{
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
-	struct drm_exynos_ipp_config *config;
-	struct drm_exynos_pos *pos;
-	struct drm_exynos_sz *sz;
-	bool swap;
-	int i;
-
-	for_each_ipp_ops(i) {
-		if ((i == EXYNOS_DRM_OPS_SRC) &&
-			(property->cmd == IPP_CMD_WB))
-			continue;
-
-		config = &property->config[i];
-		pos = &config->pos;
-		sz = &config->sz;
-
-		/* check for flip */
-		if (!gsc_check_drm_flip(config->flip)) {
-			DRM_ERROR("invalid flip.\n");
-			goto err_property;
-		}
-
-		/* check for degree */
-		switch (config->degree) {
-		case EXYNOS_DRM_DEGREE_90:
-		case EXYNOS_DRM_DEGREE_270:
-			swap = true;
-			break;
-		case EXYNOS_DRM_DEGREE_0:
-		case EXYNOS_DRM_DEGREE_180:
-			swap = false;
-			break;
-		default:
-			DRM_ERROR("invalid degree.\n");
-			goto err_property;
-		}
-
-		/* check for buffer bound */
-		if ((pos->x + pos->w > sz->hsize) ||
-			(pos->y + pos->h > sz->vsize)) {
-			DRM_ERROR("out of buf bound.\n");
-			goto err_property;
-		}
-
-		/* check for crop */
-		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
-			if (swap) {
-				if ((pos->h < pp->crop_min.hsize) ||
-					(sz->vsize > pp->crop_max.hsize) ||
-					(pos->w < pp->crop_min.vsize) ||
-					(sz->hsize > pp->crop_max.vsize)) {
-					DRM_ERROR("out of crop size.\n");
-					goto err_property;
-				}
-			} else {
-				if ((pos->w < pp->crop_min.hsize) ||
-					(sz->hsize > pp->crop_max.hsize) ||
-					(pos->h < pp->crop_min.vsize) ||
-					(sz->vsize > pp->crop_max.vsize)) {
-					DRM_ERROR("out of crop size.\n");
-					goto err_property;
-				}
-			}
-		}
-
-		/* check for scale */
-		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
-			if (swap) {
-				if ((pos->h < pp->scale_min.hsize) ||
-					(sz->vsize > pp->scale_max.hsize) ||
-					(pos->w < pp->scale_min.vsize) ||
-					(sz->hsize > pp->scale_max.vsize)) {
-					DRM_ERROR("out of scale size.\n");
-					goto err_property;
-				}
-			} else {
-				if ((pos->w < pp->scale_min.hsize) ||
-					(sz->hsize > pp->scale_max.hsize) ||
-					(pos->h < pp->scale_min.vsize) ||
-					(sz->vsize > pp->scale_max.vsize)) {
-					DRM_ERROR("out of scale size.\n");
-					goto err_property;
-				}
-			}
-		}
-	}
-
-	return 0;
-
-err_property:
-	for_each_ipp_ops(i) {
-		if ((i == EXYNOS_DRM_OPS_SRC) &&
-			(property->cmd == IPP_CMD_WB))
-			continue;
-
-		config = &property->config[i];
-		pos = &config->pos;
-		sz = &config->sz;
-
-		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
-			i ? "dst" : "src", config->flip, config->degree,
-			pos->x, pos->y, pos->w, pos->h,
-			sz->hsize, sz->vsize);
-	}
-
-	return -EINVAL;
-}
-
-
-static int gsc_ippdrv_reset(struct device *dev)
-{
-	struct gsc_context *ctx = get_gsc_context(dev);
 	struct gsc_scaler *sc = &ctx->sc;
 	int ret;
 
 	/* reset h/w block */
 	ret = gsc_sw_reset(ctx);
 	if (ret < 0) {
-		dev_err(dev, "failed to reset hardware.\n");
+		dev_err(ctx->dev, "failed to reset hardware.\n");
 		return ret;
 	}
 
@@ -1523,166 +1064,172 @@ static int gsc_ippdrv_reset(struct device *dev)
 	return 0;
 }
 
-static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+static void gsc_start(struct gsc_context *ctx)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_property *property;
-	struct drm_exynos_ipp_config *config;
-	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
-	struct drm_exynos_ipp_set_wb set_wb;
 	u32 cfg;
-	int ret, i;
-
-	DRM_DEBUG_KMS("cmd[%d]\n", cmd);
-
-	if (!c_node) {
-		DRM_ERROR("failed to get c_node.\n");
-		return -EINVAL;
-	}
-
-	property = &c_node->property;
 
 	gsc_handle_irq(ctx, true, false, true);
 
-	for_each_ipp_ops(i) {
-		config = &property->config[i];
-		img_pos[i] = config->pos;
-	}
+	/* enable one shot */
+	cfg = gsc_read(GSC_ENABLE);
+	cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+		GSC_ENABLE_CLK_GATE_MODE_MASK);
+	cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+	gsc_write(cfg, GSC_ENABLE);
 
-	switch (cmd) {
-	case IPP_CMD_M2M:
-		/* enable one shot */
-		cfg = gsc_read(GSC_ENABLE);
-		cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
-			GSC_ENABLE_CLK_GATE_MODE_MASK);
-		cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
-		gsc_write(cfg, GSC_ENABLE);
+	/* src dma memory */
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+	cfg |= GSC_IN_PATH_MEMORY;
+	gsc_write(cfg, GSC_IN_CON);
 
-		/* src dma memory */
-		cfg = gsc_read(GSC_IN_CON);
-		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
-		cfg |= GSC_IN_PATH_MEMORY;
-		gsc_write(cfg, GSC_IN_CON);
-
-		/* dst dma memory */
-		cfg = gsc_read(GSC_OUT_CON);
-		cfg |= GSC_OUT_PATH_MEMORY;
-		gsc_write(cfg, GSC_OUT_CON);
-		break;
-	case IPP_CMD_WB:
-		set_wb.enable = 1;
-		set_wb.refresh = property->refresh_rate;
-		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
-		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
-
-		/* src local path */
-		cfg = gsc_read(GSC_IN_CON);
-		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
-		cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
-		gsc_write(cfg, GSC_IN_CON);
-
-		/* dst dma memory */
-		cfg = gsc_read(GSC_OUT_CON);
-		cfg |= GSC_OUT_PATH_MEMORY;
-		gsc_write(cfg, GSC_OUT_CON);
-		break;
-	case IPP_CMD_OUTPUT:
-		/* src dma memory */
-		cfg = gsc_read(GSC_IN_CON);
-		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
-		cfg |= GSC_IN_PATH_MEMORY;
-		gsc_write(cfg, GSC_IN_CON);
-
-		/* dst local path */
-		cfg = gsc_read(GSC_OUT_CON);
-		cfg |= GSC_OUT_PATH_MEMORY;
-		gsc_write(cfg, GSC_OUT_CON);
-		break;
-	default:
-		ret = -EINVAL;
-		dev_err(dev, "invalid operations.\n");
-		return ret;
-	}
-
-	ret = gsc_set_prescaler(ctx, &ctx->sc,
-		&img_pos[EXYNOS_DRM_OPS_SRC],
-		&img_pos[EXYNOS_DRM_OPS_DST]);
-	if (ret) {
-		dev_err(dev, "failed to set prescaler.\n");
-		return ret;
-	}
+	/* dst dma memory */
+	cfg = gsc_read(GSC_OUT_CON);
+	cfg |= GSC_OUT_PATH_MEMORY;
+	gsc_write(cfg, GSC_OUT_CON);
 
 	gsc_set_scaler(ctx, &ctx->sc);
 
 	cfg = gsc_read(GSC_ENABLE);
 	cfg |= GSC_ENABLE_ON;
 	gsc_write(cfg, GSC_ENABLE);
+}
+
+static int gsc_commit(struct exynos_drm_ipp *ipp,
+			  struct exynos_drm_ipp_task *task)
+{
+	struct gsc_context *ctx = container_of(ipp, struct gsc_context, ipp);
+	int ret;
+
+	pm_runtime_get_sync(ctx->dev);
+	ctx->task = task;
+
+	ret = gsc_reset(ctx);
+	if (ret) {
+		pm_runtime_put_autosuspend(ctx->dev);
+		ctx->task = NULL;
+		return ret;
+	}
+
+	gsc_src_set_fmt(ctx, task->src.buf.fourcc);
+	gsc_src_set_transf(ctx, task->transform.rotation);
+	gsc_src_set_size(ctx, &task->src);
+	gsc_src_set_addr(ctx, 0, &task->src);
+	gsc_dst_set_fmt(ctx, task->dst.buf.fourcc);
+	gsc_dst_set_size(ctx, &task->dst);
+	gsc_dst_set_addr(ctx, 0, &task->dst);
+	gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
+	gsc_start(ctx);
 
 	return 0;
 }
 
-static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+static void gsc_abort(struct exynos_drm_ipp *ipp,
+			  struct exynos_drm_ipp_task *task)
 {
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
-	u32 cfg;
+	struct gsc_context *ctx =
+			container_of(ipp, struct gsc_context, ipp);
 
-	DRM_DEBUG_KMS("cmd[%d]\n", cmd);
+	gsc_reset(ctx);
+	if (ctx->task) {
+		struct exynos_drm_ipp_task *task = ctx->task;
 
-	switch (cmd) {
-	case IPP_CMD_M2M:
-		/* bypass */
-		break;
-	case IPP_CMD_WB:
-		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
-		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
-		break;
-	case IPP_CMD_OUTPUT:
-	default:
-		dev_err(dev, "invalid operations.\n");
-		break;
+		ctx->task = NULL;
+		pm_runtime_mark_last_busy(ctx->dev);
+		pm_runtime_put_autosuspend(ctx->dev);
+		exynos_drm_ipp_task_done(task, -EIO);
 	}
-
-	gsc_handle_irq(ctx, false, false, true);
-
-	/* reset sequence */
-	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
-	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
-	gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
-
-	cfg = gsc_read(GSC_ENABLE);
-	cfg &= ~GSC_ENABLE_ON;
-	gsc_write(cfg, GSC_ENABLE);
 }
 
+static struct exynos_drm_ipp_funcs ipp_funcs = {
+	.commit = gsc_commit,
+	.abort = gsc_abort,
+};
+
+static int gsc_bind(struct device *dev, struct device *master, void *data)
+{
+	struct gsc_context *ctx = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct exynos_drm_ipp *ipp = &ctx->ipp;
+
+	ctx->drm_dev = drm_dev;
+	drm_iommu_attach_device(drm_dev, dev);
+
+	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
+			DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
+			ctx->formats, ctx->num_formats, "gsc");
+
+	dev_info(dev, "The exynos gscaler has been probed successfully\n");
+
+	return 0;
+}
+
+static void gsc_unbind(struct device *dev, struct device *master,
+			void *data)
+{
+	struct gsc_context *ctx = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct exynos_drm_ipp *ipp = &ctx->ipp;
+
+	exynos_drm_ipp_unregister(drm_dev, ipp);
+	drm_iommu_detach_device(drm_dev, dev);
+}
+
+static const struct component_ops gsc_component_ops = {
+	.bind	= gsc_bind,
+	.unbind = gsc_unbind,
+};
+
+static const unsigned int gsc_formats[] = {
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61,
+	DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU,
+	DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422,
+};
+
 static int gsc_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct gsc_driverdata *driver_data;
+	struct exynos_drm_ipp_formats *formats;
 	struct gsc_context *ctx;
 	struct resource *res;
-	struct exynos_drm_ippdrv *ippdrv;
-	int ret;
+	int ret, i;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
-	if (dev->of_node) {
-		ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
-							"samsung,sysreg");
-		if (IS_ERR(ctx->sysreg)) {
-			dev_warn(dev, "failed to get system register.\n");
-			ctx->sysreg = NULL;
-		}
+	formats = devm_kzalloc(dev, sizeof(*formats) *
+			       (ARRAY_SIZE(gsc_formats)), GFP_KERNEL);
+	if (!formats)
+		return -ENOMEM;
+
+	driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
+	ctx->dev = dev;
+	ctx->num_clocks = driver_data->num_clocks;
+	ctx->clk_names = driver_data->clk_names;
+
+	for (i = 0; i < ARRAY_SIZE(gsc_formats); i++) {
+		formats[i].fourcc = gsc_formats[i];
+		formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
+				  DRM_EXYNOS_IPP_FORMAT_DESTINATION;
+		formats[i].limits = driver_data->limits;
+		formats[i].num_limits = driver_data->num_limits;
 	}
+	ctx->formats = formats;
+	ctx->num_formats = ARRAY_SIZE(gsc_formats);
 
 	/* clock control */
-	ctx->gsc_clk = devm_clk_get(dev, "gscl");
-	if (IS_ERR(ctx->gsc_clk)) {
-		dev_err(dev, "failed to get gsc clock.\n");
-		return PTR_ERR(ctx->gsc_clk);
+	for (i = 0; i < ctx->num_clocks; i++) {
+		ctx->clocks[i] = devm_clk_get(dev, ctx->clk_names[i]);
+		if (IS_ERR(ctx->clocks[i])) {
+			dev_err(dev, "failed to get clock: %s\n",
+				ctx->clk_names[i]);
+			return PTR_ERR(ctx->clocks[i]);
+		}
 	}
 
 	/* resource memory */
@@ -1699,8 +1246,8 @@ static int gsc_probe(struct platform_device *pdev)
 	}
 
 	ctx->irq = res->start;
-	ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler,
-		IRQF_ONESHOT, "drm_gsc", ctx);
+	ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0,
+			       dev_name(dev), ctx);
 	if (ret < 0) {
 		dev_err(dev, "failed to request irq.\n");
 		return ret;
@@ -1709,38 +1256,22 @@ static int gsc_probe(struct platform_device *pdev)
 	/* context initialization */
 	ctx->id = pdev->id;
 
-	ippdrv = &ctx->ippdrv;
-	ippdrv->dev = dev;
-	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
-	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
-	ippdrv->check_property = gsc_ippdrv_check_property;
-	ippdrv->reset = gsc_ippdrv_reset;
-	ippdrv->start = gsc_ippdrv_start;
-	ippdrv->stop = gsc_ippdrv_stop;
-	ret = gsc_init_prop_list(ippdrv);
-	if (ret < 0) {
-		dev_err(dev, "failed to init property list.\n");
-		return ret;
-	}
-
-	DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
-
-	mutex_init(&ctx->lock);
 	platform_set_drvdata(pdev, ctx);
 
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, GSC_AUTOSUSPEND_DELAY);
 	pm_runtime_enable(dev);
 
-	ret = exynos_drm_ippdrv_register(ippdrv);
-	if (ret < 0) {
-		dev_err(dev, "failed to register drm gsc device.\n");
-		goto err_ippdrv_register;
-	}
+	ret = component_add(dev, &gsc_component_ops);
+	if (ret)
+		goto err_pm_dis;
 
 	dev_info(dev, "drm gsc registered successfully.\n");
 
 	return 0;
 
-err_ippdrv_register:
+err_pm_dis:
+	pm_runtime_dont_use_autosuspend(dev);
 	pm_runtime_disable(dev);
 	return ret;
 }
@@ -1748,13 +1279,8 @@ err_ippdrv_register:
 static int gsc_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct gsc_context *ctx = get_gsc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 
-	exynos_drm_ippdrv_unregister(ippdrv);
-	mutex_destroy(&ctx->lock);
-
-	pm_runtime_set_suspended(dev);
+	pm_runtime_dont_use_autosuspend(dev);
 	pm_runtime_disable(dev);
 
 	return 0;
@@ -1763,19 +1289,32 @@ static int gsc_remove(struct platform_device *pdev)
 static int __maybe_unused gsc_runtime_suspend(struct device *dev)
 {
 	struct gsc_context *ctx = get_gsc_context(dev);
+	int i;
 
 	DRM_DEBUG_KMS("id[%d]\n", ctx->id);
 
-	return  gsc_clk_ctrl(ctx, false);
+	for (i = ctx->num_clocks - 1; i >= 0; i--)
+		clk_disable_unprepare(ctx->clocks[i]);
+
+	return 0;
 }
 
 static int __maybe_unused gsc_runtime_resume(struct device *dev)
 {
 	struct gsc_context *ctx = get_gsc_context(dev);
+	int i, ret;
 
 	DRM_DEBUG_KMS("id[%d]\n", ctx->id);
 
-	return  gsc_clk_ctrl(ctx, true);
+	for (i = 0; i < ctx->num_clocks; i++) {
+		ret = clk_prepare_enable(ctx->clocks[i]);
+		if (ret) {
+			while (--i > 0)
+				clk_disable_unprepare(ctx->clocks[i]);
+			return ret;
+		}
+	}
+	return 0;
 }
 
 static const struct dev_pm_ops gsc_pm_ops = {
@@ -1784,9 +1323,66 @@ static const struct dev_pm_ops gsc_pm_ops = {
 	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
 };
 
+static const struct drm_exynos_ipp_limit gsc_5250_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) },
+	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) },
+	{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2048 }, .v = { 16, 2048 }) },
+	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
+			  .v = { (1 << 16) / 16, (1 << 16) * 8 }) },
+};
+
+static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) },
+	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) },
+	{ IPP_SIZE_LIMIT(ROTATED, .h = { 16, 2016 }, .v = { 8, 2016 }) },
+	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
+			  .v = { (1 << 16) / 16, (1 << 16) * 8 }) },
+};
+
+static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
+	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
+	{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
+	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
+			  .v = { (1 << 16) / 16, (1 << 16) * 8 }) },
+};
+
+static struct gsc_driverdata gsc_exynos5250_drvdata = {
+	.clk_names = {"gscl"},
+	.num_clocks = 1,
+	.limits = gsc_5250_limits,
+	.num_limits = ARRAY_SIZE(gsc_5250_limits),
+};
+
+static struct gsc_driverdata gsc_exynos5420_drvdata = {
+	.clk_names = {"gscl"},
+	.num_clocks = 1,
+	.limits = gsc_5420_limits,
+	.num_limits = ARRAY_SIZE(gsc_5420_limits),
+};
+
+static struct gsc_driverdata gsc_exynos5433_drvdata = {
+	.clk_names = {"pclk", "aclk", "aclk_xiu", "aclk_gsclbend"},
+	.num_clocks = 4,
+	.limits = gsc_5433_limits,
+	.num_limits = ARRAY_SIZE(gsc_5433_limits),
+};
+
 static const struct of_device_id exynos_drm_gsc_of_match[] = {
-	{ .compatible = "samsung,exynos5-gsc" },
-	{ },
+	{
+		.compatible = "samsung,exynos5-gsc",
+		.data = &gsc_exynos5250_drvdata,
+	}, {
+		.compatible = "samsung,exynos5250-gsc",
+		.data = &gsc_exynos5250_drvdata,
+	}, {
+		.compatible = "samsung,exynos5420-gsc",
+		.data = &gsc_exynos5420_drvdata,
+	}, {
+		.compatible = "samsung,exynos5433-gsc",
+		.data = &gsc_exynos5433_drvdata,
+	}, {
+	},
 };
 MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
 
@@ -1800,4 +1396,3 @@ struct platform_driver gsc_driver = {
 		.of_match_table = of_match_ptr(exynos_drm_gsc_of_match),
 	},
 };
-
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
deleted file mode 100644
index 29ec1c5efcf2..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *
- * Authors:
- *	Eunchul Kim <chulspro.kim@samsung.com>
- *	Jinyoung Jeon <jy0.jeon@samsung.com>
- *	Sangmin Lee <lsmin.lee@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_GSC_H_
-#define _EXYNOS_DRM_GSC_H_
-
-/*
- * TODO
- * FIMD output interface notifier callback.
- * Mixer output interface notifier callback.
- */
-
-#endif /* _EXYNOS_DRM_GSC_H_ */

From 7a2d5c77c55847f31945e5aa8337db2218a5a7c1 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski@samsung.com>
Date: Thu, 10 May 2018 08:52:12 +0900
Subject: [PATCH 0666/1286] drm/exynos: fimc: Convert driver to IPP v2 core API

This patch adapts the Exynos DRM FIMC driver to the new IPP v2 core API.
A side effect of this conversion is a switch to the driver component API
so that the driver registers properly with the Exynos DRM core.
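
For reference, a minimal sketch of what the component-based registration
looks like after such a conversion, mirroring the GScaler conversion earlier
in this series; the FIMC-specific names used here (fimc_context, fimc_bind,
fimc_component_ops, ipp_funcs and the "fimc" alias) are illustrative
assumptions, not necessarily the exact symbols in this patch:

#include <linux/component.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"

/* Illustrative sketch only; names are assumptions, see note above. */
static int fimc_bind(struct device *dev, struct device *master, void *data)
{
	struct fimc_context *ctx = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;

	ctx->drm_dev = drm_dev;
	drm_iommu_attach_device(drm_dev, dev);

	/*
	 * Expose this device as an IPP v2 module; ipp_funcs provides the
	 * .commit/.abort callbacks, as in the GScaler driver.
	 */
	exynos_drm_ipp_register(drm_dev, &ctx->ipp, &ipp_funcs,
			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
			DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
			ctx->formats, ctx->num_formats, "fimc");
	return 0;
}

static void fimc_unbind(struct device *dev, struct device *master, void *data)
{
	struct fimc_context *ctx = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;

	exynos_drm_ipp_unregister(drm_dev, &ctx->ipp);
	drm_iommu_detach_device(drm_dev, dev);
}

static const struct component_ops fimc_component_ops = {
	.bind	= fimc_bind,
	.unbind	= fimc_unbind,
};

/* probe() then only needs to call component_add(dev, &fimc_component_ops). */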

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Merged manually due to a merge conflict.
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/Kconfig           |    2 +-
 drivers/gpu/drm/exynos/exynos_drm_drv.c  |    8 +-
 drivers/gpu/drm/exynos/exynos_drm_drv.h  |    9 +
 drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1088 +++++++---------------
 drivers/gpu/drm/exynos/exynos_drm_fimc.h |   23 -
 5 files changed, 370 insertions(+), 760 deletions(-)
 delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_fimc.h

diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 5c216548ea18..54f5703b37e8 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -100,7 +100,7 @@ config DRM_EXYNOS_IPP
 
 config DRM_EXYNOS_FIMC
 	bool "FIMC"
-	depends on BROKEN && MFD_SYSCON
+	select DRM_EXYNOS_IPP
 	help
 	  Choose this option if you want to use Exynos FIMC for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2dcb94034716..7ba13c122d14 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -222,6 +222,7 @@ struct exynos_drm_driver_info {
 #define DRM_COMPONENT_DRIVER	BIT(0)	/* supports component framework */
 #define DRM_VIRTUAL_DEVICE	BIT(1)	/* create virtual platform device */
 #define DRM_DMA_DEVICE		BIT(2)	/* can be used for dma allocations */
+#define DRM_FIMC_DEVICE		BIT(3)	/* devices shared with V4L2 subsystem */
 
 #define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
 
@@ -261,6 +262,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
 		DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
 	}, {
 		DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
+		DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
 	}, {
 		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
 		DRM_COMPONENT_DRIVER
@@ -294,7 +296,11 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
 					    &info->driver->driver,
 					    (void *)platform_bus_type.match))) {
 			put_device(p);
-			component_match_add(dev, &match, compare_dev, d);
+
+			if (!(info->flags & DRM_FIMC_DEVICE) ||
+			    exynos_drm_check_fimc_device(d) == 0)
+				component_match_add(dev, &match,
+						    compare_dev, d);
 			p = d;
 		}
 		put_device(p);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index df2262f70d91..0834e7e28c99 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -273,6 +273,15 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
 }
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+int exynos_drm_check_fimc_device(struct device *dev);
+#else
+static inline int exynos_drm_check_fimc_device(struct device *dev)
+{
+	return 0;
+}
+#endif
+
 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
 			 bool nonblock);
 int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 5b18b5c5fdf2..4dfbfc7f3b84 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,6 +12,7 @@
  *
  */
 #include <linux/kernel.h>
+#include <linux/component.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
@@ -24,8 +25,8 @@
 #include <drm/exynos_drm.h>
 #include "regs-fimc.h"
 #include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
 #include "exynos_drm_ipp.h"
-#include "exynos_drm_fimc.h"
 
 /*
  * FIMC stands for Fully Interactive Mobile Camera and
@@ -33,23 +34,6 @@
  * input DMA reads image data from the memory.
  * output DMA writes image data to memory.
  * FIMC supports image rotation and image effect functions.
- *
- * M2M operation : supports crop/scale/rotation/csc so on.
- * Memory ----> FIMC H/W ----> Memory.
- * Writeback operation : supports cloned screen with FIMD.
- * FIMD ----> FIMC H/W ----> Memory.
- * Output operation : supports direct display using local path.
- * Memory ----> FIMC H/W ----> FIMD.
- */
-
-/*
- * TODO
- * 1. check suspend/resume api if needed.
- * 2. need to check use case platform_device_id.
- * 3. check src/dst size with, height.
- * 4. added check_prepare api for right register.
- * 5. need to add supported list in prop_list.
- * 6. check prescaler/scaler optimization.
  */
 
 #define FIMC_MAX_DEVS	4
@@ -59,29 +43,19 @@
 #define FIMC_BUF_STOP	1
 #define FIMC_BUF_START	2
 #define FIMC_WIDTH_ITU_709	1280
-#define FIMC_REFRESH_MAX	60
-#define FIMC_REFRESH_MIN	12
-#define FIMC_CROP_MAX	8192
-#define FIMC_CROP_MIN	32
-#define FIMC_SCALE_MAX	4224
-#define FIMC_SCALE_MIN	32
+#define FIMC_AUTOSUSPEND_DELAY	2000
+
+static unsigned int fimc_mask = 0xc;
+module_param_named(fimc_devs, fimc_mask, uint, 0644);
+MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
 
 #define get_fimc_context(dev)	platform_get_drvdata(to_platform_device(dev))
-#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
-					struct fimc_context, ippdrv);
-enum fimc_wb {
-	FIMC_WB_NONE,
-	FIMC_WB_A,
-	FIMC_WB_B,
-};
 
 enum {
 	FIMC_CLK_LCLK,
 	FIMC_CLK_GATE,
 	FIMC_CLK_WB_A,
 	FIMC_CLK_WB_B,
-	FIMC_CLK_MUX,
-	FIMC_CLK_PARENT,
 	FIMC_CLKS_MAX
 };
 
@@ -90,12 +64,8 @@ static const char * const fimc_clock_names[] = {
 	[FIMC_CLK_GATE]   = "fimc",
 	[FIMC_CLK_WB_A]   = "pxl_async0",
 	[FIMC_CLK_WB_B]   = "pxl_async1",
-	[FIMC_CLK_MUX]    = "mux",
-	[FIMC_CLK_PARENT] = "parent",
 };
 
-#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL
-
 /*
  * A structure of scaler.
  *
@@ -107,7 +77,7 @@ static const char * const fimc_clock_names[] = {
  * @vratio: vertical ratio.
  */
 struct fimc_scaler {
-	bool	range;
+	bool range;
 	bool bypass;
 	bool up_h;
 	bool up_v;
@@ -115,57 +85,33 @@ struct fimc_scaler {
 	u32 vratio;
 };
 
-/*
- * A structure of scaler capability.
- *
- * find user manual table 43-1.
- * @in_hori: scaler input horizontal size.
- * @bypass: scaler bypass mode.
- * @dst_h_wo_rot: target horizontal size without output rotation.
- * @dst_h_rot: target horizontal size with output rotation.
- * @rl_w_wo_rot: real width without input rotation.
- * @rl_h_rot: real height without output rotation.
- */
-struct fimc_capability {
-	/* scaler */
-	u32	in_hori;
-	u32	bypass;
-	/* output rotator */
-	u32	dst_h_wo_rot;
-	u32	dst_h_rot;
-	/* input rotator */
-	u32	rl_w_wo_rot;
-	u32	rl_h_rot;
-};
-
 /*
  * A structure of fimc context.
  *
- * @ippdrv: prepare initialization using ippdrv.
  * @regs_res: register resources.
  * @regs: memory mapped io registers.
  * @lock: locking of operations.
  * @clocks: fimc clocks.
- * @clk_frequency: LCLK clock frequency.
- * @sysreg: handle to SYSREG block regmap.
  * @sc: scaler information.
  * @pol: polarity of writeback.
  * @id: fimc id.
  * @irq: irq number.
- * @suspended: qos operations.
  */
 struct fimc_context {
-	struct exynos_drm_ippdrv	ippdrv;
+	struct exynos_drm_ipp ipp;
+	struct drm_device *drm_dev;
+	struct device	*dev;
+	struct exynos_drm_ipp_task	*task;
+	struct exynos_drm_ipp_formats	*formats;
+	unsigned int			num_formats;
+
 	struct resource	*regs_res;
 	void __iomem	*regs;
 	spinlock_t	lock;
 	struct clk	*clocks[FIMC_CLKS_MAX];
-	u32		clk_frequency;
-	struct regmap	*sysreg;
 	struct fimc_scaler	sc;
 	int	id;
 	int	irq;
-	bool	suspended;
 };
 
 static u32 fimc_read(struct fimc_context *ctx, u32 reg)
@@ -217,19 +163,10 @@ static void fimc_sw_reset(struct fimc_context *ctx)
 	fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);
 }
 
-static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
-{
-	return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
-				  SYSREG_FIMD0WB_DEST_MASK,
-				  ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
-}
-
-static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+static void fimc_set_type_ctrl(struct fimc_context *ctx)
 {
 	u32 cfg;
 
-	DRM_DEBUG_KMS("wb[%d]\n", wb);
-
 	cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
 	cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
 		EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
@@ -238,23 +175,10 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
 		EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
 		EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
 
-	switch (wb) {
-	case FIMC_WB_A:
-		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
-			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
-		break;
-	case FIMC_WB_B:
-		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
-			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
-		break;
-	case FIMC_WB_NONE:
-	default:
-		cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
-			EXYNOS_CIGCTRL_SELWRITEBACK_A |
-			EXYNOS_CIGCTRL_SELCAM_MIPI_A |
-			EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
-		break;
-	}
+	cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+		EXYNOS_CIGCTRL_SELWRITEBACK_A |
+		EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+		EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
 
 	fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
 }
@@ -296,7 +220,6 @@ static void fimc_clear_irq(struct fimc_context *ctx)
 
 static bool fimc_check_ovf(struct fimc_context *ctx)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 status, flag;
 
 	status = fimc_read(ctx, EXYNOS_CISTATUS);
@@ -310,7 +233,7 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
 			EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
 			EXYNOS_CIWDOFST_CLROVFICR);
 
-		dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n",
+		dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n",
 			ctx->id, status);
 		return true;
 	}
@@ -376,10 +299,8 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
 	fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
 }
 
-
-static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+static void fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 cfg;
 
 	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -392,12 +313,12 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
 	case DRM_FORMAT_RGB565:
 		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
 		fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
-		return 0;
+		return;
 	case DRM_FORMAT_RGB888:
 	case DRM_FORMAT_XRGB8888:
 		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
 		fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
-		return 0;
+		return;
 	default:
 		/* bypass */
 		break;
@@ -438,20 +359,13 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
 		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
 			EXYNOS_MSCTRL_C_INT_IN_2PLANE);
 		break;
-	default:
-		dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt);
-		return -EINVAL;
 	}
 
 	fimc_write(ctx, cfg, EXYNOS_MSCTRL);
-
-	return 0;
 }
 
-static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+static void fimc_src_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 cfg;
 
 	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -485,9 +399,6 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
 	case DRM_FORMAT_NV21:
 		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
 		break;
-	default:
-		dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
-		return -EINVAL;
 	}
 
 	fimc_write(ctx, cfg, EXYNOS_MSCTRL);
@@ -495,22 +406,22 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt)
 	cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
 	cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
 
-	cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+	if (tiled)
+		cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+	else
+		cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
 
 	fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
 
-	return fimc_src_set_fmt_order(ctx, fmt);
+	fimc_src_set_fmt_order(ctx, fmt);
 }
 
-static int fimc_src_set_transf(struct device *dev,
-		enum drm_exynos_degree degree,
-		enum drm_exynos_flip flip, bool *swap)
+static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
 	u32 cfg1, cfg2;
 
-	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
+	DRM_DEBUG_KMS("rotation[%x]\n", rotation);
 
 	cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
 	cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
@@ -520,61 +431,56 @@ static int fimc_src_set_transf(struct device *dev,
 	cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
 
 	switch (degree) {
-	case EXYNOS_DRM_DEGREE_0:
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+	case DRM_MODE_ROTATE_0:
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
 		break;
-	case EXYNOS_DRM_DEGREE_90:
+	case DRM_MODE_ROTATE_90:
 		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
 		break;
-	case EXYNOS_DRM_DEGREE_180:
+	case DRM_MODE_ROTATE_180:
 		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
 			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
 		break;
-	case EXYNOS_DRM_DEGREE_270:
+	case DRM_MODE_ROTATE_270:
 		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
 			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
 		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
 		break;
-	default:
-		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
-		return -EINVAL;
 	}
 
 	fimc_write(ctx, cfg1, EXYNOS_MSCTRL);
 	fimc_write(ctx, cfg2, EXYNOS_CITRGFMT);
-	*swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
-
-	return 0;
 }
 
-static int fimc_set_window(struct fimc_context *ctx,
-		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+static void fimc_set_window(struct fimc_context *ctx,
+			    struct exynos_drm_ipp_buffer *buf)
 {
 	u32 cfg, h1, h2, v1, v2;
 
 	/* cropped image */
-	h1 = pos->x;
-	h2 = sz->hsize - pos->w - pos->x;
-	v1 = pos->y;
-	v2 = sz->vsize - pos->h - pos->y;
+	h1 = buf->rect.x;
+	h2 = buf->buf.width - buf->rect.w - buf->rect.x;
+	v1 = buf->rect.y;
+	v2 = buf->buf.height - buf->rect.h - buf->rect.y;
 
 	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
-		pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+		buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
+		buf->buf.width, buf->buf.height);
 	DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
 
 	/*
@@ -592,42 +498,30 @@ static int fimc_set_window(struct fimc_context *ctx,
 	cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
 		EXYNOS_CIWDOFST2_WINVEROFST2(v2));
 	fimc_write(ctx, cfg, EXYNOS_CIWDOFST2);
-
-	return 0;
 }
 
-static int fimc_src_set_size(struct device *dev, int swap,
-		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+static void fimc_src_set_size(struct fimc_context *ctx,
+			      struct exynos_drm_ipp_buffer *buf)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct drm_exynos_pos img_pos = *pos;
-	struct drm_exynos_sz img_sz = *sz;
 	u32 cfg;
 
-	DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n",
-		swap, sz->hsize, sz->vsize);
+	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
 
 	/* original size */
-	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
-		EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
+		EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
 
 	fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
 
-	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
-
-	if (swap) {
-		img_pos.w = pos->h;
-		img_pos.h = pos->w;
-		img_sz.hsize = sz->vsize;
-		img_sz.vsize = sz->hsize;
-	}
+	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
+		buf->rect.w, buf->rect.h);
 
 	/* set input DMA image size */
 	cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
 	cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
 		EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
-	cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
-		EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+	cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(buf->rect.w) |
+		EXYNOS_CIREAL_ISIZE_HEIGHT(buf->rect.h));
 	fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE);
 
 	/*
@@ -635,91 +529,34 @@ static int fimc_src_set_size(struct device *dev, int swap,
 	 * for now, we support only ITU601 8 bit mode
 	 */
 	cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
-		EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
-		EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+		EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
+		EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
 	fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
 
 	/* offset Y(RGB), Cb, Cr */
-	cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
-		EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+	cfg = (EXYNOS_CIIYOFF_HORIZONTAL(buf->rect.x) |
+		EXYNOS_CIIYOFF_VERTICAL(buf->rect.y));
 	fimc_write(ctx, cfg, EXYNOS_CIIYOFF);
-	cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
-		EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+	cfg = (EXYNOS_CIICBOFF_HORIZONTAL(buf->rect.x) |
+		EXYNOS_CIICBOFF_VERTICAL(buf->rect.y));
 	fimc_write(ctx, cfg, EXYNOS_CIICBOFF);
-	cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
-		EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+	cfg = (EXYNOS_CIICROFF_HORIZONTAL(buf->rect.x) |
+		EXYNOS_CIICROFF_VERTICAL(buf->rect.y));
 	fimc_write(ctx, cfg, EXYNOS_CIICROFF);
 
-	return fimc_set_window(ctx, &img_pos, &img_sz);
+	fimc_set_window(ctx, buf);
 }
 
-static int fimc_src_set_addr(struct device *dev,
-		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
-		enum drm_exynos_ipp_buf_type buf_type)
+static void fimc_src_set_addr(struct fimc_context *ctx,
+			      struct exynos_drm_ipp_buffer *buf)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_property *property;
-	struct drm_exynos_ipp_config *config;
-
-	if (!c_node) {
-		DRM_ERROR("failed to get c_node.\n");
-		return -EINVAL;
-	}
-
-	property = &c_node->property;
-
-	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
-		property->prop_id, buf_id, buf_type);
-
-	if (buf_id > FIMC_MAX_SRC) {
-		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
-		return -ENOMEM;
-	}
-
-	/* address register set */
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		config = &property->config[EXYNOS_DRM_OPS_SRC];
-		fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
-			EXYNOS_CIIYSA0);
-
-		if (config->fmt == DRM_FORMAT_YVU420) {
-			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
-				EXYNOS_CIICBSA0);
-			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
-				EXYNOS_CIICRSA0);
-		} else {
-			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
-				EXYNOS_CIICBSA0);
-			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
-				EXYNOS_CIICRSA0);
-		}
-		break;
-	case IPP_BUF_DEQUEUE:
-		fimc_write(ctx, 0x0, EXYNOS_CIIYSA0);
-		fimc_write(ctx, 0x0, EXYNOS_CIICBSA0);
-		fimc_write(ctx, 0x0, EXYNOS_CIICRSA0);
-		break;
-	default:
-		/* bypass */
-		break;
-	}
-
-	return 0;
+	fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIIYSA(0));
+	fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIICBSA(0));
+	fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIICRSA(0));
 }
 
-static struct exynos_drm_ipp_ops fimc_src_ops = {
-	.set_fmt = fimc_src_set_fmt,
-	.set_transf = fimc_src_set_transf,
-	.set_size = fimc_src_set_size,
-	.set_addr = fimc_src_set_addr,
-};
-
-static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+static void fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 cfg;
 
 	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -732,11 +569,11 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
 	case DRM_FORMAT_RGB565:
 		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
 		fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
-		return 0;
+		return;
 	case DRM_FORMAT_RGB888:
 		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
 		fimc_write(ctx, cfg, EXYNOS_CISCCTRL);
-		return 0;
+		return;
 	case DRM_FORMAT_XRGB8888:
 		cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
 			EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
@@ -784,20 +621,13 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
 		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
 		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
 		break;
-	default:
-		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
-		return -EINVAL;
 	}
 
 	fimc_write(ctx, cfg, EXYNOS_CIOCTRL);
-
-	return 0;
 }
 
-static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+static void fimc_dst_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 cfg;
 
 	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
@@ -837,10 +667,6 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
 		case DRM_FORMAT_NV21:
 			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
 			break;
-		default:
-			dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
-				fmt);
-			return -EINVAL;
 		}
 
 		fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
@@ -849,73 +675,67 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
 	cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM);
 	cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
 
-	cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+	if (tiled)
+		cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+	else
+		cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
 
 	fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM);
 
-	return fimc_dst_set_fmt_order(ctx, fmt);
+	fimc_dst_set_fmt_order(ctx, fmt);
 }
 
-static int fimc_dst_set_transf(struct device *dev,
-		enum drm_exynos_degree degree,
-		enum drm_exynos_flip flip, bool *swap)
+static void fimc_dst_set_transf(struct fimc_context *ctx, unsigned int rotation)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
 	u32 cfg;
 
-	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip);
+	DRM_DEBUG_KMS("rotation[0x%x]\n", rotation);
 
 	cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
 	cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
 	cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
 
 	switch (degree) {
-	case EXYNOS_DRM_DEGREE_0:
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+	case DRM_MODE_ROTATE_0:
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
 		break;
-	case EXYNOS_DRM_DEGREE_90:
+	case DRM_MODE_ROTATE_90:
 		cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
 		break;
-	case EXYNOS_DRM_DEGREE_180:
+	case DRM_MODE_ROTATE_180:
 		cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
 			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
 		break;
-	case EXYNOS_DRM_DEGREE_270:
+	case DRM_MODE_ROTATE_270:
 		cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
 			EXYNOS_CITRGFMT_FLIP_X_MIRROR |
 			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
-		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+		if (rotation & DRM_MODE_REFLECT_X)
 			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
-		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+		if (rotation & DRM_MODE_REFLECT_Y)
 			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
 		break;
-	default:
-		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
-		return -EINVAL;
 	}
 
 	fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
-	*swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
-
-	return 0;
 }
 
 static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
-		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+			      struct drm_exynos_ipp_task_rect *src,
+			      struct drm_exynos_ipp_task_rect *dst)
 {
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 	u32 cfg, cfg_ext, shfactor;
 	u32 pre_dst_width, pre_dst_height;
 	u32 hfactor, vfactor;
@@ -942,13 +762,13 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
 	/* fimc_ippdrv_check_property assures that dividers are not null */
 	hfactor = fls(src_w / dst_w / 2);
 	if (hfactor > FIMC_SHFACTOR / 2) {
-		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+		dev_err(ctx->dev, "failed to get ratio horizontal.\n");
 		return -EINVAL;
 	}
 
 	vfactor = fls(src_h / dst_h / 2);
 	if (vfactor > FIMC_SHFACTOR / 2) {
-		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+		dev_err(ctx->dev, "failed to get ratio vertical.\n");
 		return -EINVAL;
 	}
 
@@ -1019,83 +839,77 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
 	fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN);
 }
 
-static int fimc_dst_set_size(struct device *dev, int swap,
-		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+static void fimc_dst_set_size(struct fimc_context *ctx,
+			     struct exynos_drm_ipp_buffer *buf)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct drm_exynos_pos img_pos = *pos;
-	struct drm_exynos_sz img_sz = *sz;
-	u32 cfg;
+	u32 cfg, cfg_ext;
 
-	DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n",
-		swap, sz->hsize, sz->vsize);
+	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
 
 	/* original size */
-	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
-		EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
+		EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
 
 	fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
 
-	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);
+	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
+		buf->rect.w, buf->rect.h);
 
 	/* CSC ITU */
 	cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
 	cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
 
-	if (sz->hsize >= FIMC_WIDTH_ITU_709)
+	if (buf->buf.width >= FIMC_WIDTH_ITU_709)
 		cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
 	else
 		cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
 
 	fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
 
-	if (swap) {
-		img_pos.w = pos->h;
-		img_pos.h = pos->w;
-		img_sz.hsize = sz->vsize;
-		img_sz.vsize = sz->hsize;
-	}
+	cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT);
 
 	/* target image size */
 	cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
 	cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
 		EXYNOS_CITRGFMT_TARGETV_MASK);
-	cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
-		EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+	if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE)
+		cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.h) |
+			EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.w));
+	else
+		cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.w) |
+			EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.h));
 	fimc_write(ctx, cfg, EXYNOS_CITRGFMT);
 
 	/* target area */
-	cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+	cfg = EXYNOS_CITAREA_TARGET_AREA(buf->rect.w * buf->rect.h);
 	fimc_write(ctx, cfg, EXYNOS_CITAREA);
 
 	/* offset Y(RGB), Cb, Cr */
-	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
-		EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(buf->rect.x) |
+		EXYNOS_CIOYOFF_VERTICAL(buf->rect.y));
 	fimc_write(ctx, cfg, EXYNOS_CIOYOFF);
-	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
-		EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(buf->rect.x) |
+		EXYNOS_CIOCBOFF_VERTICAL(buf->rect.y));
 	fimc_write(ctx, cfg, EXYNOS_CIOCBOFF);
-	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
-		EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(buf->rect.x) |
+		EXYNOS_CIOCROFF_VERTICAL(buf->rect.y));
 	fimc_write(ctx, cfg, EXYNOS_CIOCROFF);
-
-	return 0;
 }
 
 static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
-		enum drm_exynos_ipp_buf_type buf_type)
+		bool enqueue)
 {
 	unsigned long flags;
 	u32 buf_num;
 	u32 cfg;
 
-	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);
+	DRM_DEBUG_KMS("buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
 
 	spin_lock_irqsave(&ctx->lock, flags);
 
 	cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);
 
-	if (buf_type == IPP_BUF_ENQUEUE)
+	if (enqueue)
 		cfg |= (1 << buf_id);
 	else
 		cfg &= ~(1 << buf_id);
@@ -1104,88 +918,29 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
 
 	buf_num = hweight32(cfg);
 
-	if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START)
+	if (enqueue && buf_num >= FIMC_BUF_START)
 		fimc_mask_irq(ctx, true);
-	else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP)
+	else if (!enqueue && buf_num <= FIMC_BUF_STOP)
 		fimc_mask_irq(ctx, false);
 
 	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
-static int fimc_dst_set_addr(struct device *dev,
-		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
-		enum drm_exynos_ipp_buf_type buf_type)
+static void fimc_dst_set_addr(struct fimc_context *ctx,
+			     struct exynos_drm_ipp_buffer *buf)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_property *property;
-	struct drm_exynos_ipp_config *config;
+	fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIOYSA(0));
+	fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIOCBSA(0));
+	fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIOCRSA(0));
 
-	if (!c_node) {
-		DRM_ERROR("failed to get c_node.\n");
-		return -EINVAL;
-	}
-
-	property = &c_node->property;
-
-	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n",
-		property->prop_id, buf_id, buf_type);
-
-	if (buf_id > FIMC_MAX_DST) {
-		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
-		return -ENOMEM;
-	}
-
-	/* address register set */
-	switch (buf_type) {
-	case IPP_BUF_ENQUEUE:
-		config = &property->config[EXYNOS_DRM_OPS_DST];
-
-		fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y],
-			EXYNOS_CIOYSA(buf_id));
-
-		if (config->fmt == DRM_FORMAT_YVU420) {
-			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
-				EXYNOS_CIOCBSA(buf_id));
-			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
-				EXYNOS_CIOCRSA(buf_id));
-		} else {
-			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB],
-				EXYNOS_CIOCBSA(buf_id));
-			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR],
-				EXYNOS_CIOCRSA(buf_id));
-		}
-		break;
-	case IPP_BUF_DEQUEUE:
-		fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id));
-		fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id));
-		fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id));
-		break;
-	default:
-		/* bypass */
-		break;
-	}
-
-	fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
-
-	return 0;
+	fimc_dst_set_buf_seq(ctx, 0, true);
 }
 
-static struct exynos_drm_ipp_ops fimc_dst_ops = {
-	.set_fmt = fimc_dst_set_fmt,
-	.set_transf = fimc_dst_set_transf,
-	.set_size = fimc_dst_set_size,
-	.set_addr = fimc_dst_set_addr,
-};
+static void fimc_stop(struct fimc_context *ctx);
 
 static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 {
 	struct fimc_context *ctx = dev_id;
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_event_work *event_work =
-		c_node->event_work;
 	int buf_id;
 
 	DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id);
@@ -1203,172 +958,21 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 
 	DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
 
-	fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+	if (ctx->task) {
+		struct exynos_drm_ipp_task *task = ctx->task;
 
-	event_work->ippdrv = ippdrv;
-	event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
-	queue_work(ippdrv->event_workq, &event_work->work);
+		ctx->task = NULL;
+		pm_runtime_mark_last_busy(ctx->dev);
+		pm_runtime_put_autosuspend(ctx->dev);
+		exynos_drm_ipp_task_done(task, 0);
+	}
+
+	fimc_dst_set_buf_seq(ctx, buf_id, false);
+	fimc_stop(ctx);
 
 	return IRQ_HANDLED;
 }
 
-static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
-{
-	struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
-
-	prop_list->version = 1;
-	prop_list->writeback = 1;
-	prop_list->refresh_min = FIMC_REFRESH_MIN;
-	prop_list->refresh_max = FIMC_REFRESH_MAX;
-	prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
-				(1 << EXYNOS_DRM_FLIP_VERTICAL) |
-				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
-	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
-				(1 << EXYNOS_DRM_DEGREE_90) |
-				(1 << EXYNOS_DRM_DEGREE_180) |
-				(1 << EXYNOS_DRM_DEGREE_270);
-	prop_list->csc = 1;
-	prop_list->crop = 1;
-	prop_list->crop_max.hsize = FIMC_CROP_MAX;
-	prop_list->crop_max.vsize = FIMC_CROP_MAX;
-	prop_list->crop_min.hsize = FIMC_CROP_MIN;
-	prop_list->crop_min.vsize = FIMC_CROP_MIN;
-	prop_list->scale = 1;
-	prop_list->scale_max.hsize = FIMC_SCALE_MAX;
-	prop_list->scale_max.vsize = FIMC_SCALE_MAX;
-	prop_list->scale_min.hsize = FIMC_SCALE_MIN;
-	prop_list->scale_min.vsize = FIMC_SCALE_MIN;
-
-	return 0;
-}
-
-static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
-{
-	switch (flip) {
-	case EXYNOS_DRM_FLIP_NONE:
-	case EXYNOS_DRM_FLIP_VERTICAL:
-	case EXYNOS_DRM_FLIP_HORIZONTAL:
-	case EXYNOS_DRM_FLIP_BOTH:
-		return true;
-	default:
-		DRM_DEBUG_KMS("invalid flip\n");
-		return false;
-	}
-}
-
-static int fimc_ippdrv_check_property(struct device *dev,
-		struct drm_exynos_ipp_property *property)
-{
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
-	struct drm_exynos_ipp_config *config;
-	struct drm_exynos_pos *pos;
-	struct drm_exynos_sz *sz;
-	bool swap;
-	int i;
-
-	for_each_ipp_ops(i) {
-		if ((i == EXYNOS_DRM_OPS_SRC) &&
-			(property->cmd == IPP_CMD_WB))
-			continue;
-
-		config = &property->config[i];
-		pos = &config->pos;
-		sz = &config->sz;
-
-		/* check for flip */
-		if (!fimc_check_drm_flip(config->flip)) {
-			DRM_ERROR("invalid flip.\n");
-			goto err_property;
-		}
-
-		/* check for degree */
-		switch (config->degree) {
-		case EXYNOS_DRM_DEGREE_90:
-		case EXYNOS_DRM_DEGREE_270:
-			swap = true;
-			break;
-		case EXYNOS_DRM_DEGREE_0:
-		case EXYNOS_DRM_DEGREE_180:
-			swap = false;
-			break;
-		default:
-			DRM_ERROR("invalid degree.\n");
-			goto err_property;
-		}
-
-		/* check for buffer bound */
-		if ((pos->x + pos->w > sz->hsize) ||
-			(pos->y + pos->h > sz->vsize)) {
-			DRM_ERROR("out of buf bound.\n");
-			goto err_property;
-		}
-
-		/* check for crop */
-		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
-			if (swap) {
-				if ((pos->h < pp->crop_min.hsize) ||
-					(sz->vsize > pp->crop_max.hsize) ||
-					(pos->w < pp->crop_min.vsize) ||
-					(sz->hsize > pp->crop_max.vsize)) {
-					DRM_ERROR("out of crop size.\n");
-					goto err_property;
-				}
-			} else {
-				if ((pos->w < pp->crop_min.hsize) ||
-					(sz->hsize > pp->crop_max.hsize) ||
-					(pos->h < pp->crop_min.vsize) ||
-					(sz->vsize > pp->crop_max.vsize)) {
-					DRM_ERROR("out of crop size.\n");
-					goto err_property;
-				}
-			}
-		}
-
-		/* check for scale */
-		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
-			if (swap) {
-				if ((pos->h < pp->scale_min.hsize) ||
-					(sz->vsize > pp->scale_max.hsize) ||
-					(pos->w < pp->scale_min.vsize) ||
-					(sz->hsize > pp->scale_max.vsize)) {
-					DRM_ERROR("out of scale size.\n");
-					goto err_property;
-				}
-			} else {
-				if ((pos->w < pp->scale_min.hsize) ||
-					(sz->hsize > pp->scale_max.hsize) ||
-					(pos->h < pp->scale_min.vsize) ||
-					(sz->vsize > pp->scale_max.vsize)) {
-					DRM_ERROR("out of scale size.\n");
-					goto err_property;
-				}
-			}
-		}
-	}
-
-	return 0;
-
-err_property:
-	for_each_ipp_ops(i) {
-		if ((i == EXYNOS_DRM_OPS_SRC) &&
-			(property->cmd == IPP_CMD_WB))
-			continue;
-
-		config = &property->config[i];
-		pos = &config->pos;
-		sz = &config->sz;
-
-		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
-			i ? "dst" : "src", config->flip, config->degree,
-			pos->x, pos->y, pos->w, pos->h,
-			sz->hsize, sz->vsize);
-	}
-
-	return -EINVAL;
-}
-
 static void fimc_clear_addr(struct fimc_context *ctx)
 {
 	int i;
@@ -1386,10 +990,8 @@ static void fimc_clear_addr(struct fimc_context *ctx)
 	}
 }
 
-static int fimc_ippdrv_reset(struct device *dev)
+static void fimc_reset(struct fimc_context *ctx)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-
 	/* reset h/w block */
 	fimc_sw_reset(ctx);
 
@@ -1397,82 +999,26 @@ static int fimc_ippdrv_reset(struct device *dev)
 	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
 
 	fimc_clear_addr(ctx);
-
-	return 0;
 }
 
-static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+static void fimc_start(struct fimc_context *ctx)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
-	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
-	struct drm_exynos_ipp_property *property;
-	struct drm_exynos_ipp_config *config;
-	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
-	struct drm_exynos_ipp_set_wb set_wb;
-	int ret, i;
 	u32 cfg0, cfg1;
 
-	DRM_DEBUG_KMS("cmd[%d]\n", cmd);
-
-	if (!c_node) {
-		DRM_ERROR("failed to get c_node.\n");
-		return -EINVAL;
-	}
-
-	property = &c_node->property;
-
 	fimc_mask_irq(ctx, true);
 
-	for_each_ipp_ops(i) {
-		config = &property->config[i];
-		img_pos[i] = config->pos;
-	}
-
-	ret = fimc_set_prescaler(ctx, &ctx->sc,
-		&img_pos[EXYNOS_DRM_OPS_SRC],
-		&img_pos[EXYNOS_DRM_OPS_DST]);
-	if (ret) {
-		dev_err(dev, "failed to set prescaler.\n");
-		return ret;
-	}
-
-	/* If set ture, we can save jpeg about screen */
+	/* If set true, we can save jpeg about screen */
 	fimc_handle_jpeg(ctx, false);
 	fimc_set_scaler(ctx, &ctx->sc);
 
-	switch (cmd) {
-	case IPP_CMD_M2M:
-		fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
-		fimc_handle_lastend(ctx, false);
+	fimc_set_type_ctrl(ctx);
+	fimc_handle_lastend(ctx, false);
 
-		/* setup dma */
-		cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
-		cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
-		cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
-		fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
-		break;
-	case IPP_CMD_WB:
-		fimc_set_type_ctrl(ctx, FIMC_WB_A);
-		fimc_handle_lastend(ctx, true);
-
-		/* setup FIMD */
-		ret = fimc_set_camblk_fimd0_wb(ctx);
-		if (ret < 0) {
-			dev_err(dev, "camblk setup failed.\n");
-			return ret;
-		}
-
-		set_wb.enable = 1;
-		set_wb.refresh = property->refresh_rate;
-		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
-		break;
-	case IPP_CMD_OUTPUT:
-	default:
-		ret = -EINVAL;
-		dev_err(dev, "invalid operations.\n");
-		return ret;
-	}
+	/* setup dma */
+	cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
+	cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+	cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+	fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
 
 	/* Reset status */
 	fimc_write(ctx, 0x0, EXYNOS_CISTATUS);
@@ -1498,36 +1044,18 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
 
 	fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
 
-	if (cmd == IPP_CMD_M2M)
-		fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
-
-	return 0;
+	fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
 }
 
-static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+static void fimc_stop(struct fimc_context *ctx)
 {
-	struct fimc_context *ctx = get_fimc_context(dev);
-	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
 	u32 cfg;
 
-	DRM_DEBUG_KMS("cmd[%d]\n", cmd);
-
-	switch (cmd) {
-	case IPP_CMD_M2M:
-		/* Source clear */
-		cfg = fimc_read(ctx, EXYNOS_MSCTRL);
-		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
-		cfg &= ~EXYNOS_MSCTRL_ENVID;
-		fimc_write(ctx, cfg, EXYNOS_MSCTRL);
-		break;
-	case IPP_CMD_WB:
-		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
-		break;
-	case IPP_CMD_OUTPUT:
-	default:
-		dev_err(dev, "invalid operations.\n");
-		break;
-	}
+	/* Source clear */
+	cfg = fimc_read(ctx, EXYNOS_MSCTRL);
+	cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+	cfg &= ~EXYNOS_MSCTRL_ENVID;
+	fimc_write(ctx, cfg, EXYNOS_MSCTRL);
 
 	fimc_mask_irq(ctx, false);
 
@@ -1545,6 +1073,87 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
 	fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
 }
 
+static int fimc_commit(struct exynos_drm_ipp *ipp,
+			  struct exynos_drm_ipp_task *task)
+{
+	struct fimc_context *ctx =
+			container_of(ipp, struct fimc_context, ipp);
+
+	pm_runtime_get_sync(ctx->dev);
+	ctx->task = task;
+
+	fimc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier);
+	fimc_src_set_size(ctx, &task->src);
+	fimc_src_set_transf(ctx, DRM_MODE_ROTATE_0);
+	fimc_src_set_addr(ctx, &task->src);
+	fimc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier);
+	fimc_dst_set_transf(ctx, task->transform.rotation);
+	fimc_dst_set_size(ctx, &task->dst);
+	fimc_dst_set_addr(ctx, &task->dst);
+	fimc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect);
+	fimc_start(ctx);
+
+	return 0;
+}
+
+static void fimc_abort(struct exynos_drm_ipp *ipp,
+			  struct exynos_drm_ipp_task *task)
+{
+	struct fimc_context *ctx =
+			container_of(ipp, struct fimc_context, ipp);
+
+	fimc_reset(ctx);
+
+	if (ctx->task) {
+		struct exynos_drm_ipp_task *task = ctx->task;
+
+		ctx->task = NULL;
+		pm_runtime_mark_last_busy(ctx->dev);
+		pm_runtime_put_autosuspend(ctx->dev);
+		exynos_drm_ipp_task_done(task, -EIO);
+	}
+}
+
+static struct exynos_drm_ipp_funcs ipp_funcs = {
+	.commit = fimc_commit,
+	.abort = fimc_abort,
+};
+
+static int fimc_bind(struct device *dev, struct device *master, void *data)
+{
+	struct fimc_context *ctx = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct exynos_drm_ipp *ipp = &ctx->ipp;
+
+	ctx->drm_dev = drm_dev;
+	drm_iommu_attach_device(drm_dev, dev);
+
+	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
+			DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
+			ctx->formats, ctx->num_formats, "fimc");
+
+	dev_info(dev, "The exynos fimc has been probed successfully\n");
+
+	return 0;
+}
+
+static void fimc_unbind(struct device *dev, struct device *master,
+			void *data)
+{
+	struct fimc_context *ctx = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct exynos_drm_ipp *ipp = &ctx->ipp;
+
+	exynos_drm_ipp_unregister(drm_dev, ipp);
+	drm_iommu_detach_device(drm_dev, dev);
+}
+
+static const struct component_ops fimc_component_ops = {
+	.bind	= fimc_bind,
+	.unbind = fimc_unbind,
+};
+
 static void fimc_put_clocks(struct fimc_context *ctx)
 {
 	int i;
@@ -1559,7 +1168,7 @@ static void fimc_put_clocks(struct fimc_context *ctx)
 
 static int fimc_setup_clocks(struct fimc_context *ctx)
 {
-	struct device *fimc_dev = ctx->ippdrv.dev;
+	struct device *fimc_dev = ctx->dev;
 	struct device *dev;
 	int ret, i;
 
@@ -1574,8 +1183,6 @@ static int fimc_setup_clocks(struct fimc_context *ctx)
 
 		ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
 		if (IS_ERR(ctx->clocks[i])) {
-			if (i >= FIMC_CLK_MUX)
-				break;
 			ret = PTR_ERR(ctx->clocks[i]);
 			dev_err(fimc_dev, "failed to get clock: %s\n",
 						fimc_clock_names[i]);
@@ -1583,20 +1190,6 @@ static int fimc_setup_clocks(struct fimc_context *ctx)
 		}
 	}
 
-	/* Optional FIMC LCLK parent clock setting */
-	if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) {
-		ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX],
-				     ctx->clocks[FIMC_CLK_PARENT]);
-		if (ret < 0) {
-			dev_err(fimc_dev, "failed to set parent.\n");
-			goto e_clk_free;
-		}
-	}
-
-	ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency);
-	if (ret < 0)
-		goto e_clk_free;
-
 	ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
 	if (!ret)
 		return ret;
@@ -1605,57 +1198,118 @@ e_clk_free:
 	return ret;
 }
 
-static int fimc_parse_dt(struct fimc_context *ctx)
+int exynos_drm_check_fimc_device(struct device *dev)
 {
-	struct device_node *node = ctx->ippdrv.dev->of_node;
+	int id = of_alias_get_id(dev->of_node, "fimc");
 
-	/* Handle only devices that support the LCD Writeback data path */
-	if (!of_property_read_bool(node, "samsung,lcd-wb"))
-		return -ENODEV;
-
-	if (of_property_read_u32(node, "clock-frequency",
-					&ctx->clk_frequency))
-		ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY;
-
-	ctx->id = of_alias_get_id(node, "fimc");
-
-	if (ctx->id < 0) {
-		dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n");
-		return -EINVAL;
-	}
-
-	return 0;
+	if (id >= 0 && (BIT(id) & fimc_mask))
+		return 0;
+	return -ENODEV;
 }
 
+static const unsigned int fimc_formats[] = {
+	DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565,
+	DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61,
+	DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU,
+	DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422,
+	DRM_FORMAT_YUV444,
+};
+
+static const unsigned int fimc_tiled_formats[] = {
+	DRM_FORMAT_NV12, DRM_FORMAT_NV21,
+};
+
+static const struct drm_exynos_ipp_limit fimc_4210_limits_v1[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) },
+	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4224, 2 }, .v = { 16, 0, 2 }) },
+	{ IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1920 }, .v = { 128, 0 }) },
+	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
+			  .v = { (1 << 16) / 64, (1 << 16) * 64 }) },
+};
+
+static const struct drm_exynos_ipp_limit fimc_4210_limits_v2[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) },
+	{ IPP_SIZE_LIMIT(AREA, .h = { 16, 1920, 2 }, .v = { 16, 0, 2 }) },
+	{ IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1366 }, .v = { 128, 0 }) },
+	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
+			  .v = { (1 << 16) / 64, (1 << 16) * 64 }) },
+};
+
+static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v1[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) },
+	{ IPP_SIZE_LIMIT(AREA, .h = { 128, 1920, 2 }, .v = { 128, 0, 2 }) },
+	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
+			  .v = { (1 << 16) / 64, (1 << 16) * 64 }) },
+};
+
+static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v2[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) },
+	{ IPP_SIZE_LIMIT(AREA, .h = { 128, 1366, 2 }, .v = { 128, 0, 2 }) },
+	{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 },
+			  .v = { (1 << 16) / 64, (1 << 16) * 64 }) },
+};
+
 static int fimc_probe(struct platform_device *pdev)
 {
+	const struct drm_exynos_ipp_limit *limits;
+	struct exynos_drm_ipp_formats *formats;
 	struct device *dev = &pdev->dev;
 	struct fimc_context *ctx;
 	struct resource *res;
-	struct exynos_drm_ippdrv *ippdrv;
 	int ret;
+	int i, j, num_limits, num_formats;
 
-	if (!dev->of_node) {
-		dev_err(dev, "device tree node not found.\n");
+	if (exynos_drm_check_fimc_device(dev) != 0)
 		return -ENODEV;
-	}
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->ippdrv.dev = dev;
+	ctx->dev = dev;
+	ctx->id = of_alias_get_id(dev->of_node, "fimc");
 
-	ret = fimc_parse_dt(ctx);
-	if (ret < 0)
-		return ret;
+	/* construct formats/limits array */
+	num_formats = ARRAY_SIZE(fimc_formats) + ARRAY_SIZE(fimc_tiled_formats);
+	formats = devm_kzalloc(dev, sizeof(*formats) * num_formats, GFP_KERNEL);
+	if (!formats)
+		return -ENOMEM;
 
-	ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
-						"samsung,sysreg");
-	if (IS_ERR(ctx->sysreg)) {
-		dev_err(dev, "syscon regmap lookup failed.\n");
-		return PTR_ERR(ctx->sysreg);
+	/* linear formats */
+	if (ctx->id < 3) {
+		limits = fimc_4210_limits_v1;
+		num_limits = ARRAY_SIZE(fimc_4210_limits_v1);
+	} else {
+		limits = fimc_4210_limits_v2;
+		num_limits = ARRAY_SIZE(fimc_4210_limits_v2);
 	}
+	for (i = 0; i < ARRAY_SIZE(fimc_formats); i++) {
+		formats[i].fourcc = fimc_formats[i];
+		formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
+				  DRM_EXYNOS_IPP_FORMAT_DESTINATION;
+		formats[i].limits = limits;
+		formats[i].num_limits = num_limits;
+	}
+
+	/* tiled formats */
+	if (ctx->id < 3) {
+		limits = fimc_4210_limits_tiled_v1;
+		num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v1);
+	} else {
+		limits = fimc_4210_limits_tiled_v2;
+		num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v2);
+	}
+	for (j = i, i = 0; i < ARRAY_SIZE(fimc_tiled_formats); j++, i++) {
+		formats[j].fourcc = fimc_tiled_formats[i];
+		formats[j].modifier = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE;
+		formats[j].type = DRM_EXYNOS_IPP_FORMAT_SOURCE |
+				  DRM_EXYNOS_IPP_FORMAT_DESTINATION;
+		formats[j].limits = limits;
+		formats[j].num_limits = num_limits;
+	}
+
+	ctx->formats = formats;
+	ctx->num_formats = num_formats;
 
 	/* resource memory */
 	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1670,9 +1324,8 @@ static int fimc_probe(struct platform_device *pdev)
 		return -ENOENT;
 	}
 
-	ctx->irq = res->start;
-	ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler,
-		IRQF_ONESHOT, "drm_fimc", ctx);
+	ret = devm_request_irq(dev, res->start, fimc_irq_handler,
+		0, dev_name(dev), ctx);
 	if (ret < 0) {
 		dev_err(dev, "failed to request irq.\n");
 		return ret;
@@ -1682,39 +1335,24 @@ static int fimc_probe(struct platform_device *pdev)
 	if (ret < 0)
 		return ret;
 
-	ippdrv = &ctx->ippdrv;
-	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
-	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
-	ippdrv->check_property = fimc_ippdrv_check_property;
-	ippdrv->reset = fimc_ippdrv_reset;
-	ippdrv->start = fimc_ippdrv_start;
-	ippdrv->stop = fimc_ippdrv_stop;
-	ret = fimc_init_prop_list(ippdrv);
-	if (ret < 0) {
-		dev_err(dev, "failed to init property list.\n");
-		goto err_put_clk;
-	}
-
-	DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
-
 	spin_lock_init(&ctx->lock);
 	platform_set_drvdata(pdev, ctx);
 
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, FIMC_AUTOSUSPEND_DELAY);
 	pm_runtime_enable(dev);
 
-	ret = exynos_drm_ippdrv_register(ippdrv);
-	if (ret < 0) {
-		dev_err(dev, "failed to register drm fimc device.\n");
+	ret = component_add(dev, &fimc_component_ops);
+	if (ret)
 		goto err_pm_dis;
-	}
 
 	dev_info(dev, "drm fimc registered successfully.\n");
 
 	return 0;
 
 err_pm_dis:
+	pm_runtime_dont_use_autosuspend(dev);
 	pm_runtime_disable(dev);
-err_put_clk:
 	fimc_put_clocks(ctx);
 
 	return ret;
@@ -1724,42 +1362,24 @@ static int fimc_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct fimc_context *ctx = get_fimc_context(dev);
-	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 
-	exynos_drm_ippdrv_unregister(ippdrv);
+	component_del(dev, &fimc_component_ops);
+	pm_runtime_dont_use_autosuspend(dev);
+	pm_runtime_disable(dev);
 
 	fimc_put_clocks(ctx);
-	pm_runtime_set_suspended(dev);
-	pm_runtime_disable(dev);
 
 	return 0;
 }
 
 #ifdef CONFIG_PM
-static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
-{
-	DRM_DEBUG_KMS("enable[%d]\n", enable);
-
-	if (enable) {
-		clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
-		clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
-		ctx->suspended = false;
-	} else {
-		clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
-		clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
-		ctx->suspended = true;
-	}
-
-	return 0;
-}
-
 static int fimc_runtime_suspend(struct device *dev)
 {
 	struct fimc_context *ctx = get_fimc_context(dev);
 
 	DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-	return  fimc_clk_ctrl(ctx, false);
+	clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+	return 0;
 }
 
 static int fimc_runtime_resume(struct device *dev)
@@ -1767,8 +1387,7 @@ static int fimc_runtime_resume(struct device *dev)
 	struct fimc_context *ctx = get_fimc_context(dev);
 
 	DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
-	return  fimc_clk_ctrl(ctx, true);
+	return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
 }
 #endif
 
@@ -1795,4 +1414,3 @@ struct platform_driver fimc_driver = {
 		.pm	= &fimc_pm_ops,
 	},
 };
-
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
deleted file mode 100644
index 127a424c5fdf..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *
- * Authors:
- *	Eunchul Kim <chulspro.kim@samsung.com>
- *	Jinyoung Jeon <jy0.jeon@samsung.com>
- *	Sangmin Lee <lsmin.lee@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_FIMC_H_
-#define _EXYNOS_DRM_FIMC_H_
-
-/*
- * TODO
- * FIMD output interface notifier callback.
- */
-
-#endif /* _EXYNOS_DRM_FIMC_H_ */

From 01fb9185dc180940f90510215ef8764d6155d088 Mon Sep 17 00:00:00 2001
From: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Date: Wed, 9 May 2018 10:59:26 +0200
Subject: [PATCH 0667/1286] drm/exynos: Add driver for Exynos Scaler module

Exynos Scaler is a hardware module which processes graphics data fetched
from memory and transfers the resulting data to another memory buffer.
Graphics data can be up/down-scaled, rotated, flipped, and converted to a
different color space. Scaler hardware modules are part of Exynos5420 and
newer Exynos SoCs.
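
For reference, the scaling factors programmed into the hardware are plain
16.16 fixed-point source/destination quotients, with the source axes swapped
for 90/270 degree rotation (see scaler_set_hv_ratio() in the diff below). A
minimal stand-alone sketch of that computation, using hypothetical names,
could look like:

	#include <stdbool.h>
	#include <stdint.h>

	struct rect { uint32_t w, h; };

	/* 16.16 fixed-point scale ratios, as scaler_set_hv_ratio() computes
	 * them; for 90/270 degree rotation the source axes are swapped. */
	static void compute_scale_ratios(const struct rect *src,
					 const struct rect *dst,
					 bool rot90_or_270,
					 uint32_t *h_ratio, uint32_t *v_ratio)
	{
		if (rot90_or_270) {
			*h_ratio = (src->h << 16) / dst->w;
			*v_ratio = (src->w << 16) / dst->h;
		} else {
			*h_ratio = (src->w << 16) / dst->w;
			*v_ratio = (src->h << 16) / dst->h;
		}
	}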

Signed-off-by: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Rob Herring <robh@kernel.org>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 .../bindings/gpu/samsung-scaler.txt           |  27 +
 drivers/gpu/drm/exynos/Kconfig                |   6 +
 drivers/gpu/drm/exynos/Makefile               |   1 +
 drivers/gpu/drm/exynos/exynos_drm_drv.c       |   3 +
 drivers/gpu/drm/exynos/exynos_drm_drv.h       |   1 +
 drivers/gpu/drm/exynos/exynos_drm_scaler.c    | 694 ++++++++++++++++++
 drivers/gpu/drm/exynos/regs-scaler.h          | 426 +++++++++++
 7 files changed, 1158 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/gpu/samsung-scaler.txt
 create mode 100644 drivers/gpu/drm/exynos/exynos_drm_scaler.c
 create mode 100644 drivers/gpu/drm/exynos/regs-scaler.h

diff --git a/Documentation/devicetree/bindings/gpu/samsung-scaler.txt b/Documentation/devicetree/bindings/gpu/samsung-scaler.txt
new file mode 100644
index 000000000000..9c3d98105dfd
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/samsung-scaler.txt
@@ -0,0 +1,27 @@
+* Samsung Exynos Image Scaler
+
+Required properties:
+  - compatible : value should be one of the following:
+	(a) "samsung,exynos5420-scaler" for Scaler IP in Exynos5420
+	(b) "samsung,exynos5433-scaler" for Scaler IP in Exynos5433
+
+  - reg : Physical base address of the IP registers and length of memory
+	  mapped region.
+
+  - interrupts : Interrupt specifier for scaler interrupt, according to format
+		 specific to interrupt parent.
+
+  - clocks : Clock specifier for scaler clock, according to generic clock
+	     bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
+
+  - clock-names : Names of clocks. For exynos scaler, it should be "mscl"
+		  on 5420 and "pclk", "aclk" and "aclk_xiu" on 5433.
+
+Example:
+	scaler@12800000 {
+		compatible = "samsung,exynos5420-scaler";
+		reg = <0x12800000 0x1294>;
+		interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clock CLK_MSCL0>;
+		clock-names = "mscl";
+	};
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 54f5703b37e8..208bc27be3cc 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -110,6 +110,12 @@ config DRM_EXYNOS_ROTATOR
 	help
 	  Choose this option if you want to use Exynos Rotator for DRM.
 
+config DRM_EXYNOS_SCALER
+	bool "Scaler"
+	select DRM_EXYNOS_IPP
+	help
+	  Choose this option if you want to use Exynos Scaler for DRM.
+
 config DRM_EXYNOS_GSC
 	bool "GScaler"
 	depends on VIDEO_SAMSUNG_EXYNOS_GSC=n
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index bdf4212dde7b..3b323f1e0475 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -21,6 +21,7 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)	+= exynos_drm_g2d.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)	+= exynos_drm_ipp.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC)	+= exynos_drm_fimc.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR)	+= exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_SCALER)	+= exynos_drm_scaler.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_GSC)	+= exynos_drm_gsc.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_MIC)     += exynos_drm_mic.o
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 7ba13c122d14..f55ce44b0c0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -266,6 +266,9 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
 	}, {
 		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
 		DRM_COMPONENT_DRIVER
+	}, {
+		DRV_PTR(scaler_driver, CONFIG_DRM_EXYNOS_SCALER),
+		DRM_COMPONENT_DRIVER
 	}, {
 		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
 		DRM_COMPONENT_DRIVER
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 0834e7e28c99..c07e6f380e60 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -298,6 +298,7 @@ extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
 extern struct platform_driver fimc_driver;
 extern struct platform_driver rotator_driver;
+extern struct platform_driver scaler_driver;
 extern struct platform_driver gsc_driver;
 extern struct platform_driver mic_driver;
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
new file mode 100644
index 000000000000..63b05b7c846a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -0,0 +1,694 @@
+/*
+ * Copyright (C) 2017 Samsung Electronics Co.Ltd
+ * Author:
+ *	Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/component.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-scaler.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_ipp.h"
+
+#define scaler_read(offset)		readl(scaler->regs + (offset))
+#define scaler_write(cfg, offset)	writel(cfg, scaler->regs + (offset))
+#define SCALER_MAX_CLK			4
+#define SCALER_AUTOSUSPEND_DELAY	2000
+
+struct scaler_data {
+	const char	*clk_name[SCALER_MAX_CLK];
+	unsigned int	num_clk;
+	const struct exynos_drm_ipp_formats *formats;
+	unsigned int	num_formats;
+};
+
+struct scaler_context {
+	struct exynos_drm_ipp		ipp;
+	struct drm_device		*drm_dev;
+	struct device			*dev;
+	void __iomem			*regs;
+	struct clk			*clock[SCALER_MAX_CLK];
+	struct exynos_drm_ipp_task	*task;
+	const struct scaler_data	*scaler_data;
+};
+
+static u32 scaler_get_format(u32 drm_fmt)
+{
+	switch (drm_fmt) {
+	case DRM_FORMAT_NV21:
+		return SCALER_YUV420_2P_UV;
+	case DRM_FORMAT_NV12:
+		return SCALER_YUV420_2P_VU;
+	case DRM_FORMAT_YUV420:
+		return SCALER_YUV420_3P;
+	case DRM_FORMAT_YUYV:
+		return SCALER_YUV422_1P_YUYV;
+	case DRM_FORMAT_UYVY:
+		return SCALER_YUV422_1P_UYVY;
+	case DRM_FORMAT_YVYU:
+		return SCALER_YUV422_1P_YVYU;
+	case DRM_FORMAT_NV61:
+		return SCALER_YUV422_2P_UV;
+	case DRM_FORMAT_NV16:
+		return SCALER_YUV422_2P_VU;
+	case DRM_FORMAT_YUV422:
+		return SCALER_YUV422_3P;
+	case DRM_FORMAT_NV42:
+		return SCALER_YUV444_2P_UV;
+	case DRM_FORMAT_NV24:
+		return SCALER_YUV444_2P_VU;
+	case DRM_FORMAT_YUV444:
+		return SCALER_YUV444_3P;
+	case DRM_FORMAT_RGB565:
+		return SCALER_RGB_565;
+	case DRM_FORMAT_XRGB1555:
+		return SCALER_ARGB1555;
+	case DRM_FORMAT_ARGB1555:
+		return SCALER_ARGB1555;
+	case DRM_FORMAT_XRGB4444:
+		return SCALER_ARGB4444;
+	case DRM_FORMAT_ARGB4444:
+		return SCALER_ARGB4444;
+	case DRM_FORMAT_XRGB8888:
+		return SCALER_ARGB8888;
+	case DRM_FORMAT_ARGB8888:
+		return SCALER_ARGB8888;
+	case DRM_FORMAT_RGBX8888:
+		return SCALER_RGBA8888;
+	case DRM_FORMAT_RGBA8888:
+		return SCALER_RGBA8888;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static inline void scaler_enable_int(struct scaler_context *scaler)
+{
+	u32 val;
+
+	val = SCALER_INT_EN_TIMEOUT |
+		SCALER_INT_EN_ILLEGAL_BLEND |
+		SCALER_INT_EN_ILLEGAL_RATIO |
+		SCALER_INT_EN_ILLEGAL_DST_HEIGHT |
+		SCALER_INT_EN_ILLEGAL_DST_WIDTH |
+		SCALER_INT_EN_ILLEGAL_DST_V_POS |
+		SCALER_INT_EN_ILLEGAL_DST_H_POS |
+		SCALER_INT_EN_ILLEGAL_DST_C_SPAN |
+		SCALER_INT_EN_ILLEGAL_DST_Y_SPAN |
+		SCALER_INT_EN_ILLEGAL_DST_CR_BASE |
+		SCALER_INT_EN_ILLEGAL_DST_CB_BASE |
+		SCALER_INT_EN_ILLEGAL_DST_Y_BASE |
+		SCALER_INT_EN_ILLEGAL_DST_COLOR |
+		SCALER_INT_EN_ILLEGAL_SRC_HEIGHT |
+		SCALER_INT_EN_ILLEGAL_SRC_WIDTH |
+		SCALER_INT_EN_ILLEGAL_SRC_CV_POS |
+		SCALER_INT_EN_ILLEGAL_SRC_CH_POS |
+		SCALER_INT_EN_ILLEGAL_SRC_YV_POS |
+		SCALER_INT_EN_ILLEGAL_SRC_YH_POS |
+		SCALER_INT_EN_ILLEGAL_DST_SPAN |
+		SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN |
+		SCALER_INT_EN_ILLEGAL_SRC_CR_BASE |
+		SCALER_INT_EN_ILLEGAL_SRC_CB_BASE |
+		SCALER_INT_EN_ILLEGAL_SRC_Y_BASE |
+		SCALER_INT_EN_ILLEGAL_SRC_COLOR |
+		SCALER_INT_EN_FRAME_END;
+	scaler_write(val, SCALER_INT_EN);
+}
+
+static inline void scaler_set_src_fmt(struct scaler_context *scaler,
+	u32 src_fmt)
+{
+	u32 val;
+
+	val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt);
+	scaler_write(val, SCALER_SRC_CFG);
+}
+
+static inline void scaler_set_src_base(struct scaler_context *scaler,
+	struct exynos_drm_ipp_buffer *src_buf)
+{
+	static unsigned int bases[] = {
+		SCALER_SRC_Y_BASE,
+		SCALER_SRC_CB_BASE,
+		SCALER_SRC_CR_BASE,
+	};
+	int i;
+
+	for (i = 0; i < src_buf->format->num_planes; ++i)
+		scaler_write(src_buf->dma_addr[i], bases[i]);
+}
+
+static inline void scaler_set_src_span(struct scaler_context *scaler,
+	struct exynos_drm_ipp_buffer *src_buf)
+{
+	u32 val;
+
+	val = SCALER_SRC_SPAN_SET_Y_SPAN(src_buf->buf.pitch[0] /
+		src_buf->format->cpp[0]);
+
+	if (src_buf->format->num_planes > 1)
+		val |= SCALER_SRC_SPAN_SET_C_SPAN(src_buf->buf.pitch[1]);
+
+	scaler_write(val, SCALER_SRC_SPAN);
+}
+
+static inline void scaler_set_src_luma_pos(struct scaler_context *scaler,
+	struct drm_exynos_ipp_task_rect *src_pos)
+{
+	u32 val;
+
+	val = SCALER_SRC_Y_POS_SET_YH_POS(src_pos->x << 2);
+	val |=  SCALER_SRC_Y_POS_SET_YV_POS(src_pos->y << 2);
+	scaler_write(val, SCALER_SRC_Y_POS);
+	scaler_write(val, SCALER_SRC_C_POS); /* ATTENTION! */
+}
+
+static inline void scaler_set_src_wh(struct scaler_context *scaler,
+	struct drm_exynos_ipp_task_rect *src_pos)
+{
+	u32 val;
+
+	val = SCALER_SRC_WH_SET_WIDTH(src_pos->w);
+	val |= SCALER_SRC_WH_SET_HEIGHT(src_pos->h);
+	scaler_write(val, SCALER_SRC_WH);
+}
+
+static inline void scaler_set_dst_fmt(struct scaler_context *scaler,
+	u32 dst_fmt)
+{
+	u32 val;
+
+	val = SCALER_DST_CFG_SET_COLOR_FORMAT(dst_fmt);
+	scaler_write(val, SCALER_DST_CFG);
+}
+
+static inline void scaler_set_dst_base(struct scaler_context *scaler,
+	struct exynos_drm_ipp_buffer *dst_buf)
+{
+	static unsigned int bases[] = {
+		SCALER_DST_Y_BASE,
+		SCALER_DST_CB_BASE,
+		SCALER_DST_CR_BASE,
+	};
+	int i;
+
+	for (i = 0; i < dst_buf->format->num_planes; ++i)
+		scaler_write(dst_buf->dma_addr[i], bases[i]);
+}
+
+static inline void scaler_set_dst_span(struct scaler_context *scaler,
+	struct exynos_drm_ipp_buffer *dst_buf)
+{
+	u32 val;
+
+	val = SCALER_DST_SPAN_SET_Y_SPAN(dst_buf->buf.pitch[0] /
+		dst_buf->format->cpp[0]);
+
+	if (dst_buf->format->num_planes > 1)
+		val |= SCALER_DST_SPAN_SET_C_SPAN(dst_buf->buf.pitch[1]);
+
+	scaler_write(val, SCALER_DST_SPAN);
+}
+
+static inline void scaler_set_dst_luma_pos(struct scaler_context *scaler,
+	struct drm_exynos_ipp_task_rect *dst_pos)
+{
+	u32 val;
+
+	val = SCALER_DST_WH_SET_WIDTH(dst_pos->w);
+	val |= SCALER_DST_WH_SET_HEIGHT(dst_pos->h);
+	scaler_write(val, SCALER_DST_WH);
+}
+
+static inline void scaler_set_dst_wh(struct scaler_context *scaler,
+	struct drm_exynos_ipp_task_rect *dst_pos)
+{
+	u32 val;
+
+	val = SCALER_DST_POS_SET_H_POS(dst_pos->x);
+	val |= SCALER_DST_POS_SET_V_POS(dst_pos->y);
+	scaler_write(val, SCALER_DST_POS);
+}
+
+static inline void scaler_set_hv_ratio(struct scaler_context *scaler,
+	unsigned int rotation,
+	struct drm_exynos_ipp_task_rect *src_pos,
+	struct drm_exynos_ipp_task_rect *dst_pos)
+{
+	u32 val, h_ratio, v_ratio;
+
+	if (drm_rotation_90_or_270(rotation)) {
+		h_ratio = (src_pos->h << 16) / dst_pos->w;
+		v_ratio = (src_pos->w << 16) / dst_pos->h;
+	} else {
+		h_ratio = (src_pos->w << 16) / dst_pos->w;
+		v_ratio = (src_pos->h << 16) / dst_pos->h;
+	}
+
+	val = SCALER_H_RATIO_SET(h_ratio);
+	scaler_write(val, SCALER_H_RATIO);
+
+	val = SCALER_V_RATIO_SET(v_ratio);
+	scaler_write(val, SCALER_V_RATIO);
+}
+
+static inline void scaler_set_rotation(struct scaler_context *scaler,
+	unsigned int rotation)
+{
+	u32 val = 0;
+
+	if (rotation & DRM_MODE_ROTATE_90)
+		val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_90);
+	else if (rotation & DRM_MODE_ROTATE_180)
+		val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_180);
+	else if (rotation & DRM_MODE_ROTATE_270)
+		val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_270);
+	if (rotation & DRM_MODE_REFLECT_X)
+		val |= SCALER_ROT_CFG_FLIP_X_EN;
+	if (rotation & DRM_MODE_REFLECT_Y)
+		val |= SCALER_ROT_CFG_FLIP_Y_EN;
+	scaler_write(val, SCALER_ROT_CFG);
+}
+
+static inline void scaler_set_csc(struct scaler_context *scaler,
+	const struct drm_format_info *fmt)
+{
+	static const u32 csc_mtx[2][3][3] = {
+		{ /* YCbCr to RGB */
+			{0x254, 0x000, 0x331},
+			{0x254, 0xf38, 0xe60},
+			{0x254, 0x409, 0x000},
+		},
+		{ /* RGB to YCbCr */
+			{0x084, 0x102, 0x032},
+			{0xfb4, 0xf6b, 0x0e1},
+			{0x0e1, 0xf44, 0xfdc},
+		},
+	};
+	int i, j, dir;
+
+	switch (fmt->format) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_RGBA8888:
+		dir = 1;
+		break;
+	default:
+		dir = 0;
+	}
+
+	for (i = 0; i < 3; i++)
+		for (j = 0; j < 3; j++)
+			scaler_write(csc_mtx[dir][i][j], SCALER_CSC_COEF(j, i));
+}
+
+static inline void scaler_set_timer(struct scaler_context *scaler,
+	unsigned int timer, unsigned int divider)
+{
+	u32 val;
+
+	val = SCALER_TIMEOUT_CTRL_TIMER_ENABLE;
+	val |= SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(timer);
+	val |= SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(divider);
+	scaler_write(val, SCALER_TIMEOUT_CTRL);
+}
+
+static inline void scaler_start_hw(struct scaler_context *scaler)
+{
+	scaler_write(SCALER_CFG_START_CMD, SCALER_CFG);
+}
+
+static int scaler_commit(struct exynos_drm_ipp *ipp,
+			  struct exynos_drm_ipp_task *task)
+{
+	struct scaler_context *scaler =
+			container_of(ipp, struct scaler_context, ipp);
+
+	u32 src_fmt = scaler_get_format(task->src.buf.fourcc);
+	struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;
+
+	u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
+	struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
+
+	scaler->task = task;
+
+	pm_runtime_get_sync(scaler->dev);
+
+	scaler_set_src_fmt(scaler, src_fmt);
+	scaler_set_src_base(scaler, &task->src);
+	scaler_set_src_span(scaler, &task->src);
+	scaler_set_src_luma_pos(scaler, src_pos);
+	scaler_set_src_wh(scaler, src_pos);
+
+	scaler_set_dst_fmt(scaler, dst_fmt);
+	scaler_set_dst_base(scaler, &task->dst);
+	scaler_set_dst_span(scaler, &task->dst);
+	scaler_set_dst_luma_pos(scaler, dst_pos);
+	scaler_set_dst_wh(scaler, dst_pos);
+
+	scaler_set_hv_ratio(scaler, task->transform.rotation, src_pos, dst_pos);
+	scaler_set_rotation(scaler, task->transform.rotation);
+
+	scaler_set_csc(scaler, task->src.format);
+
+	scaler_set_timer(scaler, 0xffff, 0xf);
+
+	scaler_enable_int(scaler);
+	scaler_start_hw(scaler);
+
+	return 0;
+}
+
+static struct exynos_drm_ipp_funcs ipp_funcs = {
+	.commit = scaler_commit,
+};
+
+static inline void scaler_disable_int(struct scaler_context *scaler)
+{
+	scaler_write(0, SCALER_INT_EN);
+}
+
+static inline u32 scaler_get_int_status(struct scaler_context *scaler)
+{
+	return scaler_read(SCALER_INT_STATUS);
+}
+
+static inline int scaler_task_done(u32 val)
+{
+	return val & SCALER_INT_STATUS_FRAME_END ? 0 : -EINVAL;
+}
+
+static irqreturn_t scaler_irq_handler(int irq, void *arg)
+{
+	struct scaler_context *scaler = arg;
+
+	u32 val = scaler_get_int_status(scaler);
+
+	scaler_disable_int(scaler);
+
+	if (scaler->task) {
+		struct exynos_drm_ipp_task *task = scaler->task;
+
+		scaler->task = NULL;
+		pm_runtime_mark_last_busy(scaler->dev);
+		pm_runtime_put_autosuspend(scaler->dev);
+		exynos_drm_ipp_task_done(task, scaler_task_done(val));
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int scaler_bind(struct device *dev, struct device *master, void *data)
+{
+	struct scaler_context *scaler = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct exynos_drm_ipp *ipp = &scaler->ipp;
+
+	scaler->drm_dev = drm_dev;
+	drm_iommu_attach_device(drm_dev, dev);
+
+	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
+			DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
+			scaler->scaler_data->formats,
+			scaler->scaler_data->num_formats, "scaler");
+
+	dev_info(dev, "The exynos scaler has been probed successfully\n");
+
+	return 0;
+}
+
+static void scaler_unbind(struct device *dev, struct device *master,
+			void *data)
+{
+	struct scaler_context *scaler = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	struct exynos_drm_ipp *ipp = &scaler->ipp;
+
+	exynos_drm_ipp_unregister(drm_dev, ipp);
+	drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
+}
+
+static const struct component_ops scaler_component_ops = {
+	.bind	= scaler_bind,
+	.unbind = scaler_unbind,
+};
+
+static int scaler_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource	*regs_res;
+	struct scaler_context *scaler;
+	int irq;
+	int ret, i;
+
+	scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL);
+	if (!scaler)
+		return -ENOMEM;
+
+	scaler->scaler_data =
+		(struct scaler_data *)of_device_get_match_data(dev);
+
+	scaler->dev = dev;
+	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	scaler->regs = devm_ioremap_resource(dev, regs_res);
+	if (IS_ERR(scaler->regs))
+		return PTR_ERR(scaler->regs);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(dev, irq, NULL,	scaler_irq_handler,
+					IRQF_ONESHOT, "drm_scaler", scaler);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq\n");
+		return ret;
+	}
+
+	for (i = 0; i < scaler->scaler_data->num_clk; ++i) {
+		scaler->clock[i] = devm_clk_get(dev,
+					      scaler->scaler_data->clk_name[i]);
+		if (IS_ERR(scaler->clock[i])) {
+			dev_err(dev, "failed to get clock\n");
+			return PTR_ERR(scaler->clock[i]);
+		}
+	}
+
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, SCALER_AUTOSUSPEND_DELAY);
+	pm_runtime_enable(dev);
+	platform_set_drvdata(pdev, scaler);
+
+	ret = component_add(dev, &scaler_component_ops);
+	if (ret)
+		goto err_ippdrv_register;
+
+	return 0;
+
+err_ippdrv_register:
+	pm_runtime_dont_use_autosuspend(dev);
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int scaler_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	component_del(dev, &scaler_component_ops);
+	pm_runtime_dont_use_autosuspend(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int clk_disable_unprepare_wrapper(struct clk *clk)
+{
+	clk_disable_unprepare(clk);
+
+	return 0;
+}
+
+static int scaler_clk_ctrl(struct scaler_context *scaler, bool enable)
+{
+	int (*clk_fun)(struct clk *clk), i;
+
+	clk_fun = enable ? clk_prepare_enable : clk_disable_unprepare_wrapper;
+
+	for (i = 0; i < scaler->scaler_data->num_clk; ++i)
+		clk_fun(scaler->clock[i]);
+
+	return 0;
+}
+
+static int scaler_runtime_suspend(struct device *dev)
+{
+	struct scaler_context *scaler = dev_get_drvdata(dev);
+
+	return  scaler_clk_ctrl(scaler, false);
+}
+
+static int scaler_runtime_resume(struct device *dev)
+{
+	struct scaler_context *scaler = dev_get_drvdata(dev);
+
+	return  scaler_clk_ctrl(scaler, true);
+}
+#endif
+
+static const struct dev_pm_ops scaler_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(scaler_runtime_suspend, scaler_runtime_resume, NULL)
+};
+
+static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
+	{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
+	{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
+			  .v = { 65536 * 1 / 4, 65536 * 16 }) },
+};
+
+static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_h_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
+	{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 1) },
+	{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
+			  .v = { 65536 * 1 / 4, 65536 * 16 }) },
+};
+
+static const struct drm_exynos_ipp_limit scaler_5420_one_pixel_limits[] = {
+	{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
+	{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
+			  .v = { 65536 * 1 / 4, 65536 * 16 }) },
+};
+
+static const struct exynos_drm_ipp_formats exynos5420_formats[] = {
+	/* SCALER_YUV420_2P_UV */
+	{ IPP_SRCDST_FORMAT(NV21, scaler_5420_two_pixel_hv_limits) },
+
+	/* SCALER_YUV420_2P_VU */
+	{ IPP_SRCDST_FORMAT(NV12, scaler_5420_two_pixel_hv_limits) },
+
+	/* SCALER_YUV420_3P */
+	{ IPP_SRCDST_FORMAT(YUV420, scaler_5420_two_pixel_hv_limits) },
+
+	/* SCALER_YUV422_1P_YUYV */
+	{ IPP_SRCDST_FORMAT(YUYV, scaler_5420_two_pixel_h_limits) },
+
+	/* SCALER_YUV422_1P_UYVY */
+	{ IPP_SRCDST_FORMAT(UYVY, scaler_5420_two_pixel_h_limits) },
+
+	/* SCALER_YUV422_1P_YVYU */
+	{ IPP_SRCDST_FORMAT(YVYU, scaler_5420_two_pixel_h_limits) },
+
+	/* SCALER_YUV422_2P_UV */
+	{ IPP_SRCDST_FORMAT(NV61, scaler_5420_two_pixel_h_limits) },
+
+	/* SCALER_YUV422_2P_VU */
+	{ IPP_SRCDST_FORMAT(NV16, scaler_5420_two_pixel_h_limits) },
+
+	/* SCALER_YUV422_3P */
+	{ IPP_SRCDST_FORMAT(YUV422, scaler_5420_two_pixel_h_limits) },
+
+	/* SCALER_YUV444_2P_UV */
+	{ IPP_SRCDST_FORMAT(NV42, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_YUV444_2P_VU */
+	{ IPP_SRCDST_FORMAT(NV24, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_YUV444_3P */
+	{ IPP_SRCDST_FORMAT(YUV444, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_RGB_565 */
+	{ IPP_SRCDST_FORMAT(RGB565, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_ARGB1555 */
+	{ IPP_SRCDST_FORMAT(XRGB1555, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_ARGB1555 */
+	{ IPP_SRCDST_FORMAT(ARGB1555, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_ARGB4444 */
+	{ IPP_SRCDST_FORMAT(XRGB4444, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_ARGB4444 */
+	{ IPP_SRCDST_FORMAT(ARGB4444, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_ARGB8888 */
+	{ IPP_SRCDST_FORMAT(XRGB8888, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_ARGB8888 */
+	{ IPP_SRCDST_FORMAT(ARGB8888, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_RGBA8888 */
+	{ IPP_SRCDST_FORMAT(RGBX8888, scaler_5420_one_pixel_limits) },
+
+	/* SCALER_RGBA8888 */
+	{ IPP_SRCDST_FORMAT(RGBA8888, scaler_5420_one_pixel_limits) },
+};
+
+static const struct scaler_data exynos5420_data = {
+	.clk_name	= {"mscl"},
+	.num_clk	= 1,
+	.formats	= exynos5420_formats,
+	.num_formats	= ARRAY_SIZE(exynos5420_formats),
+};
+
+static const struct scaler_data exynos5433_data = {
+	.clk_name	= {"pclk", "aclk", "aclk_xiu"},
+	.num_clk	= 3,
+	.formats	= exynos5420_formats, /* intentional */
+	.num_formats	= ARRAY_SIZE(exynos5420_formats),
+};
+
+static const struct of_device_id exynos_scaler_match[] = {
+	{
+		.compatible = "samsung,exynos5420-scaler",
+		.data = &exynos5420_data,
+	}, {
+		.compatible = "samsung,exynos5433-scaler",
+		.data = &exynos5433_data,
+	}, {
+	},
+};
+MODULE_DEVICE_TABLE(of, exynos_scaler_match);
+
+struct platform_driver scaler_driver = {
+	.probe		= scaler_probe,
+	.remove		= scaler_remove,
+	.driver		= {
+		.name	= "exynos-scaler",
+		.owner	= THIS_MODULE,
+		.pm	= &scaler_pm_ops,
+		.of_match_table = exynos_scaler_match,
+	},
+};
diff --git a/drivers/gpu/drm/exynos/regs-scaler.h b/drivers/gpu/drm/exynos/regs-scaler.h
new file mode 100644
index 000000000000..fc7ccad75e74
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-scaler.h
@@ -0,0 +1,426 @@
+/* drivers/gpu/drm/exynos/regs-scaler.h
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * Register definition file for Samsung scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_SCALER_H
+#define EXYNOS_REGS_SCALER_H
+
+/* Register part */
+
+/* Global setting */
+#define SCALER_STATUS			0x0	/* no shadow */
+#define SCALER_CFG			0x4
+
+/* Interrupt */
+#define SCALER_INT_EN			0x8	/* no shadow */
+#define SCALER_INT_STATUS		0xc	/* no shadow */
+
+/* SRC */
+#define SCALER_SRC_CFG			0x10
+#define SCALER_SRC_Y_BASE		0x14
+#define SCALER_SRC_CB_BASE		0x18
+#define SCALER_SRC_CR_BASE		0x294
+#define SCALER_SRC_SPAN			0x1c
+#define SCALER_SRC_Y_POS		0x20
+#define SCALER_SRC_WH			0x24
+#define SCALER_SRC_C_POS		0x28
+
+/* DST */
+#define SCALER_DST_CFG			0x30
+#define SCALER_DST_Y_BASE		0x34
+#define SCALER_DST_CB_BASE		0x38
+#define SCALER_DST_CR_BASE		0x298
+#define SCALER_DST_SPAN			0x3c
+#define SCALER_DST_WH			0x40
+#define SCALER_DST_POS			0x44
+
+/* Ratio */
+#define SCALER_H_RATIO			0x50
+#define SCALER_V_RATIO			0x54
+
+/* Rotation */
+#define SCALER_ROT_CFG			0x58
+
+/* Coefficient */
+/*
+ * YHCOEF_{x}{A|B|C|D}			CHCOEF_{x}{A|B|C|D}
+ *
+ *	A	B	C	D	A	B	C	D
+ * 0	60	64	68	6c	140	144	148	14c
+ * 1	70	74	78	7c	150	154	158	15c
+ * 2	80	84	88	8c	160	164	168	16c
+ * 3	90	94	98	9c	170	174	178	17c
+ * 4	a0	a4	a8	ac	180	184	188	18c
+ * 5	b0	b4	b8	bc	190	194	198	19c
+ * 6	c0	c4	c8	cc	1a0	1a4	1a8	1ac
+ * 7	d0	d4	d8	dc	1b0	1b4	1b8	1bc
+ * 8	e0	e4	e8	ec	1c0	1c4	1c8	1cc
+ *
+ *
+ * YVCOEF_{x}{A|B}			CVCOEF_{x}{A|B}
+ *
+ *	A	B			A	B
+ * 0	f0	f4			1d0	1d4
+ * 1	f8	fc			1d8	1dc
+ * 2	100	104			1e0	1e4
+ * 3	108	10c			1e8	1ec
+ * 4	110	114			1f0	1f4
+ * 5	118	11c			1f8	1fc
+ * 6	120	124			200	204
+ * 7	128	12c			208	20c
+ * 8	130	134			210	214
+ */
+#define _SCALER_HCOEF_DELTA(r, c)	((r) * 0x10 + (c) * 0x4)
+#define _SCALER_VCOEF_DELTA(r, c)	((r) * 0x8 + (c) * 0x4)
+
+#define SCALER_YHCOEF(r, c)		(0x60 + _SCALER_HCOEF_DELTA((r), (c)))
+#define SCALER_YVCOEF(r, c)		(0xf0 + _SCALER_VCOEF_DELTA((r), (c)))
+#define SCALER_CHCOEF(r, c)		(0x140 + _SCALER_HCOEF_DELTA((r), (c)))
+#define SCALER_CVCOEF(r, c)		(0x1d0 + _SCALER_VCOEF_DELTA((r), (c)))
+
+
+/* Color Space Conversion */
+#define SCALER_CSC_COEF(x, y)		(0x220 + (y) * 0xc + (x) * 0x4)
+
+/* Dithering */
+#define SCALER_DITH_CFG			0x250
+
+/* Version Number */
+#define SCALER_VER			0x260	/* no shadow */
+
+/* Cycle count and Timeout */
+#define SCALER_CYCLE_COUNT		0x278	/* no shadow */
+#define SCALER_TIMEOUT_CTRL		0x2c0	/* no shadow */
+#define SCALER_TIMEOUT_CNT		0x2c4	/* no shadow */
+
+/* Blending */
+#define SCALER_SRC_BLEND_COLOR		0x280
+#define SCALER_SRC_BLEND_ALPHA		0x284
+#define SCALER_DST_BLEND_COLOR		0x288
+#define SCALER_DST_BLEND_ALPHA		0x28c
+
+/* Color Fill */
+#define SCALER_FILL_COLOR		0x290
+
+/* Multiple Command Queue */
+#define SCALER_ADDR_Q_CONFIG		0x2a0	/* no shadow */
+#define SCALER_SRC_ADDR_Q_STATUS	0x2a4	/* no shadow */
+#define SCALER_SRC_ADDR_Q		0x2a8	/* no shadow */
+
+/* CRC */
+#define SCALER_CRC_COLOR00_10		0x2b0	/* no shadow */
+#define SCALER_CRC_COLOR20_30		0x2b4	/* no shadow */
+#define SCALER_CRC_COLOR01_11		0x2b8	/* no shadow */
+#define SCALER_CRC_COLOR21_31		0x2bc	/* no shadow */
+
+/* Shadow Registers */
+#define SCALER_SHADOW_OFFSET		0x1000
+
+
+/* Bit definition part */
+#define SCALER_MASK(hi_b, lo_b)		((1 << ((hi_b) - (lo_b) + 1)) - 1)
+#define SCALER_GET(reg, hi_b, lo_b)	\
+	(((reg) >> (lo_b)) & SCALER_MASK(hi_b, lo_b))
+#define SCALER_SET(val, hi_b, lo_b) \
+	(((val) & SCALER_MASK(hi_b, lo_b)) << lo_b)
+
+/* SCALER_STATUS */
+#define SCALER_STATUS_SCALER_RUNNING		(1 << 1)
+#define SCALER_STATUS_SCALER_READY_CLK_DOWN	(1 << 0)
+
+/* SCALER_CFG */
+#define SCALER_CFG_FILL_EN			(1 << 24)
+#define SCALER_CFG_BLEND_COLOR_DIVIDE_ALPHA_EN	(1 << 17)
+#define SCALER_CFG_BLEND_EN			(1 << 16)
+#define SCALER_CFG_CSC_Y_OFFSET_SRC_EN		(1 << 10)
+#define SCALER_CFG_CSC_Y_OFFSET_DST_EN		(1 << 9)
+#define SCALER_CFG_16_BURST_MODE		(1 << 8)
+#define SCALER_CFG_SOFT_RESET			(1 << 1)
+#define SCALER_CFG_START_CMD			(1 << 0)
+
+/* SCALER_INT_EN */
+#define SCALER_INT_EN_TIMEOUT			(1 << 31)
+#define SCALER_INT_EN_ILLEGAL_BLEND		(1 << 24)
+#define SCALER_INT_EN_ILLEGAL_RATIO		(1 << 23)
+#define SCALER_INT_EN_ILLEGAL_DST_HEIGHT	(1 << 22)
+#define SCALER_INT_EN_ILLEGAL_DST_WIDTH		(1 << 21)
+#define SCALER_INT_EN_ILLEGAL_DST_V_POS		(1 << 20)
+#define SCALER_INT_EN_ILLEGAL_DST_H_POS		(1 << 19)
+#define SCALER_INT_EN_ILLEGAL_DST_C_SPAN	(1 << 18)
+#define SCALER_INT_EN_ILLEGAL_DST_Y_SPAN	(1 << 17)
+#define SCALER_INT_EN_ILLEGAL_DST_CR_BASE	(1 << 16)
+#define SCALER_INT_EN_ILLEGAL_DST_CB_BASE	(1 << 15)
+#define SCALER_INT_EN_ILLEGAL_DST_Y_BASE	(1 << 14)
+#define SCALER_INT_EN_ILLEGAL_DST_COLOR		(1 << 13)
+#define SCALER_INT_EN_ILLEGAL_SRC_HEIGHT	(1 << 12)
+#define SCALER_INT_EN_ILLEGAL_SRC_WIDTH		(1 << 11)
+#define SCALER_INT_EN_ILLEGAL_SRC_CV_POS	(1 << 10)
+#define SCALER_INT_EN_ILLEGAL_SRC_CH_POS	(1 << 9)
+#define SCALER_INT_EN_ILLEGAL_SRC_YV_POS	(1 << 8)
+#define SCALER_INT_EN_ILLEGAL_SRC_YH_POS	(1 << 7)
+#define SCALER_INT_EN_ILLEGAL_DST_SPAN		(1 << 6)
+#define SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN	(1 << 5)
+#define SCALER_INT_EN_ILLEGAL_SRC_CR_BASE	(1 << 4)
+#define SCALER_INT_EN_ILLEGAL_SRC_CB_BASE	(1 << 3)
+#define SCALER_INT_EN_ILLEGAL_SRC_Y_BASE	(1 << 2)
+#define SCALER_INT_EN_ILLEGAL_SRC_COLOR		(1 << 1)
+#define SCALER_INT_EN_FRAME_END			(1 << 0)
+
+/* SCALER_INT_STATUS */
+#define SCALER_INT_STATUS_TIMEOUT		(1 << 31)
+#define SCALER_INT_STATUS_ILLEGAL_BLEND		(1 << 24)
+#define SCALER_INT_STATUS_ILLEGAL_RATIO		(1 << 23)
+#define SCALER_INT_STATUS_ILLEGAL_DST_HEIGHT	(1 << 22)
+#define SCALER_INT_STATUS_ILLEGAL_DST_WIDTH	(1 << 21)
+#define SCALER_INT_STATUS_ILLEGAL_DST_V_POS	(1 << 20)
+#define SCALER_INT_STATUS_ILLEGAL_DST_H_POS	(1 << 19)
+#define SCALER_INT_STATUS_ILLEGAL_DST_C_SPAN	(1 << 18)
+#define SCALER_INT_STATUS_ILLEGAL_DST_Y_SPAN	(1 << 17)
+#define SCALER_INT_STATUS_ILLEGAL_DST_CR_BASE	(1 << 16)
+#define SCALER_INT_STATUS_ILLEGAL_DST_CB_BASE	(1 << 15)
+#define SCALER_INT_STATUS_ILLEGAL_DST_Y_BASE	(1 << 14)
+#define SCALER_INT_STATUS_ILLEGAL_DST_COLOR	(1 << 13)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_HEIGHT	(1 << 12)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_WIDTH	(1 << 11)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_CV_POS	(1 << 10)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_CH_POS	(1 << 9)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_YV_POS	(1 << 8)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_YH_POS	(1 << 7)
+#define SCALER_INT_STATUS_ILLEGAL_DST_SPAN	(1 << 6)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_Y_SPAN	(1 << 5)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_CR_BASE	(1 << 4)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_CB_BASE	(1 << 3)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_Y_BASE	(1 << 2)
+#define SCALER_INT_STATUS_ILLEGAL_SRC_COLOR	(1 << 1)
+#define SCALER_INT_STATUS_FRAME_END		(1 << 0)
+
+/* SCALER_SRC_CFG */
+#define SCALER_SRC_CFG_TILE_EN			(1 << 10)
+#define SCALER_SRC_CFG_GET_BYTE_SWAP(r)		SCALER_GET(r, 6, 5)
+#define SCALER_SRC_CFG_SET_BYTE_SWAP(v)		SCALER_SET(v, 6, 5)
+#define SCALER_SRC_CFG_GET_COLOR_FORMAT(r)	SCALER_GET(r, 4, 0)
+#define SCALER_SRC_CFG_SET_COLOR_FORMAT(v)	SCALER_SET(v, 4, 0)
+#define SCALER_YUV420_2P_UV			0
+#define SCALER_YUV422_2P_UV			2
+#define SCALER_YUV444_2P_UV			3
+#define SCALER_RGB_565				4
+#define SCALER_ARGB1555				5
+#define SCALER_ARGB8888				6
+#define SCALER_ARGB8888_PRE			7
+#define SCALER_YUV422_1P_YVYU			9
+#define SCALER_YUV422_1P_YUYV			10
+#define SCALER_YUV422_1P_UYVY			11
+#define SCALER_ARGB4444				12
+#define SCALER_L8A8				13
+#define SCALER_RGBA8888				14
+#define SCALER_L8				15
+#define SCALER_YUV420_2P_VU			16
+#define SCALER_YUV422_2P_VU			18
+#define SCALER_YUV444_2P_VU			19
+#define SCALER_YUV420_3P			20
+#define SCALER_YUV422_3P			22
+#define SCALER_YUV444_3P			23
+
+/* SCALER_SRC_SPAN */
+#define SCALER_SRC_SPAN_GET_C_SPAN(r)		SCALER_GET(r, 29, 16)
+#define SCALER_SRC_SPAN_SET_C_SPAN(v)		SCALER_SET(v, 29, 16)
+#define SCALER_SRC_SPAN_GET_Y_SPAN(r)		SCALER_GET(r, 13, 0)
+#define SCALER_SRC_SPAN_SET_Y_SPAN(v)		SCALER_SET(v, 13, 0)
+
+/* SCALER_SRC_Y_POS */
+#define SCALER_SRC_Y_POS_GET_YH_POS(r)		SCALER_GET(r, 31, 16)
+#define SCALER_SRC_Y_POS_SET_YH_POS(v)		SCALER_SET(v, 31, 16)
+#define SCALER_SRC_Y_POS_GET_YV_POS(r)		SCALER_GET(r, 15, 0)
+#define SCALER_SRC_Y_POS_SET_YV_POS(v)		SCALER_SET(v, 15, 0)
+
+/* SCALER_SRC_WH */
+#define SCALER_SRC_WH_GET_WIDTH(r)		SCALER_GET(r, 29, 16)
+#define SCALER_SRC_WH_SET_WIDTH(v)		SCALER_SET(v, 29, 16)
+#define SCALER_SRC_WH_GET_HEIGHT(r)		SCALER_GET(r, 13, 0)
+#define SCALER_SRC_WH_SET_HEIGHT(v)		SCALER_SET(v, 13, 0)
+
+/* SCALER_SRC_C_POS */
+#define SCALER_SRC_C_POS_GET_CH_POS(r)		SCALER_GET(r, 31, 16)
+#define SCALER_SRC_C_POS_SET_CH_POS(v)		SCALER_SET(v, 31, 16)
+#define SCALER_SRC_C_POS_GET_CV_POS(r)		SCALER_GET(r, 15, 0)
+#define SCALER_SRC_C_POS_SET_CV_POS(v)		SCALER_SET(v, 15, 0)
+
+/* SCALER_DST_CFG */
+#define SCALER_DST_CFG_GET_BYTE_SWAP(r)		SCALER_GET(r, 6, 5)
+#define SCALER_DST_CFG_SET_BYTE_SWAP(v)		SCALER_SET(v, 6, 5)
+#define SCALER_DST_CFG_GET_COLOR_FORMAT(r)	SCALER_GET(r, 4, 0)
+#define SCALER_DST_CFG_SET_COLOR_FORMAT(v)	SCALER_SET(v, 4, 0)
+
+/* SCALER_DST_SPAN */
+#define SCALER_DST_SPAN_GET_C_SPAN(r)		SCALER_GET(r, 29, 16)
+#define SCALER_DST_SPAN_SET_C_SPAN(v)		SCALER_SET(v, 29, 16)
+#define SCALER_DST_SPAN_GET_Y_SPAN(r)		SCALER_GET(r, 13, 0)
+#define SCALER_DST_SPAN_SET_Y_SPAN(v)		SCALER_SET(v, 13, 0)
+
+/* SCALER_DST_WH */
+#define SCALER_DST_WH_GET_WIDTH(r)		SCALER_GET(r, 29, 16)
+#define SCALER_DST_WH_SET_WIDTH(v)		SCALER_SET(v, 29, 16)
+#define SCALER_DST_WH_GET_HEIGHT(r)		SCALER_GET(r, 13, 0)
+#define SCALER_DST_WH_SET_HEIGHT(v)		SCALER_SET(v, 13, 0)
+
+/* SCALER_DST_POS */
+#define SCALER_DST_POS_GET_H_POS(r)		SCALER_GET(r, 29, 16)
+#define SCALER_DST_POS_SET_H_POS(v)		SCALER_SET(v, 29, 16)
+#define SCALER_DST_POS_GET_V_POS(r)		SCALER_GET(r, 13, 0)
+#define SCALER_DST_POS_SET_V_POS(v)		SCALER_SET(v, 13, 0)
+
+/* SCALER_H_RATIO */
+#define SCALER_H_RATIO_GET(r)			SCALER_GET(r, 18, 0)
+#define SCALER_H_RATIO_SET(v)			SCALER_SET(v, 18, 0)
+
+/* SCALER_V_RATIO */
+#define SCALER_V_RATIO_GET(r)			SCALER_GET(r, 18, 0)
+#define SCALER_V_RATIO_SET(v)			SCALER_SET(v, 18, 0)
+
+/* SCALER_ROT_CFG */
+#define SCALER_ROT_CFG_FLIP_X_EN		(1 << 3)
+#define SCALER_ROT_CFG_FLIP_Y_EN		(1 << 2)
+#define SCALER_ROT_CFG_GET_ROTMODE(r)		SCALER_GET(r, 1, 0)
+#define SCALER_ROT_CFG_SET_ROTMODE(v)		SCALER_SET(v, 1, 0)
+#define SCALER_ROT_MODE_90			1
+#define SCALER_ROT_MODE_180			2
+#define SCALER_ROT_MODE_270			3
+
+/* SCALER_HCOEF, SCALER_VCOEF */
+#define SCALER_COEF_SHIFT(i)			(16 * (1 - (i) % 2))
+#define SCALER_COEF_GET(r, i)			\
+				(((r) >> SCALER_COEF_SHIFT(i)) & 0x1ff)
+#define SCALER_COEF_SET(v, i)			\
+				(((v) & 0x1ff) << SCALER_COEF_SHIFT(i))
+
+/* SCALER_CSC_COEFxy */
+#define SCALER_CSC_COEF_GET(r)			SCALER_GET(r, 11, 0)
+#define SCALER_CSC_COEF_SET(v)			SCALER_SET(v, 11, 0)
+
+/* SCALER_DITH_CFG */
+#define SCALER_DITH_CFG_GET_R_TYPE(r)		SCALER_GET(r, 8, 6)
+#define SCALER_DITH_CFG_SET_R_TYPE(v)		SCALER_SET(v, 8, 6)
+#define SCALER_DITH_CFG_GET_G_TYPE(r)		SCALER_GET(r, 5, 3)
+#define SCALER_DITH_CFG_SET_G_TYPE(v)		SCALER_SET(v, 5, 3)
+#define SCALER_DITH_CFG_GET_B_TYPE(r)		SCALER_GET(r, 2, 0)
+#define SCALER_DITH_CFG_SET_B_TYPE(v)		SCALER_SET(v, 2, 0)
+
+/* SCALER_TIMEOUT_CTRL */
+#define SCALER_TIMEOUT_CTRL_GET_TIMER_VALUE(r)	SCALER_GET(r, 31, 16)
+#define SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(v)	SCALER_SET(v, 31, 16)
+#define SCALER_TIMEOUT_CTRL_GET_TIMER_DIV(r)	SCALER_GET(r, 7, 4)
+#define SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(v)	SCALER_SET(v, 7, 4)
+#define SCALER_TIMEOUT_CTRL_TIMER_ENABLE	(1 << 0)
+
+/* SCALER_TIMEOUT_CNT */
+#define SCALER_TIMEOUT_CTRL_GET_TIMER_COUNT(r)	SCALER_GET(r, 31, 16)
+
+/* SCALER_SRC_BLEND_COLOR */
+#define SCALER_SRC_BLEND_COLOR_SEL_INV		(1 << 31)
+#define SCALER_SRC_BLEND_COLOR_GET_SEL(r)	SCALER_GET(r, 30, 29)
+#define SCALER_SRC_BLEND_COLOR_SET_SEL(v)	SCALER_SET(v, 30, 29)
+#define SCALER_SRC_BLEND_COLOR_OP_SEL_INV	(1 << 28)
+#define SCALER_SRC_BLEND_COLOR_GET_OP_SEL(r)	SCALER_GET(r, 27, 24)
+#define SCALER_SRC_BLEND_COLOR_SET_OP_SEL(v)	SCALER_SET(v, 27, 24)
+#define SCALER_SRC_BLEND_COLOR_GET_COLOR0(r)	SCALER_GET(r, 23, 16)
+#define SCALER_SRC_BLEND_COLOR_SET_COLOR0(v)	SCALER_SET(v, 23, 16)
+#define SCALER_SRC_BLEND_COLOR_GET_COLOR1(r)	SCALER_GET(r, 15, 8)
+#define SCALER_SRC_BLEND_COLOR_SET_COLOR1(v)	SCALER_SET(v, 15, 8)
+#define SCALER_SRC_BLEND_COLOR_GET_COLOR2(r)	SCALER_GET(r, 7, 0)
+#define SCALER_SRC_BLEND_COLOR_SET_COLOR2(v)	SCALER_SET(v, 7, 0)
+
+/* SCALER_SRC_BLEND_ALPHA */
+#define SCALER_SRC_BLEND_ALPHA_SEL_INV		(1 << 31)
+#define SCALER_SRC_BLEND_ALPHA_GET_SEL(r)	SCALER_GET(r, 30, 29)
+#define SCALER_SRC_BLEND_ALPHA_SET_SEL(v)	SCALER_SET(v, 30, 29)
+#define SCALER_SRC_BLEND_ALPHA_OP_SEL_INV	(1 << 28)
+#define SCALER_SRC_BLEND_ALPHA_GET_OP_SEL(r)	SCALER_GET(r, 27, 24)
+#define SCALER_SRC_BLEND_ALPHA_SET_OP_SEL(v)	SCALER_SET(v, 27, 24)
+#define SCALER_SRC_BLEND_ALPHA_GET_ALPHA(r)	SCALER_GET(r, 7, 0)
+#define SCALER_SRC_BLEND_ALPHA_SET_ALPHA(v)	SCALER_SET(v, 7, 0)
+
+/* SCALER_DST_BLEND_COLOR */
+#define SCALER_DST_BLEND_COLOR_SEL_INV		(1 << 31)
+#define SCALER_DST_BLEND_COLOR_GET_SEL(r)	SCALER_GET(r, 30, 29)
+#define SCALER_DST_BLEND_COLOR_SET_SEL(v)	SCALER_SET(v, 30, 29)
+#define SCALER_DST_BLEND_COLOR_OP_SEL_INV	(1 << 28)
+#define SCALER_DST_BLEND_COLOR_GET_OP_SEL(r)	SCALER_GET(r, 27, 24)
+#define SCALER_DST_BLEND_COLOR_SET_OP_SEL(v)	SCALER_SET(v, 27, 24)
+#define SCALER_DST_BLEND_COLOR_GET_COLOR0(r)	SCALER_GET(r, 23, 16)
+#define SCALER_DST_BLEND_COLOR_SET_COLOR0(v)	SCALER_SET(v, 23, 16)
+#define SCALER_DST_BLEND_COLOR_GET_COLOR1(r)	SCALER_GET(r, 15, 8)
+#define SCALER_DST_BLEND_COLOR_SET_COLOR1(v)	SCALER_SET(v, 15, 8)
+#define SCALER_DST_BLEND_COLOR_GET_COLOR2(r)	SCALER_GET(r, 7, 0)
+#define SCALER_DST_BLEND_COLOR_SET_COLOR2(v)	SCALER_SET(v, 7, 0)
+
+/* SCALER_DST_BLEND_ALPHA */
+#define SCALER_DST_BLEND_ALPHA_SEL_INV		(1 << 31)
+#define SCALER_DST_BLEND_ALPHA_GET_SEL(r)	SCALER_GET(r, 30, 29)
+#define SCALER_DST_BLEND_ALPHA_SET_SEL(v)	SCALER_SET(v, 30, 29)
+#define SCALER_DST_BLEND_ALPHA_OP_SEL_INV	(1 << 28)
+#define SCALER_DST_BLEND_ALPHA_GET_OP_SEL(r)	SCALER_GET(r, 27, 24)
+#define SCALER_DST_BLEND_ALPHA_SET_OP_SEL(v)	SCALER_SET(v, 27, 24)
+#define SCALER_DST_BLEND_ALPHA_GET_ALPHA(r)	SCALER_GET(r, 7, 0)
+#define SCALER_DST_BLEND_ALPHA_SET_ALPHA(v)	SCALER_SET(v, 7, 0)
+
+/* SCALER_FILL_COLOR */
+#define SCALER_FILL_COLOR_GET_ALPHA(r)		SCALER_GET(r, 31, 24)
+#define SCALER_FILL_COLOR_SET_ALPHA(v)		SCALER_SET(v, 31, 24)
+#define SCALER_FILL_COLOR_GET_FILL_COLOR0(r)	SCALER_GET(r, 23, 16)
+#define SCALER_FILL_COLOR_SET_FILL_COLOR0(v)	SCALER_SET(v, 23, 16)
+#define SCALER_FILL_COLOR_GET_FILL_COLOR1(r)	SCALER_GET(r, 15, 8)
+#define SCALER_FILL_COLOR_SET_FILL_COLOR1(v)	SCALER_SET(v, 15, 8)
+#define SCALER_FILL_COLOR_GET_FILL_COLOR2(r)	SCALER_GET(r, 7, 0)
+#define SCALER_FILL_COLOR_SET_FILL_COLOR2(v)	SCALER_SET(v, 7, 0)
+
+/* SCALER_ADDR_Q_CONFIG */
+#define SCALER_ADDR_Q_CONFIG_RST		(1 << 0)
+
+/* SCALER_SRC_ADDR_Q_STATUS */
+#define SCALER_SRC_ADDR_Q_STATUS_Y_FULL		(1 << 23)
+#define SCALER_SRC_ADDR_Q_STATUS_Y_EMPTY	(1 << 22)
+#define SCALER_SRC_ADDR_Q_STATUS_GET_Y_WR_IDX(r)	SCALER_GET(r, 21, 16)
+#define SCALER_SRC_ADDR_Q_STATUS_CB_FULL	(1 << 15)
+#define SCALER_SRC_ADDR_Q_STATUS_CB_EMPTY	(1 << 14)
+#define SCALER_SRC_ADDR_Q_STATUS_GET_CB_WR_IDX(r)	SCALER_GET(r, 13, 8)
+#define SCALER_SRC_ADDR_Q_STATUS_CR_FULL	(1 << 7)
+#define SCALER_SRC_ADDR_Q_STATUS_CR_EMPTY	(1 << 6)
+#define SCALER_SRC_ADDR_Q_STATUS_GET_CR_WR_IDX(r)	SCALER_GET(r, 5, 0)
+
+/* SCALER_DST_ADDR_Q_STATUS */
+#define SCALER_DST_ADDR_Q_STATUS_Y_FULL		(1 << 23)
+#define SCALER_DST_ADDR_Q_STATUS_Y_EMPTY	(1 << 22)
+#define SCALER_DST_ADDR_Q_STATUS_GET_Y_WR_IDX(r)	SCALER_GET(r, 21, 16)
+#define SCALER_DST_ADDR_Q_STATUS_CB_FULL	(1 << 15)
+#define SCALER_DST_ADDR_Q_STATUS_CB_EMPTY	(1 << 14)
+#define SCALER_DST_ADDR_Q_STATUS_GET_CB_WR_IDX(r)	SCALER_GET(r, 13, 8)
+#define SCALER_DST_ADDR_Q_STATUS_CR_FULL	(1 << 7)
+#define SCALER_DST_ADDR_Q_STATUS_CR_EMPTY	(1 << 6)
+#define SCALER_DST_ADDR_Q_STATUS_GET_CR_WR_IDX(r)	SCALER_GET(r, 5, 0)
+
+/* SCALER_CRC_COLOR00_10 */
+#define SCALER_CRC_COLOR00_10_GET_00(r)		SCALER_GET(r, 31, 16)
+#define SCALER_CRC_COLOR00_10_GET_10(r)		SCALER_GET(r, 15, 0)
+
+/* SCALER_CRC_COLOR20_30 */
+#define SCALER_CRC_COLOR20_30_GET_20(r)		SCALER_GET(r, 31, 16)
+#define SCALER_CRC_COLOR20_30_GET_30(r)		SCALER_GET(r, 15, 0)
+
+/* SCALER_CRC_COLOR01_11 */
+#define SCALER_CRC_COLOR01_11_GET_01(r)		SCALER_GET(r, 31, 16)
+#define SCALER_CRC_COLOR01_11_GET_11(r)		SCALER_GET(r, 15, 0)
+
+/* SCALER_CRC_COLOR21_31 */
+#define SCALER_CRC_COLOR21_31_GET_21(r)		SCALER_GET(r, 31, 16)
+#define SCALER_CRC_COLOR21_31_GET_31(r)		SCALER_GET(r, 15, 0)
+
+#endif /* EXYNOS_REGS_SCALER_H */

From 818c05d8e2679e27800b9ce5bc2b0a89ecd164e8 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Date: Thu, 3 May 2018 21:41:19 +0300
Subject: [PATCH 0668/1286] drm: panel-orientation-quirks: Convert to use
 match_string() helper

The new helper returns the index of the matching string in an array.
Use it here instead of the open-coded lookup loop.
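
For reference, a minimal sketch of what match_string() does (the date
strings below are made up for illustration; passing -1 as the size means
"search until the NULL terminator"):

	static const char * const dates[] = { "2016-08-22", "2017-01-17", NULL };

	/* Returns the index of the matching entry, or -EINVAL if none. */
	int i = match_string(dates, -1, "2017-01-17");	/* i == 1 */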

Acked-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180503184119.22355-1-andriy.shevchenko@linux.intel.com
---
 drivers/gpu/drm/drm_panel_orientation_quirks.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index caebddda8bce..fe9c6c731e87 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -172,10 +172,9 @@ int drm_get_panel_orientation_quirk(int width, int height)
 		if (!bios_date)
 			continue;
 
-		for (i = 0; data->bios_dates[i]; i++) {
-			if (!strcmp(data->bios_dates[i], bios_date))
-				return data->orientation;
-		}
+		i = match_string(data->bios_dates, -1, bios_date);
+		if (i >= 0)
+			return data->orientation;
 	}
 
 	return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;

From dd7c2626329468c0344a794187b467d34c3640cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 8 May 2018 16:39:36 +0530
Subject: [PATCH 0669/1286] drm/modes: Introduce drm_mode_match()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make mode matching less confusing by allowing the caller to specify
which parts of the modes should match via some flags.
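
As an illustration (hypothetical caller, mode pointers assumed valid), a
check that requires the timings, flags and stereo-3D layout to match while
ignoring the pixel clock would look like:

	if (drm_mode_match(mode1, mode2,
			   DRM_MODE_MATCH_TIMINGS |
			   DRM_MODE_MATCH_FLAGS |
			   DRM_MODE_MATCH_3D_FLAGS)) {
		/* treat mode1 and mode2 as equivalent */
	}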

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-2-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_modes.c | 146 +++++++++++++++++++++++++++---------
 include/drm/drm_modes.h     |   9 +++
 2 files changed, 118 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index e82b61e08f8c..c395a244f665 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -939,6 +939,99 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_mode_duplicate);
 
+static bool drm_mode_match_timings(const struct drm_display_mode *mode1,
+				   const struct drm_display_mode *mode2)
+{
+	return mode1->hdisplay == mode2->hdisplay &&
+		mode1->hsync_start == mode2->hsync_start &&
+		mode1->hsync_end == mode2->hsync_end &&
+		mode1->htotal == mode2->htotal &&
+		mode1->hskew == mode2->hskew &&
+		mode1->vdisplay == mode2->vdisplay &&
+		mode1->vsync_start == mode2->vsync_start &&
+		mode1->vsync_end == mode2->vsync_end &&
+		mode1->vtotal == mode2->vtotal &&
+		mode1->vscan == mode2->vscan;
+}
+
+static bool drm_mode_match_clock(const struct drm_display_mode *mode1,
+				  const struct drm_display_mode *mode2)
+{
+	/*
+	 * do clock check convert to PICOS
+	 * so fb modes get matched the same
+	 */
+	if (mode1->clock && mode2->clock)
+		return KHZ2PICOS(mode1->clock) == KHZ2PICOS(mode2->clock);
+	else
+		return mode1->clock == mode2->clock;
+}
+
+static bool drm_mode_match_flags(const struct drm_display_mode *mode1,
+				 const struct drm_display_mode *mode2)
+{
+	return (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+		(mode2->flags & ~DRM_MODE_FLAG_3D_MASK);
+}
+
+static bool drm_mode_match_3d_flags(const struct drm_display_mode *mode1,
+				    const struct drm_display_mode *mode2)
+{
+	return (mode1->flags & DRM_MODE_FLAG_3D_MASK) ==
+		(mode2->flags & DRM_MODE_FLAG_3D_MASK);
+}
+
+static bool drm_mode_match_aspect_ratio(const struct drm_display_mode *mode1,
+					const struct drm_display_mode *mode2)
+{
+	return mode1->picture_aspect_ratio == mode2->picture_aspect_ratio;
+}
+
+/**
+ * drm_mode_match - test modes for (partial) equality
+ * @mode1: first mode
+ * @mode2: second mode
+ * @match_flags: which parts need to match (DRM_MODE_MATCH_*)
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * Returns:
+ * True if the modes are (partially) equal, false otherwise.
+ */
+bool drm_mode_match(const struct drm_display_mode *mode1,
+		    const struct drm_display_mode *mode2,
+		    unsigned int match_flags)
+{
+	if (!mode1 && !mode2)
+		return true;
+
+	if (!mode1 || !mode2)
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_TIMINGS &&
+	    !drm_mode_match_timings(mode1, mode2))
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_CLOCK &&
+	    !drm_mode_match_clock(mode1, mode2))
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_FLAGS &&
+	    !drm_mode_match_flags(mode1, mode2))
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_3D_FLAGS &&
+	    !drm_mode_match_3d_flags(mode1, mode2))
+		return false;
+
+	if (match_flags & DRM_MODE_MATCH_ASPECT_RATIO &&
+	    !drm_mode_match_aspect_ratio(mode1, mode2))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(drm_mode_match);
+
 /**
  * drm_mode_equal - test modes for equality
  * @mode1: first mode
@@ -949,23 +1042,14 @@ EXPORT_SYMBOL(drm_mode_duplicate);
  * Returns:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1,
+		    const struct drm_display_mode *mode2)
 {
-	if (!mode1 && !mode2)
-		return true;
-
-	if (!mode1 || !mode2)
-		return false;
-
-	/* do clock check convert to PICOS so fb modes get matched
-	 * the same */
-	if (mode1->clock && mode2->clock) {
-		if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
-			return false;
-	} else if (mode1->clock != mode2->clock)
-		return false;
-
-	return drm_mode_equal_no_clocks(mode1, mode2);
+	return drm_mode_match(mode1, mode2,
+			      DRM_MODE_MATCH_TIMINGS |
+			      DRM_MODE_MATCH_CLOCK |
+			      DRM_MODE_MATCH_FLAGS |
+			      DRM_MODE_MATCH_3D_FLAGS);
 }
 EXPORT_SYMBOL(drm_mode_equal);
 
@@ -980,13 +1064,13 @@ EXPORT_SYMBOL(drm_mode_equal);
  * Returns:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
+			      const struct drm_display_mode *mode2)
 {
-	if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
-	    (mode2->flags & DRM_MODE_FLAG_3D_MASK))
-		return false;
-
-	return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
+	return drm_mode_match(mode1, mode2,
+			      DRM_MODE_MATCH_TIMINGS |
+			      DRM_MODE_MATCH_FLAGS |
+			      DRM_MODE_MATCH_3D_FLAGS);
 }
 EXPORT_SYMBOL(drm_mode_equal_no_clocks);
 
@@ -1004,21 +1088,9 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks);
 bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
 					const struct drm_display_mode *mode2)
 {
-	if (mode1->hdisplay == mode2->hdisplay &&
-	    mode1->hsync_start == mode2->hsync_start &&
-	    mode1->hsync_end == mode2->hsync_end &&
-	    mode1->htotal == mode2->htotal &&
-	    mode1->hskew == mode2->hskew &&
-	    mode1->vdisplay == mode2->vdisplay &&
-	    mode1->vsync_start == mode2->vsync_start &&
-	    mode1->vsync_end == mode2->vsync_end &&
-	    mode1->vtotal == mode2->vtotal &&
-	    mode1->vscan == mode2->vscan &&
-	    (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
-	     (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
-		return true;
-
-	return false;
+	return drm_mode_match(mode1, mode2,
+			      DRM_MODE_MATCH_TIMINGS |
+			      DRM_MODE_MATCH_FLAGS);
 }
 EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
 
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 0d310beae6af..2f78b7ee4824 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -147,6 +147,12 @@ enum drm_mode_status {
 
 #define DRM_MODE_FLAG_3D_MAX	DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
+#define DRM_MODE_MATCH_TIMINGS (1 << 0)
+#define DRM_MODE_MATCH_CLOCK (1 << 1)
+#define DRM_MODE_MATCH_FLAGS (1 << 2)
+#define DRM_MODE_MATCH_3D_FLAGS (1 << 3)
+#define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4)
+
 /**
  * struct drm_display_mode - DRM kernel-internal display mode structure
  * @hdisplay: horizontal display size
@@ -490,6 +496,9 @@ void drm_mode_copy(struct drm_display_mode *dst,
 		   const struct drm_display_mode *src);
 struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
 					    const struct drm_display_mode *mode);
+bool drm_mode_match(const struct drm_display_mode *mode1,
+		    const struct drm_display_mode *mode2,
+		    unsigned int match_flags);
 bool drm_mode_equal(const struct drm_display_mode *mode1,
 		    const struct drm_display_mode *mode2);
 bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,

From a2328fd657017557606264c61074e609adfbb3ce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 8 May 2018 16:39:37 +0530
Subject: [PATCH 0670/1286] drm/edid: Use drm_mode_equal_no_clocks_no_stereo()
 for consistency
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use drm_mode_equal_no_clocks_no_stereo() in
drm_match_hdmi_mode_clock_tolerance() for consistency as we
also use it in drm_match_hdmi_mode() and the cea mode matching
functions.

This doesn't actually change anything since the input mode
comes from detailed timings and we match it against
edid_4k_modes[], none of which can have stereo flags set.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-3-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_edid.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 61dd9a2fbe5b..aa70da86ef2c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3047,7 +3047,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
 		    abs(to_match->clock - clock2) > clock_tolerance)
 			continue;
 
-		if (drm_mode_equal_no_clocks(to_match, hdmi_mode))
+		if (drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
 			return vic;
 	}
 

From 357768cc9e3fdacf6551da0ae1483bc87dbcb4e8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 8 May 2018 16:39:38 +0530
Subject: [PATCH 0671/1286] drm/edid: Fix cea mode aspect ratio handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 6dffd431e229 ("drm: Add aspect ratio parsing in DRM layer")
caused us to not send out any VICs in the AVI infoframes. That commit
has since been reverted, but if and when we add aspect ratio handling
back we need to be more careful.

Let's handle this by considering the aspect ratio as a requirement
for cea mode matching only if the passed-in mode actually has a
non-zero aspect ratio field. This will keep userspace that doesn't
provide an aspect ratio working as before by matching it to the
first otherwise equal cea mode. And once userspace starts to
provide the aspect ratio it will be considered a hard requirement
for the match.
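
In code, the pattern used by the matching functions below boils down to:

	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;

	/* Only make the aspect ratio a hard requirement when the mode
	 * passed in by user space actually carries one. */
	if (to_match->picture_aspect_ratio)
		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;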

Also change the hdmi mode matching to use drm_mode_match() for
consistency, but we don't match on aspect ratio there since the
spec doesn't list a specific aspect ratio for those modes.

Cc: Shashank Sharma <shashank.sharma@intel.com>
Cc: "Lin, Jia" <lin.a.jia@intel.com>
Cc: Akashdeep Sharma <akashdeep.sharma@intel.com>
Cc: Jim Bride <jim.bride@linux.intel.com>
Cc: Jose Abreu <Jose.Abreu@synopsys.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Emil Velikov <emil.l.velikov@gmail.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-4-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_edid.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index aa70da86ef2c..ba68ff94d3b3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2930,11 +2930,15 @@ cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode)
 static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match,
 					     unsigned int clock_tolerance)
 {
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
 	u8 vic;
 
 	if (!to_match->clock)
 		return 0;
 
+	if (to_match->picture_aspect_ratio)
+		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
 	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
 		struct drm_display_mode cea_mode = edid_cea_modes[vic];
 		unsigned int clock1, clock2;
@@ -2948,7 +2952,7 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
 			continue;
 
 		do {
-			if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+			if (drm_mode_match(to_match, &cea_mode, match_flags))
 				return vic;
 		} while (cea_mode_alternate_timings(vic, &cea_mode));
 	}
@@ -2965,11 +2969,15 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
  */
 u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 {
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
 	u8 vic;
 
 	if (!to_match->clock)
 		return 0;
 
+	if (to_match->picture_aspect_ratio)
+		match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
+
 	for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) {
 		struct drm_display_mode cea_mode = edid_cea_modes[vic];
 		unsigned int clock1, clock2;
@@ -2983,7 +2991,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 			continue;
 
 		do {
-			if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode))
+			if (drm_mode_match(to_match, &cea_mode, match_flags))
 				return vic;
 		} while (cea_mode_alternate_timings(vic, &cea_mode));
 	}
@@ -3030,6 +3038,7 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
 static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match,
 					      unsigned int clock_tolerance)
 {
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
 	u8 vic;
 
 	if (!to_match->clock)
@@ -3047,7 +3056,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
 		    abs(to_match->clock - clock2) > clock_tolerance)
 			continue;
 
-		if (drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
+		if (drm_mode_match(to_match, hdmi_mode, match_flags))
 			return vic;
 	}
 
@@ -3064,6 +3073,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_
  */
 static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 {
+	unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS;
 	u8 vic;
 
 	if (!to_match->clock)
@@ -3079,7 +3089,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
+		    drm_mode_match(to_match, hdmi_mode, match_flags))
 			return vic;
 	}
 	return 0;

From a9c266c27ee568e3028b804a447b1fea58209618 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 8 May 2018 16:39:39 +0530
Subject: [PATCH 0672/1286] drm/edid: Don't send bogus aspect ratios in AVI
 infoframes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If the user mode specifies an aspect ratio other than 4:3 or 16:9
we currently ignore it silently. Maybe a better approach is to return
an error? Let's try that.

Also we must be careful that we don't try to send an illegal picture
aspect ratio in the infoframe, as it's only capable of signalling none,
4:3, and 16:9. Currently we're sending these bogus infoframes
whenever the cea mode specifies some other aspect ratio.

Cc: Shashank Sharma <shashank.sharma@intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Jose Abreu <Jose.Abreu@synopsys.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Emil Velikov <emil.l.velikov@gmail.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-5-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_edid.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ba68ff94d3b3..42a7e871aa2a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4838,6 +4838,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 					 const struct drm_display_mode *mode,
 					 bool is_hdmi2_sink)
 {
+	enum hdmi_picture_aspect picture_aspect;
 	int err;
 
 	if (!frame || !mode)
@@ -4880,13 +4881,23 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 	 * Populate picture aspect ratio from either
 	 * user input (if specified) or from the CEA mode list.
 	 */
-	if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
-		mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
-		frame->picture_aspect = mode->picture_aspect_ratio;
-	else if (frame->video_code > 0)
-		frame->picture_aspect = drm_get_cea_aspect_ratio(
-						frame->video_code);
+	picture_aspect = mode->picture_aspect_ratio;
+	if (picture_aspect == HDMI_PICTURE_ASPECT_NONE)
+		picture_aspect = drm_get_cea_aspect_ratio(frame->video_code);
 
+	/*
+	 * The infoframe can't convey anything but none, 4:3
+	 * and 16:9, so if the user has asked for anything else
+	 * we can only satisfy it by specifying the right VIC.
+	 */
+	if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) {
+		if (picture_aspect !=
+		    drm_get_cea_aspect_ratio(frame->video_code))
+			return -EINVAL;
+		picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+	}
+
+	frame->picture_aspect = picture_aspect;
 	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
 	frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
 

From bfe2e2c9564dd8e7c0109a7b1cc1bf61fbbedf95 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
Date: Tue, 8 May 2018 16:39:40 +0530
Subject: [PATCH 0673/1286] video/hdmi: Reject illegal picture aspect ratios
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The AVI infoframe can only carry none, 4:3, or 16:9 picture aspect
ratios. Return an error if the user asked for something different.

Cc: Shashank Sharma <shashank.sharma@intel.com>
Cc: "Lin, Jia" <lin.a.jia@intel.com>
Cc: Akashdeep Sharma <akashdeep.sharma@intel.com>
Cc: Jim Bride <jim.bride@linux.intel.com>
Cc: Jose Abreu <Jose.Abreu@synopsys.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Emil Velikov <emil.l.velikov@gmail.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Hans Verkuil <hans.verkuil@cisco.com>
Cc: linux-media@vger.kernel.org
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Jose Abreu <joabreu@synopsys.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-6-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/video/hdmi.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 111a0ab6280a..38716eb50408 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -93,6 +93,9 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
 	if (size < length)
 		return -ENOSPC;
 
+	if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
+		return -EINVAL;
+
 	memset(buffer, 0, size);
 
 	ptr[0] = frame->type;

From 7595bda2fb4378ccbb8db1d0e8de56d15ea7f7fa Mon Sep 17 00:00:00 2001
From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
Date: Tue, 8 May 2018 16:39:41 +0530
Subject: [PATCH 0674/1286] drm: Add DRM client cap for aspect-ratio

To enable aspect-ratio support in DRM, blindly exposing the aspect
ratio information along with the mode can break existing non-atomic
user-space which has no intention of, and no support for, using this
aspect ratio information.

To avoid this, a new drm client cap is required so that a non-atomic
user-space can advertise whether it supports modes with aspect-ratio.
Based on this cap value, the kernel will decide whether or not to
expose the aspect ratio info in modes.

This patch adds the client cap for aspect-ratio.

Since no atomic user-space blows up on receiving aspect-ratio
information, the client cap for aspect-ratio is always enabled
for atomic clients.
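
For a non-atomic client, opting in could look roughly like this (a sketch
assuming libdrm's drmSetClientCap() wrapper; the device path is
hypothetical):

	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

	/* Ask the kernel to include aspect-ratio information in modes. */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1))
		; /* older kernel: aspect-ratio info will not be exposed */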

Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
Cc: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>

V3: rebase
V4: As suggested by Maarten Lankhorst, modified the commit message
    explaining the need to use the DRM cap for aspect-ratio. Also,
    tweaked the comment lines in the code for better understanding and
    clarity, as recommended by Shashank Sharma.
V5: rebase
V6: rebase
V7: rebase
V8: rebase
V9: rebase
V10: rebase
V11: rebase
V12: As suggested by Daniel Vetter and Ville Syrjala,
     always enable aspect-ratio client cap for atomic userspaces,
     if no atomic userspace breaks on aspect-ratio bits.
V13: rebase
V14: rebase

Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-7-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_ioctl.c | 9 +++++++++
 include/drm/drm_file.h      | 8 ++++++++
 include/uapi/drm/drm.h      | 7 +++++++
 3 files changed, 24 insertions(+)

diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index eadeabc393f0..0d4cfb232576 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -324,6 +324,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 			return -EINVAL;
 		file_priv->atomic = req->value;
 		file_priv->universal_planes = req->value;
+		/*
+		 * No atomic user-space blows up on aspect ratio mode bits.
+		 */
+		file_priv->aspect_ratio_allowed = req->value;
+		break;
+	case DRM_CLIENT_CAP_ASPECT_RATIO:
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->aspect_ratio_allowed = req->value;
 		break;
 	default:
 		return -EINVAL;
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 99ab50cbab00..91a65a360079 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -180,6 +180,14 @@ struct drm_file {
 	/** @atomic: True if client understands atomic properties. */
 	unsigned atomic:1;
 
+	/**
+	 * @aspect_ratio_allowed:
+	 *
+	 * True, if client can handle picture aspect ratios, and has requested
+	 * to pass this information along with the mode.
+	 */
+	unsigned aspect_ratio_allowed:1;
+
 	/**
 	 * @is_master:
 	 *
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 6fdff5945c8a..9c660e1688ab 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -680,6 +680,13 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ATOMIC	3
 
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO    4
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
 	__u64 capability;

From ace5bf0e254b10585efa938d05e95ea05ae15326 Mon Sep 17 00:00:00 2001
From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
Date: Tue, 8 May 2018 16:39:42 +0530
Subject: [PATCH 0675/1286] drm: Handle aspect ratio info in legacy modeset
 path

If user-space does not support aspect-ratio and requests a modeset
with a mode that has aspect ratio bits set, then the given user-mode
must be rejected. Secondly, while preparing a user-mode from a kernel
mode, the aspect-ratio info must not be passed on if aspect-ratio is
not supported by the user.

This patch:
1. rejects the modes with aspect-ratio info, during modeset, if the
   user does not support aspect ratio.
2. does not load the aspect-ratio info in user-mode structure, if
   aspect ratio is not supported.
3. adds helper functions for determining if aspect-ratio is expected
   in the user-mode and for allowing/disallowing the aspect-ratio if
   it's not expected.

Signed-off-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>

V3: Addressed review comments from Ville:
    Do not corrupt the current crtc state by updating aspect-ratio on
    the fly.
V4: rebase
V5: As suggested by Ville, rejected the modeset calls for modes with
    aspect ratio, if the user does not set aspect-ratio cap.
V6: Used the helper functions for determining if aspect-ratio is
    expected in the user-mode.
V7: rebase
V8: rebase
V9: rebase
V10: Modified the commit-message
V11: rebase
V12: Merged the patch for adding aspect-ratio helper functions
     with this patch.
V13: Minor modifications as suggested by Ville.
V14: Removed helper functions, as they were used only once in legacy
     modeset path, as suggested by Daniel Vetter.

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-8-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_crtc.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index a231dd5dce16..98a36e6c69ad 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -449,6 +449,8 @@ int drm_mode_getcrtc(struct drm_device *dev,
 			crtc_resp->mode_valid = 0;
 		}
 	}
+	if (!file_priv->aspect_ratio_allowed)
+		crtc_resp->mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
 	drm_modeset_unlock(&crtc->mutex);
 
 	return 0;
@@ -628,6 +630,13 @@ retry:
 			ret = -ENOMEM;
 			goto out;
 		}
+		if (!file_priv->aspect_ratio_allowed &&
+		    (crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) {
+			DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
 
 		ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
 		if (ret) {

From c3ff0cdb354f89a5b877eee61af70e6ae51de50b Mon Sep 17 00:00:00 2001
From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
Date: Tue, 8 May 2018 16:39:43 +0530
Subject: [PATCH 0676/1286] drm: Expose modes with aspect ratio, only if
 requested

We parse the EDID and add all the modes to the connector's modelist.
This adds CEA modes with aspect ratio information too, regardless of
whether user space requested this information or not.

This patch:
-prunes the modes with aspect-ratio information, from the
 drm_mode_get_connector modelist supplied to the user, if the
 user-space has not set the aspect ratio DRM client cap. However if
 such a mode is unique in the list, it is kept in the list, with
 aspect-ratio flags reset.
-prepares a list of exposed modes, which is used to find unique modes
 if aspect-ratio is not allowed.
-adds a new list_head 'export_head' in drm_display_mode, to traverse
 the list of exposed modes.

Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
Cc: Shashank Sharma <shashank.sharma@intel.com>
Cc: Jose Abreu <jose.abreu@synopsys.com>

Signed-off-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>

V3: As suggested by Ville, modified the mechanism of pruning of modes
    with aspect-ratio, if the aspect-ratio is not supported. Instead
    of straight away pruning such a mode, the mode is retained with
    aspect ratio bits set to zero, provided it is unique.
V4: rebase
V5: Addressed review comments from Ville:
    -used a pointer to store last valid mode.
    -avoided, modifying of picture_aspect_ratio in kernel mode,
     instead only flags bits of user mode are reset (if aspect-ratio
     is not supported).
V6: As suggested by Ville, corrected the mode pruning logic and
    elaborated the mode pruning logic and the assumptions taken.
V7: rebase
V8: rebase
V9: rebase
V10: rebase
V11: Fixed the issue caused in kms_3d test, and enhanced the pruning
     logic to correctly identify and prune modes with aspect-ratio,
     if aspect-ratio cap is not set.
V12: As suggested by Ville, added another list_head in
     drm_display_mode to traverse the list of exposed modes and
     avoided duplication of modes.
V13: Minor modifications, as suggested by Ville.
V14: As suggested by Daniel Vetter and Ville Syrjala, corrected the
     pruning logic to avoid any dependency on the order of modes with
     aspect-ratio.
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-9-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_connector.c | 44 +++++++++++++++++++++++++++------
 include/drm/drm_modes.h         | 13 ++++++++++
 2 files changed, 50 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index dfc8ca1e9413..9b9ba5d5ec0c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1531,8 +1531,10 @@ static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *conne
 	return connector->encoder;
 }
 
-static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
-					 const struct drm_file *file_priv)
+static bool
+drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+			     const struct list_head *export_list,
+			     const struct drm_file *file_priv)
 {
 	/*
 	 * If user-space hasn't configured the driver to expose the stereo 3D
@@ -1540,6 +1542,23 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
 	 */
 	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
 		return false;
+	/*
+	 * If user-space hasn't configured the driver to expose the modes
+	 * with aspect-ratio, don't expose them. However if such a mode
+	 * is unique, let it be exposed, but reset the aspect-ratio flags
+	 * while preparing the list of user-modes.
+	 */
+	if (!file_priv->aspect_ratio_allowed) {
+		struct drm_display_mode *mode_itr;
+
+		list_for_each_entry(mode_itr, export_list, export_head)
+			if (drm_mode_match(mode_itr, mode,
+					   DRM_MODE_MATCH_TIMINGS |
+					   DRM_MODE_MATCH_CLOCK |
+					   DRM_MODE_MATCH_FLAGS |
+					   DRM_MODE_MATCH_3D_FLAGS))
+				return false;
+	}
 
 	return true;
 }
@@ -1559,6 +1578,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	struct drm_mode_modeinfo u_mode;
 	struct drm_mode_modeinfo __user *mode_ptr;
 	uint32_t __user *encoder_ptr;
+	LIST_HEAD(export_list);
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -1607,21 +1627,31 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	/* delayed so we get modes regardless of pre-fill_modes state */
 	list_for_each_entry(mode, &connector->modes, head)
-		if (drm_mode_expose_to_userspace(mode, file_priv))
+		if (drm_mode_expose_to_userspace(mode, &export_list,
+						 file_priv)) {
+			list_add_tail(&mode->export_head, &export_list);
 			mode_count++;
+		}
 
 	/*
 	 * This ioctl is called twice, once to determine how much space is
 	 * needed, and the 2nd time to fill it.
+	 * The modes that need to be exposed to the user are maintained in the
+	 * 'export_list'. When the ioctl is called first time to determine the,
+	 * space, the export_list gets filled, to find the no.of modes. In the
+	 * 2nd time, the user modes are filled, one by one from the export_list.
 	 */
 	if ((out_resp->count_modes >= mode_count) && mode_count) {
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
-		list_for_each_entry(mode, &connector->modes, head) {
-			if (!drm_mode_expose_to_userspace(mode, file_priv))
-				continue;
-
+		list_for_each_entry(mode, &export_list, export_head) {
 			drm_mode_convert_to_umode(&u_mode, mode);
+			/*
+			 * Reset aspect ratio flags of user-mode, if modes with
+			 * aspect-ratio are not supported.
+			 */
+			if (!file_priv->aspect_ratio_allowed)
+				u_mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
 				ret = -EFAULT;
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 2f78b7ee4824..b159fe07fcf9 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -411,6 +411,19 @@ struct drm_display_mode {
 	 * Field for setting the HDMI picture aspect ratio of a mode.
 	 */
 	enum hdmi_picture_aspect picture_aspect_ratio;
+
+	/**
+	 * @export_head:
+	 *
+	 * struct list_head for modes to be exposed to the userspace.
+	 * This is to maintain a list of exposed modes while preparing
+	 * user-mode's list in drm_mode_getconnector ioctl. The purpose of this
+	 * list_head only lies in the ioctl function, and is not expected to be
+	 * used outside the function.
+	 * Once used, the stale pointers are not reset, but left as it is, to
+	 * avoid overhead of protecting it by mode_config.mutex.
+	 */
+	struct list_head export_head;
 };
 
 /**

From 222ec1618c3aceca1e61e1e73e559c647c2b946f Mon Sep 17 00:00:00 2001
From: Shashank Sharma <shashank.sharma@intel.com>
Date: Tue, 8 May 2018 16:39:44 +0530
Subject: [PATCH 0677/1286] drm: Add aspect ratio parsing in DRM layer

Current DRM layer functions don't parse aspect ratio information
while converting a user mode->kernel mode or vice versa. This
causes modeset to pick a mode with the wrong aspect ratio, eventually
causing failures in HDMI compliance test cases, due to a wrong VIC.

This patch adds aspect ratio information to DRM's mode conversion
and mode comparison functions, to make sure the kernel picks the mode
with the right aspect ratio (as per the VIC).
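
To make the flag/enum mapping concrete, a hypothetical user-space decode
of the bits filled in by drm_mode_convert_to_umode() could look like:

	struct drm_mode_modeinfo umode;	/* filled by a GETCONNECTOR call */

	switch (umode.flags & DRM_MODE_FLAG_PIC_AR_MASK) {
	case DRM_MODE_FLAG_PIC_AR_4_3:
		/* e.g. VIC 2: 720x480, 4:3 */
		break;
	case DRM_MODE_FLAG_PIC_AR_16_9:
		/* e.g. VIC 16: 1920x1080, 16:9 */
		break;
	default:
		/* no aspect ratio signalled */
		break;
	}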

Background:
This patch was once reviewed and merged, and later reverted due to
lack of DRM cap protection. This is a re-spin of this patch, this
time with DRM cap protection, to avoid aspect ratio information, when
the client doesn't request for it.

Review link: https://pw-emeril.freedesktop.org/patch/104068/
Background discussion: https://patchwork.kernel.org/patch/9379057/

Signed-off-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Lin, Jia <lin.a.jia@intel.com>
Signed-off-by: Akashdeep Sharma <akashdeep.sharma@intel.com>
Reviewed-by: Jim Bride <jim.bride@linux.intel.com> (V2)
Reviewed-by: Jose Abreu <Jose.Abreu@synopsys.com> (V4)

Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
Cc: Jim Bride <jim.bride@linux.intel.com>
Cc: Jose Abreu <Jose.Abreu@synopsys.com>
Cc: Ankit Nautiyal <ankit.k.nautiyal@intel.com>

V3: modified the aspect-ratio check in drm_mode_equal as per new flags
    provided by Ville. https://patchwork.freedesktop.org/patch/188043/
V4: rebase
V5: rebase
V6: As recommended by Ville, avoided matching of aspect-ratio in
    drm_fb_helper, while trying to find a common mode among connectors
    for the target clone mode.
V7: rebase
V8: rebase
V9: rebase
V10: rebase
V11: rebase
V12: rebase
V13: rebase
V14: rebase

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-10-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_fb_helper.c | 12 +++++++++--
 drivers/gpu/drm/drm_modes.c     | 35 ++++++++++++++++++++++++++++++++-
 2 files changed, 44 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0646b108030b..2ee1eaa66188 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2183,7 +2183,11 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
 		for (j = 0; j < i; j++) {
 			if (!enabled[j])
 				continue;
-			if (!drm_mode_equal(modes[j], modes[i]))
+			if (!drm_mode_match(modes[j], modes[i],
+					    DRM_MODE_MATCH_TIMINGS |
+					    DRM_MODE_MATCH_CLOCK |
+					    DRM_MODE_MATCH_FLAGS |
+					    DRM_MODE_MATCH_3D_FLAGS))
 				can_clone = false;
 		}
 	}
@@ -2203,7 +2207,11 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
 
 		fb_helper_conn = fb_helper->connector_info[i];
 		list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
-			if (drm_mode_equal(mode, dmt_mode))
+			if (drm_mode_match(mode, dmt_mode,
+					   DRM_MODE_MATCH_TIMINGS |
+					   DRM_MODE_MATCH_CLOCK |
+					   DRM_MODE_MATCH_FLAGS |
+					   DRM_MODE_MATCH_3D_FLAGS))
 				modes[i] = mode;
 		}
 		if (!modes[i])
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c395a244f665..7dfabdd6bcc8 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1049,7 +1049,8 @@ bool drm_mode_equal(const struct drm_display_mode *mode1,
 			      DRM_MODE_MATCH_TIMINGS |
 			      DRM_MODE_MATCH_CLOCK |
 			      DRM_MODE_MATCH_FLAGS |
-			      DRM_MODE_MATCH_3D_FLAGS);
+			      DRM_MODE_MATCH_3D_FLAGS|
+			      DRM_MODE_MATCH_ASPECT_RATIO);
 }
 EXPORT_SYMBOL(drm_mode_equal);
 
@@ -1647,6 +1648,20 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
 	out->vrefresh = in->vrefresh;
 	out->flags = in->flags;
 	out->type = in->type;
+
+	switch (in->picture_aspect_ratio) {
+	case HDMI_PICTURE_ASPECT_4_3:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_4_3;
+		break;
+	case HDMI_PICTURE_ASPECT_16_9:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_16_9;
+		break;
+	case HDMI_PICTURE_ASPECT_RESERVED:
+	default:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
+		break;
+	}
+
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
@@ -1693,6 +1708,24 @@ int drm_mode_convert_umode(struct drm_device *dev,
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 
+	/* Clearing picture aspect ratio bits from out flags,
+	 * as the aspect-ratio information is not stored in
+	 * flags for kernel-mode, but in picture_aspect_ratio.
+	 */
+	out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
+
+	switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
+	case DRM_MODE_FLAG_PIC_AR_4_3:
+		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3;
+		break;
+	case DRM_MODE_FLAG_PIC_AR_16_9:
+		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9;
+		break;
+	default:
+		out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+		break;
+	}
+
 	out->status = drm_mode_validate_driver(dev, out);
 	if (out->status != MODE_OK)
 		return -EINVAL;

From 900aa8ad21587e909603f471b6cd81fd5338ec45 Mon Sep 17 00:00:00 2001
From: Shashank Sharma <shashank.sharma@intel.com>
Date: Tue, 8 May 2018 16:39:45 +0530
Subject: [PATCH 0678/1286] drm: Add and handle new aspect ratios in DRM layer

HDMI 2.0/CEA-861-F introduces two new aspect ratios:
- 64:27
- 256:135

This patch:
-  Adds new DRM flags for to represent these new aspect ratios.
-  Adds new cases to handle these aspect ratios while converting
from user->kernel mode or vice versa.

This patch was once reviewed and merged, but later reverted due
to the lack of DRM client cap protection when exposing aspect-ratio
bits in user modes. This is a re-spin of the series with the client
cap protection in place.

The previous series can be found here:
https://pw-emeril.freedesktop.org/series/10850/

Signed-off-by: Shashank Sharma <shashank.sharma@intel.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org> (V2)
Reviewed-by: Jose Abreu <Jose.Abreu@synopsys.com> (V2)

Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Jose Abreu <Jose.Abreu@synopsys.com>
Cc: Ankit Nautiyal <ankit.k.nautiyal@intel.com>

V3: rebase
V4: rebase
V5: corrected the macro name for an aspect ratio, in a switch case.
V6: rebase
V7: rebase
V8: rebase
V9: rebase
V10: rebase
V11: rebase
V12: rebase
V13: rebase
V14: rebase

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-11-git-send-email-ankit.k.nautiyal@intel.com
---
 drivers/gpu/drm/drm_modes.c | 12 ++++++++++++
 include/uapi/drm/drm_mode.h |  6 ++++++
 2 files changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 7dfabdd6bcc8..c78ca0e84ffd 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1656,6 +1656,12 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
 	case HDMI_PICTURE_ASPECT_16_9:
 		out->flags |= DRM_MODE_FLAG_PIC_AR_16_9;
 		break;
+	case HDMI_PICTURE_ASPECT_64_27:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_64_27;
+		break;
+	case HDMI_PICTURE_ASPECT_256_135:
+		out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
+		break;
 	case HDMI_PICTURE_ASPECT_RESERVED:
 	default:
 		out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
@@ -1721,6 +1727,12 @@ int drm_mode_convert_umode(struct drm_device *dev,
 	case DRM_MODE_FLAG_PIC_AR_16_9:
 		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9;
 		break;
+	case DRM_MODE_FLAG_PIC_AR_64_27:
+		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27;
+		break;
+	case DRM_MODE_FLAG_PIC_AR_256_135:
+		out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135;
+		break;
 	default:
 		out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 		break;
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 50bcf4214ff9..4b3a1bb58e68 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -93,6 +93,8 @@ extern "C" {
 #define DRM_MODE_PICTURE_ASPECT_NONE		0
 #define DRM_MODE_PICTURE_ASPECT_4_3		1
 #define DRM_MODE_PICTURE_ASPECT_16_9		2
+#define DRM_MODE_PICTURE_ASPECT_64_27		3
+#define DRM_MODE_PICTURE_ASPECT_256_135		4
 
 /* Aspect ratio flag bitmask (4 bits 22:19) */
 #define DRM_MODE_FLAG_PIC_AR_MASK		(0x0F<<19)
@@ -102,6 +104,10 @@ extern "C" {
 			(DRM_MODE_PICTURE_ASPECT_4_3<<19)
 #define  DRM_MODE_FLAG_PIC_AR_16_9 \
 			(DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define  DRM_MODE_FLAG_PIC_AR_64_27 \
+			(DRM_MODE_PICTURE_ASPECT_64_27<<19)
+#define  DRM_MODE_FLAG_PIC_AR_256_135 \
+			(DRM_MODE_PICTURE_ASPECT_256_135<<19)
 
 #define  DRM_MODE_FLAG_ALL	(DRM_MODE_FLAG_PHSYNC |		\
 				 DRM_MODE_FLAG_NHSYNC |		\
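
With the two new codes, the full mapping carried in bits 22:19 of the UAPI
flags is: none = 0, 4:3 = 1, 16:9 = 2, 64:27 = 3, 256:135 = 4. A small
illustrative lookup, assuming the same includes as the earlier sketch
(the helper name pic_ar_name is hypothetical):

  /* Illustrative only: human-readable names for the aspect-ratio code
   * carried in the mode flags after this patch. */
  static const char *pic_ar_name(uint32_t flags)
  {
  	switch (flags & DRM_MODE_FLAG_PIC_AR_MASK) {
  	case DRM_MODE_FLAG_PIC_AR_4_3:     return "4:3";
  	case DRM_MODE_FLAG_PIC_AR_16_9:    return "16:9";
  	case DRM_MODE_FLAG_PIC_AR_64_27:   return "64:27";
  	case DRM_MODE_FLAG_PIC_AR_256_135: return "256:135";
  	default:                           return "none";
  	}
  }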

From 8e021151948c56eddf68affc6817965dabbcaddd Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Sat, 12 May 2018 03:03:12 +0530
Subject: [PATCH 0679/1286] drm/i915: Enable display workaround 827 for all
 planes, v2.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The workaround was applied only to the primary plane, but is required
on all planes. Iterate over all planes in the crtc atomic check to see
if the workaround is enabled, and only perform the actual toggling in
the pre/post plane update functions.

Changes since v1:
- Track active NV12 planes in a nv12_planes bitmask. (Ville)

v2: Removing BROXTON support for NV12 due to WA826

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1526074397-10457-2-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_atomic_plane.c |  7 +++-
 drivers/gpu/drm/i915/intel_display.c      | 43 ++++++++++++++---------
 drivers/gpu/drm/i915/intel_drv.h          |  1 +
 3 files changed, 33 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 7481ce85746b..6d068786eb41 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -183,11 +183,16 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 	}
 
 	/* FIXME pre-g4x don't work like this */
-	if (intel_state->base.visible)
+	if (state->visible)
 		crtc_state->active_planes |= BIT(intel_plane->id);
 	else
 		crtc_state->active_planes &= ~BIT(intel_plane->id);
 
+	if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
+		crtc_state->nv12_planes |= BIT(intel_plane->id);
+	else
+		crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+
 	return intel_plane_atomic_calc_changes(old_crtc_state,
 					       &crtc_state->base,
 					       old_plane_state,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cdfe0951d171..3bc12958f878 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5142,6 +5142,22 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
 	return !old_crtc_state->ips_enabled;
 }
 
+static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
+			  const struct intel_crtc_state *crtc_state)
+{
+	if (!crtc_state->nv12_planes)
+		return false;
+
+	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+		return false;
+
+	if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
+	    IS_CANNONLAKE(dev_priv))
+		return true;
+
+	return false;
+}
+
 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
@@ -5166,7 +5182,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 	if (old_primary_state) {
 		struct drm_plane_state *new_primary_state =
 			drm_atomic_get_new_plane_state(old_state, primary);
-		struct drm_framebuffer *fb = new_primary_state->fb;
 
 		intel_fbc_post_update(crtc);
 
@@ -5174,15 +5189,12 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 		    (needs_modeset(&pipe_config->base) ||
 		     !old_primary_state->visible))
 			intel_post_enable_primary(&crtc->base, pipe_config);
-
-		/* Display WA 827 */
-		if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
-		    IS_CANNONLAKE(dev_priv)) {
-			if (fb && fb->format->format == DRM_FORMAT_NV12)
-				skl_wa_clkgate(dev_priv, crtc->pipe, false);
-		}
-
 	}
+
+	/* Display WA 827 */
+	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
+	    !needs_nv12_wa(dev_priv, pipe_config))
+		skl_wa_clkgate(dev_priv, crtc->pipe, false);
 }
 
 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
@@ -5206,14 +5218,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 		struct intel_plane_state *new_primary_state =
 			intel_atomic_get_new_plane_state(old_intel_state,
 							 to_intel_plane(primary));
-		struct drm_framebuffer *fb = new_primary_state->base.fb;
-
-		/* Display WA 827 */
-		if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
-		    IS_CANNONLAKE(dev_priv)) {
-			if (fb && fb->format->format == DRM_FORMAT_NV12)
-				skl_wa_clkgate(dev_priv, crtc->pipe, true);
-		}
 
 		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
 		/*
@@ -5225,6 +5229,11 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
 	}
 
+	/* Display WA 827 */
+	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
+	    needs_nv12_wa(dev_priv, pipe_config))
+		skl_wa_clkgate(dev_priv, crtc->pipe, true);
+
 	/*
 	 * Vblank time updates from the shadow to live plane control register
 	 * are blocked if the memory self-refresh mode is active at that
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 52337f487ebc..038870abe989 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -890,6 +890,7 @@ struct intel_crtc_state {
 
 	/* bitmask of visible planes (enum plane_id) */
 	u8 active_planes;
+	u8 nv12_planes;
 
 	/* HDMI scrambling status */
 	bool hdmi_scrambling;
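
The essence of the change is that WA 827 is now keyed off per-crtc state
rather than the primary plane's framebuffer: needs_nv12_wa() folds the
nv12_planes bitmask and the platform checks together, and the pre/post
plane update hooks only toggle the workaround on a transition. A condensed
sketch of that transition logic (wa_827_update is a hypothetical wrapper;
in the patch the two branches live in intel_pre_plane_update() and
intel_post_plane_update()):

  static void wa_827_update(struct drm_i915_private *dev_priv,
  			  struct intel_crtc *crtc,
  			  const struct intel_crtc_state *old_crtc_state,
  			  const struct intel_crtc_state *new_crtc_state)
  {
  	bool was_needed = needs_nv12_wa(dev_priv, old_crtc_state);
  	bool is_needed = needs_nv12_wa(dev_priv, new_crtc_state);

  	if (!was_needed && is_needed)		/* pre-plane-update path */
  		skl_wa_clkgate(dev_priv, crtc->pipe, true);
  	else if (was_needed && !is_needed)	/* post-plane-update path */
  		skl_wa_clkgate(dev_priv, crtc->pipe, false);
  }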

From 6deef9b6057d9432e08dab3643be92517e0d15de Mon Sep 17 00:00:00 2001
From: Vidya Srinivas <vidya.srinivas@intel.com>
Date: Sat, 12 May 2018 03:03:13 +0530
Subject: [PATCH 0680/1286] drm/i915: Enable Display WA 0528

Possible hang with NV12 plane surface formats.
WA: When the plane source pixel format is NV12,
the CHICKEN_PIPESL_* register bit 22 must be set to 1
and the render decompression must not be enabled
on any of the planes in that pipe.

v2: removed unnecessary POSTING_READ

v3: Added RB from Maarten

v4: Removed support for NV12 for BROXTON

Credits-to: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1526074397-10457-3-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3bc12958f878..941895310625 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -488,10 +488,22 @@ static const struct intel_limit intel_limits_bxt = {
 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
 };
 
+static void
+skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
+{
+	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+		return;
+
+	if (enable)
+		I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
+	else
+		I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
+}
+
 static void
 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
 {
-	if (IS_SKYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
 		return;
 
 	if (enable)
@@ -5193,8 +5205,10 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 
 	/* Display WA 827 */
 	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
-	    !needs_nv12_wa(dev_priv, pipe_config))
+	    !needs_nv12_wa(dev_priv, pipe_config)) {
 		skl_wa_clkgate(dev_priv, crtc->pipe, false);
+		skl_wa_528(dev_priv, crtc->pipe, false);
+	}
 }
 
 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
@@ -5231,8 +5245,10 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 
 	/* Display WA 827 */
 	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
-	    needs_nv12_wa(dev_priv, pipe_config))
+	    needs_nv12_wa(dev_priv, pipe_config)) {
 		skl_wa_clkgate(dev_priv, crtc->pipe, true);
+		skl_wa_528(dev_priv, crtc->pipe, true);
+	}
 
 	/*
 	 * Vblank time updates from the shadow to live plane control register

From 5d794288fc562fd584c33a0834b45c134d6202c8 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Sat, 12 May 2018 03:03:14 +0530
Subject: [PATCH 0681/1286] drm/i915: Add skl_check_nv12_surface for NV12

We skip src truncation/adjustments for the
NV12 case and handle the sizes directly.
Without this, pipe FIFO underruns are seen on APL/KBL.

v2: For NV12, make the src coordinates a multiple of 4

v3: Moving all the src coords handling code for NV12
to skl_check_nv12_surface

v4: Added RB from Mika

v5: Rebased the series. Removed the multiple-of-4 checks in
skl_update_scaler; added an NV12 condition in intel_check_sprite_plane
where src x/w is checked for being a multiple of 2 for YUV planes.

v6: Made changes to skl_check_nv12_surface as per WA#1106

Reviewed-by: Mika Kahola <mika.kahola@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1526074397-10457-4-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 29 ++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/intel_sprite.c  |  3 ++-
 2 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 941895310625..c8ff4b705fff 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3102,6 +3102,29 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
 	return 0;
 }
 
+static int
+skl_check_nv12_surface(const struct intel_crtc_state *crtc_state,
+		       struct intel_plane_state *plane_state)
+{
+	/* Display WA #1106 */
+	if (plane_state->base.rotation !=
+	    (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
+	    plane_state->base.rotation != DRM_MODE_ROTATE_270)
+		return 0;
+
+	/*
+	 * src coordinates are rotated here.
+	 * We check height but report it as width
+	 */
+	if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
+		DRM_DEBUG_KMS("src width must be multiple "
+			      "of 4 for rotated NV12\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 {
 	const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3185,6 +3208,9 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
 	 * the main surface setup depends on it.
 	 */
 	if (fb->format->format == DRM_FORMAT_NV12) {
+		ret = skl_check_nv12_surface(crtc_state, plane_state);
+		if (ret)
+			return ret;
 		ret = skl_check_nv12_aux_surface(plane_state);
 		if (ret)
 			return ret;
@@ -4806,8 +4832,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	}
 
 	if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
-	    (src_h < SKL_MIN_YUV_420_SRC_H || (src_w % 4) != 0 ||
-	     (src_h % 4) != 0)) {
+	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
 		DRM_DEBUG_KMS("NV12: src dimensions not met\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9cd4be020840..26de04445aca 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1060,7 +1060,8 @@ intel_check_sprite_plane(struct intel_plane *plane,
 		src_y = src->y1 >> 16;
 		src_h = drm_rect_height(src) >> 16;
 
-		if (intel_format_is_yuv(fb->format->format)) {
+		if (intel_format_is_yuv(fb->format->format) &&
+		    fb->format->format != DRM_FORMAT_NV12) {
 			src_x &= ~1;
 			src_w &= ~1;
 

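To summarise Display WA #1106 as implemented above: the restriction only
applies to the 90-degree-plus-X-reflection and 270-degree rotations, and it
is the pre-rotation source height (which becomes the on-screen width, hence
the wording of the debug message) that must be a multiple of 4. A minimal
sketch of the same predicate (nv12_rotation_ok is a hypothetical name):

  static bool nv12_rotation_ok(unsigned int rotation, unsigned int src_h)
  {
  	if (rotation != (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
  	    rotation != DRM_MODE_ROTATE_270)
  		return true;	/* Display WA #1106 does not apply */

  	return (src_h % 4) == 0;	/* height checked, reported as width */
  }
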
From e44134f2673cf104c0ce42bfdebaf9f11bbef997 Mon Sep 17 00:00:00 2001
From: Chandra Konduru <chandra.konduru@intel.com>
Date: Sat, 12 May 2018 03:03:15 +0530
Subject: [PATCH 0682/1286] drm/i915: Add NV12 support to
 intel_framebuffer_init

This patch adds NV12 as a supported format
to intel_framebuffer_init and performs various checks.

v2:
-Fix an issue in checks added (Chandra Konduru)

v3: rebased (me)

v4: Review comments by Ville addressed
Added platform check for NV12 in intel_framebuffer_init
Removed offset checks for NV12 case

v5: Addressed review comments by Clinton A Taylor
This NV12 support only works correctly on SKL.
Plane color space conversion is different on GLK and later platforms
causing the colors to display incorrectly.
Ville's plane color space property patch series
in review will fix this issue.
- Restricted the NV12 case in intel_framebuffer_init to
SKL and BXT only.

v6: Rebased (me)

v7: Addressed review comments by Ville
Restricting the NV12 to BXT for now.

v8: Rebased (me)
Restricting the NV12 changes to BXT and KBL for now.

v9: Rebased (me)

v10: NV12 supported by all GEN >= 9.
Making this change in intel_framebuffer_init. This is
part of addressing Maarten's review comments.
Comment under v8 no longer applicable

v11: Addressed review comments from Shashank Sharma

v12: Adding Reviewed By from Shashank Sharma

v13: Addressed review comments from Juha-Pekka Heikkila
"NV12 not to be supported by SKL"

v14: Addressed review comments from Maarten.
Add checks for fb width height for NV12 and fail the fb
creation if check fails. Added reviewed by from
Juha-Pekka Heikkila

v15: Rebased the series

v16: Setting the minimum value during fb creation to 16
as per Bspec for NV12. Earlier the minimum was expected
to be > 16. Now changed it to >= 16.

v17: Adding a restriction to framebuffer_init - the fb
width and height must be a multiple of 4

v18: Added RB from Maarten. Included Maarten's review comments
Dont allow CCS formats for fb creation of NV12

v19: Review comments from Maarten addressed -
Removing BROXTON support for NV12 due to WA826

Credits-to: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tested-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Reviewed-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Signed-off-by: Chandra Konduru <chandra.konduru@intel.com>
Signed-off-by: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1526074397-10457-5-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c8ff4b705fff..1492cf9d71d3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14307,6 +14307,20 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 			goto err;
 		}
 		break;
+	case DRM_FORMAT_NV12:
+		if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS ||
+		    mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) {
+			DRM_DEBUG_KMS("RC not to be enabled with NV12\n");
+			goto err;
+		}
+		if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
+		    IS_BROXTON(dev_priv)) {
+			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+				      drm_get_format_name(mode_cmd->pixel_format,
+							  &format_name));
+			goto err;
+		}
+		break;
 	default:
 		DRM_DEBUG_KMS("unsupported pixel format: %s\n",
 			      drm_get_format_name(mode_cmd->pixel_format, &format_name));
@@ -14319,6 +14333,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 
 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
 
+	if (fb->format->format == DRM_FORMAT_NV12 &&
+	    (fb->width < SKL_MIN_YUV_420_SRC_W ||
+	     fb->height < SKL_MIN_YUV_420_SRC_H ||
+	     (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
+		DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
+		return -EINVAL;
+	}
+
 	for (i = 0; i < fb->format->num_planes; i++) {
 		u32 stride_alignment;
 

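Seen from userspace, the new checks mean an NV12 framebuffer is only
accepted when both dimensions are at least 16 (SKL_MIN_YUV_420_SRC_W/H per
the changelog), both are multiples of 4, and no CCS modifier is used. A
minimal libdrm sketch under those assumptions; buffer allocation and the
exact pitch/offset layout are illustrative, and add_nv12_fb is a
hypothetical helper:

  #include <errno.h>
  #include <stdint.h>
  #include <xf86drm.h>
  #include <xf86drmMode.h>
  #include <drm_fourcc.h>

  static int add_nv12_fb(int fd, uint32_t width, uint32_t height,
  		       uint32_t bo_handle, uint32_t pitch, uint32_t *fb_id)
  {
  	/* Plane 0 is Y, plane 1 is interleaved CbCr at half vertical size. */
  	uint32_t handles[4] = { bo_handle, bo_handle };
  	uint32_t pitches[4] = { pitch, pitch };
  	uint32_t offsets[4] = { 0, pitch * height };

  	/* Mirror the kernel-side checks to fail early with a clear error. */
  	if (width < 16 || height < 16 || (width % 4) || (height % 4))
  		return -EINVAL;

  	return drmModeAddFB2(fd, width, height, DRM_FORMAT_NV12,
  			     handles, pitches, offsets, fb_id, 0);
  }
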
From c0b56ab544c0aaefc0aa5526471f72e2324e002c Mon Sep 17 00:00:00 2001
From: Chandra Konduru <chandra.konduru@intel.com>
Date: Sat, 12 May 2018 03:03:16 +0530
Subject: [PATCH 0683/1286] drm/i915: Add NV12 as supported format for primary
 plane
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch adds NV12 to the list of supported formats for
the primary plane.

v2: Rebased (Chandra Konduru)

v3: Rebased (me)

v4: Review comments by Ville addressed
Removed the skl_primary_formats_with_nv12 and
added NV12 case in existing skl_primary_formats

v5: Rebased (me)

v6: Missed the Tested-by/Reviewed-by in the previous series
Adding the same to commit message in this version.

v7: Review comments by Ville addressed
	Restricting the NV12 for BXT and on PIPE A and B
Rebased (me)

v8: Rebased (me)
Modified restricting the NV12 support for both BXT and KBL.

v9: Rebased (me)

v10: Addressed review comments from Maarten.
	Adding NV12 inside skl_primary_formats itself.

v11: Adding Reviewed By tag from Shashank Sharma

v12: Addressed review comments from Juha-Pekka Heikkila
"NV12 not to be supported by SKL"

v13: Addressed review comments from Ville
Added skl_pri_planar_formats to include NV12
and skl_plane_has_planar function to check for
NV12 support on plane. Added NV12 format to
skl_mod_supported. These were review comments
from Kristian Høgsberg <hoegsberg@gmail.com>

v14: Added reviewed by from Juha-Pekka Heikkila

v15: Rebased the series

v16: Added all tiling support under mod supported
for NV12. Credits to Megha Aggarwal

v17: Added RB by Maarten and Kristian

v18: Review comments from Maarten addressed -
Removing BROXTON support for NV12 due to WA826

v19: Addressed review comments from Maarten
Make changes to skl_mod_supported

Credits-to: Megha Aggarwal megha.aggarwal@intel.com
Credits-to: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tested-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Kristian Høgsberg <hoegsberg@gmail.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Signed-off-by: Chandra Konduru <chandra.konduru@intel.com>
Signed-off-by: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1526074397-10457-6-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_display.c | 50 ++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/intel_drv.h     |  2 ++
 2 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1492cf9d71d3..ad588d564198 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -88,6 +88,22 @@ static const uint32_t skl_primary_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
+static const uint32_t skl_pri_planar_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+	DRM_FORMAT_NV12,
+};
+
 static const uint64_t skl_format_modifiers_noccs[] = {
 	I915_FORMAT_MOD_Yf_TILED,
 	I915_FORMAT_MOD_Y_TILED,
@@ -13218,6 +13234,7 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_NV12:
 		if (modifier == I915_FORMAT_MOD_Yf_TILED)
 			return true;
 		/* fall through */
@@ -13425,6 +13442,30 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
 	return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
 }
 
+bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, enum plane_id plane_id)
+{
+	if (plane_id == PLANE_PRIMARY) {
+		if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+			return false;
+		else if ((INTEL_GEN(dev_priv) == 9 && pipe == PIPE_C) &&
+			 !IS_GEMINILAKE(dev_priv))
+			return false;
+	} else if (plane_id >= PLANE_SPRITE0) {
+		if (plane_id == PLANE_CURSOR)
+			return false;
+		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) == 10) {
+			if (plane_id != PLANE_SPRITE0)
+				return false;
+		} else {
+			if (plane_id != PLANE_SPRITE0 || pipe == PIPE_C ||
+			    IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+				return false;
+		}
+	}
+	return true;
+}
+
 static struct intel_plane *
 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
@@ -13485,8 +13526,13 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 	primary->check_plane = intel_check_primary_plane;
 
 	if (INTEL_GEN(dev_priv) >= 9) {
-		intel_primary_formats = skl_primary_formats;
-		num_formats = ARRAY_SIZE(skl_primary_formats);
+		if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
+			intel_primary_formats = skl_pri_planar_formats;
+			num_formats = ARRAY_SIZE(skl_pri_planar_formats);
+		} else {
+			intel_primary_formats = skl_primary_formats;
+			num_formats = ARRAY_SIZE(skl_primary_formats);
+		}
 
 		if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
 			modifiers = skl_format_modifiers_ccs;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 038870abe989..d7dbca1aabff 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -2082,6 +2082,8 @@ bool skl_plane_get_hw_state(struct intel_plane *plane);
 bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
 		       enum pipe pipe, enum plane_id plane_id);
 bool intel_format_is_yuv(uint32_t format);
+bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, enum plane_id plane_id);
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);

From 429204f1059909245d8f73b66aa729c6c2807cae Mon Sep 17 00:00:00 2001
From: Chandra Konduru <chandra.konduru@intel.com>
Date: Sat, 12 May 2018 03:03:17 +0530
Subject: [PATCH 0684/1286] drm/i915: Add NV12 as supported format for sprite
 plane
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch adds NV12 to the list of supported formats for the sprite plane.

v2: Rebased (me)

v3: Review comments by Ville addressed
- Removed skl_plane_formats_with_nv12 and added
NV12 case in existing skl_plane_formats
- Added the 10bpc RGB formats

v4: Addressed review comments from Clinton A Taylor
"Why are we adding 10 bit RGB formats with the NV12 series patches?
Trying to set XR30 or AB30 results in error returned even though
the modes are advertised for the planes"
- Removed 10bit RGB formats added previously with NV12 series

v5: Missed the Tested-by/Reviewed-by in the previous series
Adding the same to commit message in this version.
Addressed review comments from Clinton A Taylor
"Why are we adding 10 bit RGB formats with the NV12 series patches?
Trying to set XR30 or AB30 results in error returned even though
the modes are advertised for the planes"
- Previous version has 10bit RGB format removed from VLV formats
by mistake. Fixing that in this version.
Removed 10bit RGB formats added previously with NV12 series
for SKL.

v6: Addressed review comments by Ville
Restricting the NV12 to BXT and PIPE A and B

v7: Rebased (me)

v8: Rebased (me)
Restricting NV12 changes to BXT and KBL
Restricting NV12 changes for plane 0 (overlay)

v9: Rebased (me)

v10: Addressed review comments from Maarten.
Adding NV12 to skl_plane_formats itself.

v11: Addressed review comments from Shashank Sharma

v12: Addressed review comments from Shashank Sharma
Made the condition in intel_sprite_plane_create
simple and easy to read as suggested.

v13: Adding reviewed by tag from Shashank Sharma
Addressed review comments from Juha-Pekka Heikkila
"NV12 not to be supported by SKL"

v14: Addressed review comments from Ville
Added skl_planar_formats to include NV12
and a check skl_plane_has_planar in sprite create
Added NV12 format to skl_mod_supported. These were
review comments from Kristian Høgsberg <hoegsberg@gmail.com>

v15: Added reviewed by from Juha-Pekka Heikkila

v16: Rebased the series

v17: Added all tiling under mod supported for NV12
Credits to Megha Aggarwal

v18: Added RB by Maarten and Kristian

v19: Addressed review comments from Maarten
Made modification to skl_mod_supported

Credits-to: Megha Aggarwal <megha.aggarwal@intel.com>
Credits-to: Kristian Høgsberg <hoegsberg@gmail.com>
Reviewed-by: Kristian Høgsberg <hoegsberg@gmail.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tested-by: Clinton Taylor <clinton.a.taylor@intel.com>
Reviewed-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Shashank Sharma <shashank.sharma@intel.com>
Reviewed-by: Clinton Taylor <clinton.a.taylor@intel.com>
Signed-off-by: Chandra Konduru <chandra.konduru@intel.com>
Signed-off-by: Nabendu Maiti <nabendu.bikash.maiti@intel.com>
Signed-off-by: Vidya Srinivas <vidya.srinivas@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1526074397-10457-7-git-send-email-vidya.srinivas@intel.com
---
 drivers/gpu/drm/i915/intel_sprite.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 26de04445aca..1597938d2451 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1254,6 +1254,19 @@ static uint32_t skl_plane_formats[] = {
 	DRM_FORMAT_VYUY,
 };
 
+static uint32_t skl_planar_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+	DRM_FORMAT_NV12,
+};
+
 static const uint64_t skl_plane_format_modifiers_noccs[] = {
 	I915_FORMAT_MOD_Yf_TILED,
 	I915_FORMAT_MOD_Y_TILED,
@@ -1348,6 +1361,7 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_NV12:
 		if (modifier == I915_FORMAT_MOD_Yf_TILED)
 			return true;
 		/* fall through */
@@ -1447,8 +1461,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 		intel_plane->disable_plane = skl_disable_plane;
 		intel_plane->get_hw_state = skl_plane_get_hw_state;
 
-		plane_formats = skl_plane_formats;
-		num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+		if (skl_plane_has_planar(dev_priv, pipe,
+					 PLANE_SPRITE0 + plane)) {
+			plane_formats = skl_planar_formats;
+			num_plane_formats = ARRAY_SIZE(skl_planar_formats);
+		} else {
+			plane_formats = skl_plane_formats;
+			num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+		}
 
 		if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
 			modifiers = skl_plane_format_modifiers_ccs;

From ca6acc25250a1dc101c5a541b4f58bcc1dd65de5 Mon Sep 17 00:00:00 2001
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Date: Tue, 8 May 2018 15:41:54 +0300
Subject: [PATCH 0685/1286] drm/i915/gtt: Trust the uncached store to flush wcb

Not all architectures guarantee that an uncached read will
flush the write-combining buffer, so marking the flush
explicitly is recommended [1].

However, we know the architecture we are operating on and can
avoid the wmb(), as the UC store will flush the WCB [2].

Omit the wmb() before the invalidate as redundant.

v2: squash combining and removal (Chris)
v3: remove obsolete comments about posting reads (Chris)

References: http://yarchive.net/comp/linux/write_combining.html [1]
References: http://download.intel.com/design/PentiumII/applnots/24442201.pdf [2]
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180508124154.14586-1-mika.kuoppala@linux.intel.com
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c879bfd9294f..6eae9e1ed8be 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -110,7 +110,8 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
 {
-	/* Note that as an uncached mmio write, this should flush the
+	/*
+	 * Note that as an uncached mmio write, this will flush the
 	 * WCB of the writes into the GGTT before it triggers the invalidate.
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -2418,11 +2419,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	for_each_sgt_dma(addr, sgt_iter, vma->pages)
 		gen8_set_pte(gtt_entries++, pte_encode | addr);
 
-	wmb();
-
-	/* This next bit makes the above posting read even more important. We
-	 * want to flush the TLBs only after we're certain all the PTE updates
-	 * have finished.
+	/*
+	 * We want to flush the TLBs only after we're certain all the PTE
+	 * updates have finished.
 	 */
 	ggtt->invalidate(vm->i915);
 }
@@ -2460,11 +2459,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	dma_addr_t addr;
 	for_each_sgt_dma(addr, iter, vma->pages)
 		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
-	wmb();
 
-	/* This next bit makes the above posting read even more important. We
-	 * want to flush the TLBs only after we're certain all the PTE updates
-	 * have finished.
+	/*
+	 * We want to flush the TLBs only after we're certain all the PTE
+	 * updates have finished.
 	 */
 	ggtt->invalidate(vm->i915);
 }

From cc38cae7c4e9350c93aa2da506086415fecd6e4a Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:23 -0700
Subject: [PATCH 0686/1286] drm/i915/icl: Introduce initial Icelake Workarounds

Inherit workarounds from previous platforms that are still valid for
Icelake.

v2: GEN7_ROW_CHICKEN2 is masked
v3:
  - Since it has been fixed already in upstream, removed the TODO
    comment about WA_SET_BIT for WaInPlaceDecompressionHang.
  - Squashed with this patch:
      drm/i915/icl: add icelake_init_clock_gating()
    from Paulo Zanoni <paulo.r.zanoni@intel.com>
  - Squashed with this patch:
      drm/i915/icl: WaForceEnableNonCoherent
    from Oscar Mateo <oscar.mateo@intel.com>
  - WaPushConstantDereferenceHoldDisable is now Wa_1604370585 and
    applies to B0 as well.
  - WaPipeControlBefore3DStateSamplePattern WABB was being applied
    to ICL incorrectly.
v4:
  - Wrap the commit message
  - s/dev_priv/p to please checkpatch
v5: Rebased on top of the WA refactoring
v6: Rebased on top of further whitelist registers refactoring (Michel)
v7: Added WaRsForcewakeAddDelayForAck
v8: s/ICL_HDC_CHICKEN0/ICL_HDC_MODE (Mika)
v9:
  - C, not lisp (Chris)
  - WaIncreaseDefaultTLBEntries is the same for GEN > 9_LP (Tvrtko)

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tomasz Lis <tomasz.lis@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-2-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h          |  9 +++++
 drivers/gpu/drm/i915/i915_gem_gtt.c      |  6 ++--
 drivers/gpu/drm/i915/i915_reg.h          |  1 +
 drivers/gpu/drm/i915/intel_lrc.c         |  2 ++
 drivers/gpu/drm/i915/intel_pm.c          |  4 ++-
 drivers/gpu/drm/i915/intel_uncore.c      |  7 ++--
 drivers/gpu/drm/i915/intel_workarounds.c | 46 ++++++++++++++++++++++++
 7 files changed, 69 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 24c5e4765afd..57fb3aa09db0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2470,6 +2470,15 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_CNL_REVID(p, since, until) \
 	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
 
+#define ICL_REVID_A0		0x0
+#define ICL_REVID_A2		0x1
+#define ICL_REVID_B0		0x3
+#define ICL_REVID_B2		0x4
+#define ICL_REVID_C0		0x5
+
+#define IS_ICL_REVID(p, since, until) \
+	(IS_ICELAKE(p) && IS_REVID(p, since, until))
+
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6eae9e1ed8be..c01d6dbe269a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2138,15 +2138,15 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
 	 * called on driver load and after a GPU reset, so you can place
 	 * workarounds here even if they get overwritten by GPU reset.
 	 */
-	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
+	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
 	if (IS_BROADWELL(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
 	else if (IS_CHERRYVIEW(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
-	else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
-		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
 	else if (IS_GEN9_LP(dev_priv))
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+	else if (INTEL_GEN(dev_priv) >= 9)
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
 
 	/*
 	 * To support 64K PTEs we need to first enable the use of the
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 085928c9005e..2b22d4d3b0df 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7238,6 +7238,7 @@ enum {
 /* GEN8 chicken */
 #define HDC_CHICKEN0				_MMIO(0x7300)
 #define CNL_HDC_CHICKEN0			_MMIO(0xE5F0)
+#define ICL_HDC_MODE				_MMIO(0xE5F4)
 #define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE	(1<<15)
 #define  HDC_FENCE_DEST_SLM_DISABLE		(1<<14)
 #define  HDC_DONOT_FETCH_MEM_WHEN_MASKED	(1<<11)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d3c00f60c1b0..243d40369e6a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1682,6 +1682,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 		return -EINVAL;
 
 	switch (INTEL_GEN(engine->i915)) {
+	case 11:
+		return 0;
 	case 10:
 		wa_bb_fn[0] = gen10_init_indirectctx_bb;
 		wa_bb_fn[1] = NULL;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4126132eb707..9c6e48cc9514 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9190,7 +9190,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
  */
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 {
-	if (IS_CANNONLAKE(dev_priv))
+	if (IS_ICELAKE(dev_priv))
+		dev_priv->display.init_clock_gating = nop_init_clock_gating;
+	else if (IS_CANNONLAKE(dev_priv))
 		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
 	else if (IS_COFFEELAKE(dev_priv))
 		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d6e20f0f4c28..448293eb638d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -139,7 +139,9 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
 	 * in the hope that the original ack will be delivered along with
 	 * the fallback ack.
 	 *
-	 * This workaround is described in HSDES #1604254524
+	 * This workaround is described in HSDES #1604254524 and it's known as:
+	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
+	 * although the name is a bit misleading.
 	 */
 
 	pass = 1;
@@ -1394,7 +1396,8 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) >= 11) {
 		int i;
 
-		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+		dev_priv->uncore.funcs.force_wake_get =
+			fw_domains_get_with_fallback;
 		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index ec9d340fcb00..73d02d3785d4 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -441,6 +441,27 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
+static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+{
+	/* Wa_1604370585:icl (pre-prod)
+	 * Formerly known as WaPushConstantDereferenceHoldDisable
+	 */
+	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+				  PUSH_CONSTANT_DEREF_DISABLE);
+
+	/* WaForceEnableNonCoherent:icl
+	 * This is not the same workaround as in early Gen9 platforms, where
+	 * lacking this could cause system hangs, but coherency performance
+	 * overhead is high and only a few compute workloads really need it
+	 * (the register is whitelisted in hardware now, so UMDs can opt in
+	 * for coherency if they have a good reason).
+	 */
+	WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
+
+	return 0;
+}
+
 int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 {
 	int err = 0;
@@ -465,6 +486,8 @@ int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 		err = cfl_ctx_workarounds_init(dev_priv);
 	else if (IS_CANNONLAKE(dev_priv))
 		err = cnl_ctx_workarounds_init(dev_priv);
+	else if (IS_ICELAKE(dev_priv))
+		err = icl_ctx_workarounds_init(dev_priv);
 	else
 		MISSING_CASE(INTEL_GEN(dev_priv));
 	if (err)
@@ -663,6 +686,21 @@ static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
 }
 
+static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+{
+	/* This is not an Wa. Enable for better image quality */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+
+	/* WaInPlaceDecompressionHang:icl */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+					    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+	/* WaPipelineFlushCoherentLines:icl */
+	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+				   GEN8_LQSC_FLUSH_COHERENT_LINES);
+}
+
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 {
 	if (INTEL_GEN(dev_priv) < 8)
@@ -683,6 +721,8 @@ void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 		cfl_gt_workarounds_apply(dev_priv);
 	else if (IS_CANNONLAKE(dev_priv))
 		cnl_gt_workarounds_apply(dev_priv);
+	else if (IS_ICELAKE(dev_priv))
+		icl_gt_workarounds_apply(dev_priv);
 	else
 		MISSING_CASE(INTEL_GEN(dev_priv));
 }
@@ -761,6 +801,10 @@ static void cnl_whitelist_build(struct whitelist *w)
 	whitelist_reg(w, GEN8_CS_CHICKEN1);
 }
 
+static void icl_whitelist_build(struct whitelist *w)
+{
+}
+
 static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
 					 struct whitelist *w)
 {
@@ -789,6 +833,8 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
 		cfl_whitelist_build(w);
 	else if (IS_CANNONLAKE(i915))
 		cnl_whitelist_build(w);
+	else if (IS_ICELAKE(i915))
+		icl_whitelist_build(w);
 	else
 		MISSING_CASE(INTEL_GEN(i915));
 

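Two different write styles show up in these workaround patches: the context
workarounds go through WA_SET_BIT_MASKED(), which targets "masked" registers
where the upper 16 bits of the written value select which of the lower 16
bits take effect, while the GT workarounds use plain read-modify-write
I915_WRITE() calls. A simplified sketch of the masked-write helpers, given
only as an aid to reading the diff (the real definitions live in
i915_reg.h, and broadly speaking WA_SET_BIT_MASKED() records the masked
write in the driver's context workaround table so it is applied to every
new context):

  /* Simplified: upper half is the write-enable mask, lower half the value. */
  #define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
  #define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
  #define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)
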
From d65dc3e40b80ab63fb0d70c947558d0f49f912da Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:24 -0700
Subject: [PATCH 0687/1286] drm/i915/icl: Enable Sampler DFR

Sampler Dynamic Frequency Rebalancing (DFR) aims to reduce Sampler
power by dynamically changing its clock frequency in low-throughput
conditions. This patch enables it by default on Gen11.

v2: Wrong operation to clear the bit (Praveen)
v3: Rebased on top of the WA refactoring
v4: Move to icl_init_clock_gating, since it's not a WA (Rodrigo)
v5: C, not lisp (Chris)

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Praveen Paneri <praveen.paneri@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-3-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h | 3 +++
 drivers/gpu/drm/i915/intel_pm.c | 9 ++++++++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2b22d4d3b0df..6aad16ee44ae 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8253,6 +8253,9 @@ enum {
 #define GEN8_GARBCNTL                   _MMIO(0xB004)
 #define   GEN9_GAPS_TSV_CREDIT_DISABLE  (1<<7)
 
+#define GEN10_DFR_RATIO_EN_AND_CHICKEN	_MMIO(0x9550)
+#define   DFR_DISABLE			(1 << 9)
+
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1(slice)		_MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9c6e48cc9514..b85229e153c4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8664,6 +8664,13 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 }
 
+static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+	/* This is not an Wa. Enable to reduce Sampler power */
+	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
+		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+}
+
 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	if (!HAS_PCH_CNP(dev_priv))
@@ -9191,7 +9198,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 {
 	if (IS_ICELAKE(dev_priv))
-		dev_priv->display.init_clock_gating = nop_init_clock_gating;
+		dev_priv->display.init_clock_gating = icl_init_clock_gating;
 	else if (IS_CANNONLAKE(dev_priv))
 		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
 	else if (IS_COFFEELAKE(dev_priv))

From 5bcebe76704f43d598c8a8da8dd77ffd3afd754e Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:25 -0700
Subject: [PATCH 0688/1286] drm/i915/icl: WaGAPZPriorityScheme

The default GAPZ arbiter priority value at power-on has been found
to be incorrect.

v2: Now renamed to Wa_1405543622
v3: Rebased on top of the WA refactoring
v4: Added HSDES reference number (Mika)
v5:
  - Rebased
  - C, not lisp (Chris)

References: HSDES#1405543622
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-4-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 5 +++--
 drivers/gpu/drm/i915/intel_workarounds.c | 6 ++++++
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6aad16ee44ae..c9c2ad5f5844 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8250,8 +8250,9 @@ enum {
 #define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE	(1<<4)
 #define   GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE     (1<<6)
 
-#define GEN8_GARBCNTL                   _MMIO(0xB004)
-#define   GEN9_GAPS_TSV_CREDIT_DISABLE  (1<<7)
+#define GEN8_GARBCNTL				_MMIO(0xB004)
+#define   GEN9_GAPS_TSV_CREDIT_DISABLE		(1 << 7)
+#define   GEN11_ARBITRATION_PRIO_ORDER_MASK	(0x3f << 22)
 
 #define GEN10_DFR_RATIO_EN_AND_CHICKEN	_MMIO(0x9550)
 #define   DFR_DISABLE			(1 << 9)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 73d02d3785d4..44ae0b4f6079 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -699,6 +699,12 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	/* WaPipelineFlushCoherentLines:icl */
 	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 				   GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+	/* Wa_1405543622:icl
+	 * Formerly known as WaGAPZPriorityScheme
+	 */
+	I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
+				  GEN11_ARBITRATION_PRIO_ORDER_MASK);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From d41bab687999793d7331b7b8906dca7e1aeb64c7 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:26 -0700
Subject: [PATCH 0689/1286] drm/i915/icl: WaL3BankAddressHashing

Revert to an L3 non-hash model for performance reasons.

v2:
  - Place the WA name above the actual change
  - Improve the register naming
v3:
  - Rebased
  - Renamed to Wa_1604223664
v4: Rebased on top of the WA refactoring
v5:
  - Added References (Mika)
  - Fixed wrong mask and value (Mika)
  - Do not apply together with another WA for the same
    register (not worth the hassle)
v6:
  - Rebased
  - C, not lisp (Chris)

References: HSDES#1604223664
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-5-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          |  6 ++++++
 drivers/gpu/drm/i915/intel_workarounds.c | 10 ++++++++++
 2 files changed, 16 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c9c2ad5f5844..81f1a8c7c0f5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8253,6 +8253,12 @@ enum {
 #define GEN8_GARBCNTL				_MMIO(0xB004)
 #define   GEN9_GAPS_TSV_CREDIT_DISABLE		(1 << 7)
 #define   GEN11_ARBITRATION_PRIO_ORDER_MASK	(0x3f << 22)
+#define   GEN11_HASH_CTRL_EXCL_MASK		(0x7f << 0)
+#define   GEN11_HASH_CTRL_EXCL_BIT0		(1 << 0)
+
+#define GEN11_GLBLINVL				_MMIO(0xB404)
+#define   GEN11_BANK_HASH_ADDR_EXCL_MASK	(0x7f << 5)
+#define   GEN11_BANK_HASH_ADDR_EXCL_BIT0	(1 << 5)
 
 #define GEN10_DFR_RATIO_EN_AND_CHICKEN	_MMIO(0x9550)
 #define   DFR_DISABLE			(1 << 9)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 44ae0b4f6079..3b037298ff99 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -705,6 +705,16 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	 */
 	I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
 				  GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+	/* Wa_1604223664:icl
+	 * Formerly known as WaL3BankAddressHashing
+	 */
+	I915_WRITE(GEN8_GARBCNTL,
+		   (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
+		   GEN11_HASH_CTRL_EXCL_BIT0);
+	I915_WRITE(GEN11_GLBLINVL,
+		   (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
+		   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
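
Most of the ICL GT workarounds in this series follow the same
read-modify-write shape: clear a field with its mask, then OR in the new
value. A tiny illustrative helper capturing that pattern (rmw_field is a
hypothetical name; the patches open-code it at each call site):

  static void rmw_field(struct drm_i915_private *dev_priv,
  		      i915_reg_t reg, u32 mask, u32 value)
  {
  	I915_WRITE(reg, (I915_READ(reg) & ~mask) | value);
  }

  /* e.g. the Wa_1604223664 update above is equivalent to:
   *	rmw_field(dev_priv, GEN8_GARBCNTL,
   *		  GEN11_HASH_CTRL_EXCL_MASK, GEN11_HASH_CTRL_EXCL_BIT0);
   */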

From f4a357140a5693bfb0ab4e6608b7118a288a9ba9 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:27 -0700
Subject: [PATCH 0690/1286] drm/i915/icl: WaModifyGamTlbPartitioning

Adjust default GAM TLB partitioning for performance reasons.

v2: Only touch the bits that we really need
v3: Rebased on top of the WA refactoring
v4:
  - Added References (Mika)
  - Rebased
v5:
  - Rebased
  - C, not lisp (Chris)
  - Correct reference number (Mika)

References: HSDES#220160670
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-6-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 5 +++++
 drivers/gpu/drm/i915/intel_workarounds.c | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 81f1a8c7c0f5..7fe505ce5888 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8263,6 +8263,11 @@ enum {
 #define GEN10_DFR_RATIO_EN_AND_CHICKEN	_MMIO(0x9550)
 #define   DFR_DISABLE			(1 << 9)
 
+#define GEN11_GACB_PERF_CTRL			_MMIO(0x4B80)
+#define   GEN11_HASH_CTRL_MASK			(0x3 << 12 | 0xf << 0)
+#define   GEN11_HASH_CTRL_BIT0			(1 << 0)
+#define   GEN11_HASH_CTRL_BIT4			(1 << 12)
+
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1(slice)		_MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 3b037298ff99..2561c55043c5 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -715,6 +715,11 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN11_GLBLINVL,
 		   (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
 		   GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+	/* WaModifyGamTlbPartitioning:icl */
+	I915_WRITE(GEN11_GACB_PERF_CTRL,
+		   (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
+		   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 5246ae4bdb4ceae9778a7966db1d9522c6cb0ea7 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:28 -0700
Subject: [PATCH 0691/1286] drm/i915/icl: WaDisableCleanEvicts

Avoids undefined LLC behavior.

BSpec: 9613

v2: Renamed to Wa_1405733216
v3: Spaces around '<<' and fix surrounding code
v4: Rebased on top of the WA refactoring
v5: Added References (Mika)
v6:
  - Rebased
  - C, not lisp (Chris)

References: HSDES#1405733216
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-7-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 5 +++--
 drivers/gpu/drm/i915/intel_workarounds.c | 6 ++++++
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7fe505ce5888..a7bd739fde82 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7232,8 +7232,9 @@ enum {
 #define  L3SQ_URB_READ_CAM_MATCH_DISABLE	(1<<27)
 
 #define GEN8_L3SQCREG4				_MMIO(0xb118)
-#define  GEN8_LQSC_RO_PERF_DIS			(1<<27)
-#define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1<<21)
+#define  GEN11_LQSC_CLEAN_EVICT_DISABLE		(1 << 6)
+#define  GEN8_LQSC_RO_PERF_DIS			(1 << 27)
+#define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1 << 21)
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0				_MMIO(0x7300)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 2561c55043c5..7e8bcc2ae091 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -720,6 +720,12 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN11_GACB_PERF_CTRL,
 		   (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
 		   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
+
+	/* Wa_1405733216:icl
+	 * Formerly known as WaDisableCleanEvicts
+	 */
+	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+				   GEN11_LQSC_CLEAN_EVICT_DISABLE);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 6b967dc392090831954644549676409ca22fe8bf Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:29 -0700
Subject: [PATCH 0692/1286] drm/i915/icl: WaCL2SFHalfMaxAlloc

This workarounds an issue with insufficient storage for the
CL2 and SF units.

v2: Renamed to Wa_1405766107
v3: Wrapped the commit message
v4: Rebased on top of the WA refactoring
v5: Added References (Mika)
v6:
  - Rebased
  - s/MACALLOC/MAXALLOC (Mika)
  - C, not lisp (Chris)

References: HSDES#1405766107
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-8-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 4 ++++
 drivers/gpu/drm/i915/intel_workarounds.c | 7 +++++++
 2 files changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a7bd739fde82..d325fad480f0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8269,6 +8269,10 @@ enum {
 #define   GEN11_HASH_CTRL_BIT0			(1 << 0)
 #define   GEN11_HASH_CTRL_BIT4			(1 << 12)
 
+#define GEN11_LSN_UNSLCVC				_MMIO(0xB43C)
+#define   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC	(1 << 9)
+#define   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC	(1 << 7)
+
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1(slice)		_MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 7e8bcc2ae091..a6758bdd74dd 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -726,6 +726,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	 */
 	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 				   GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+	/* Wa_1405766107:icl
+	 * Formerly known as WaCL2SFHalfMaxAlloc
+	 */
+	I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
+				      GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+				      GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 908ae05173637e9b39545636a12c244314d6fce1 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:30 -0700
Subject: [PATCH 0693/1286] drm/i915/icl: WaDisCtxReload

Revert to the legacy implementation to avoid a system hang.

v2: Correct the address for GAMW_ECO_DEV_RW_IA_REG
v3: Renamed to Wa_220166154
v4: Rebased on top of the WA refactoring
v5: Added References (Mika)
v6:
  - Rebased
  - C, not lisp (Chris)

References: HSDES#220166154
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-9-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 3 +++
 drivers/gpu/drm/i915/intel_workarounds.c | 6 ++++++
 2 files changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d325fad480f0..dd23af3ca352 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8273,6 +8273,9 @@ enum {
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC	(1 << 9)
 #define   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC	(1 << 7)
 
+#define GAMW_ECO_DEV_RW_IA_REG			_MMIO(0x4080)
+#define   GAMW_ECO_DEV_CTX_RELOAD_DISABLE	(1 << 7)
+
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1(slice)		_MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index a6758bdd74dd..354740360085 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -733,6 +733,12 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
 				      GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
 				      GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+
+	/* Wa_220166154:icl
+	 * Formerly known as WaDisCtxReload
+	 */
+	I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
+					   GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 0a437d4981650615ba3b4c7a9ce0c76894637217 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:31 -0700
Subject: [PATCH 0694/1286] drm/i915/icl: Wa_1405779004

Disable MSC clock gating to prevent data corruption.

BSpec: 19257

v2: Rebased on top of the WA refactoring
v3: Added References (Mika)
v4:
  - Rebased
  - C, not lisp (Chris)
  - A0 only (Mika)

References: HSDES#1405779004
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-10-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 1 +
 drivers/gpu/drm/i915/intel_workarounds.c | 6 ++++++
 2 files changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index dd23af3ca352..950ec8e6cf65 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3840,6 +3840,7 @@ enum {
 #define SLICE_UNIT_LEVEL_CLKGATE	_MMIO(0x94d4)
 #define  SARBUNIT_CLKGATE_DIS		(1 << 5)
 #define  RCCUNIT_CLKGATE_DIS		(1 << 7)
+#define  MSCUNIT_CLKGATE_DIS		(1 << 10)
 
 #define SUBSLICE_UNIT_LEVEL_CLKGATE	_MMIO(0x9524)
 #define  GWUNIT_CLKGATE_DIS		(1 << 16)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 354740360085..469a83d521ae 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -739,6 +739,12 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	 */
 	I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
 					   GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+
+	/* Wa_1405779004:icl (pre-prod) */
+	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+		I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
+			   I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
+			   MSCUNIT_CLKGATE_DIS);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 36204d80bacb5382c7944fceb14975c727def102 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:32 -0700
Subject: [PATCH 0695/1286] drm/i915/icl: Wa_1406680159

Disable GWL clock gating to prevent an issue that might
cause hangs.

v2: Rebased on top of the WA refactoring
v3: Wa_2201832410 officially merged with Wa_1406680159
v4: Added References (Mika)
v5:
  - Rebased
  - C, not lisp (Chris)
  - Add reference where WA is better explained (Rodrigo)
  - Add reference to WA that got merged with this

References: HSDES#1406681710
References: HSDES#1406680159
References: HSDES#2201832410
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-11-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/intel_workarounds.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 469a83d521ae..a3fa01a47c86 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -745,6 +745,11 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 		I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
 			   I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
 			   MSCUNIT_CLKGATE_DIS);
+
+	/* Wa_1406680159:icl */
+	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
+		   I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
+		   GWUNIT_CLKGATE_DIS);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 5215eef35fcbbc8f9bd68adff90eb813e8c3b7cf Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:33 -0700
Subject: [PATCH 0696/1286] drm/i915/icl: Wa_1604302699

Disable I2M Write for performance reasons.

v2: Rebased on top of the WA refactoring
v3: Added References (Mika)
v4:
  - Rebased
  - C, not lisp (Chris)
  - GEN7 chicken bit in the wrong side of the fence (Mika)
  - Use two spaces to align bit macros

References: HSDES#1604302699
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-12-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 4 +++-
 drivers/gpu/drm/i915/intel_workarounds.c | 5 +++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 950ec8e6cf65..7cb2ddc42e1b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7227,7 +7227,9 @@ enum {
 #define GEN7_L3CNTLREG3				_MMIO(0xB024)
 
 #define GEN7_L3_CHICKEN_MODE_REGISTER		_MMIO(0xB030)
-#define  GEN7_WA_L3_CHICKEN_MODE				0x20000000
+#define   GEN7_WA_L3_CHICKEN_MODE		0x20000000
+#define GEN10_L3_CHICKEN_MODE_REGISTER		_MMIO(0xB114)
+#define   GEN11_I2M_WRITE_DISABLE		(1 << 28)
 
 #define GEN7_L3SQCREG4				_MMIO(0xb034)
 #define  L3SQ_URB_READ_CAM_MATCH_DISABLE	(1<<27)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index a3fa01a47c86..2a4e3ee5af10 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -750,6 +750,11 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
 		   I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
 		   GWUNIT_CLKGATE_DIS);
+
+	/* Wa_1604302699:icl */
+	I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
+		   I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
+		   GEN11_I2M_WRITE_DISABLE);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 5ba700c73a89b0bace1a89a08e7a7eca5f011152 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:34 -0700
Subject: [PATCH 0697/1286] drm/i915/icl: Wa_1406838659

Disable CGPSF unit clock gating to prevent an issue.

v2: Rebased on top of the WA refactoring
v3: Added References (Mika)
v4:
  - Rebased
  - C, not lisp (Chris)
  - Remove unintentional whitespaces (Mika)
  - Fixed in C0 (Mika)

References: HSDES#1406838659
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-13-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 3 +++
 drivers/gpu/drm/i915/intel_workarounds.c | 6 ++++++
 2 files changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7cb2ddc42e1b..ce484271e30a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3848,6 +3848,9 @@ enum {
 #define UNSLICE_UNIT_LEVEL_CLKGATE	_MMIO(0x9434)
 #define  VFUNIT_CLKGATE_DIS		(1 << 20)
 
+#define INF_UNIT_LEVEL_CLKGATE		_MMIO(0x9560)
+#define   CGPSF_CLKGATE_DIS		(1 << 3)
+
 /*
  * Display engine regs
  */
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 2a4e3ee5af10..942d32256c53 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -755,6 +755,12 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
 		   I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
 		   GEN11_I2M_WRITE_DISABLE);
+
+	/* Wa_1406838659:icl (pre-prod) */
+	if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+		I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
+			   I915_READ(INF_UNIT_LEVEL_CLKGATE) |
+			   CGPSF_CLKGATE_DIS);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 73f4e8a338da114ec9e0b8c634a02fd85d4fa396 Mon Sep 17 00:00:00 2001
From: Oscar Mateo <oscar.mateo@intel.com>
Date: Tue, 8 May 2018 14:29:35 -0700
Subject: [PATCH 0698/1286] drm/i915/icl: WaForwardProgressSoftReset

Avoids a hang during soft reset.

v2: Rebased on top of the WA refactoring
v3: Added References (Mika)
v4:
  - Rebased
  - C, not lisp (Chris)
  - It is not clear which steppings are affected by this.
    For the moment, apply unconditionally as per the
    BSpec (Mika)
  - Add reference to another HSD also related

References: HSDES#1405476379
References: HSDES#2006612137
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1525814984-20039-14-git-send-email-oscar.mateo@intel.com
---
 drivers/gpu/drm/i915/i915_reg.h          | 5 +++++
 drivers/gpu/drm/i915/intel_workarounds.c | 7 +++++++
 2 files changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ce484271e30a..14491782aa9e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -9897,6 +9897,11 @@ enum skl_power_gate {
 /* Media decoder 2 MOCS registers */
 #define GEN11_MFX2_MOCS(i)	_MMIO(0x10000 + (i) * 4)
 
+#define GEN10_SCRATCH_LNCF2		_MMIO(0xb0a0)
+#define   PMFLUSHDONE_LNICRSDROP	(1 << 20)
+#define   PMFLUSH_GAPL3UNBLOCK		(1 << 21)
+#define   PMFLUSHDONE_LNEBLK		(1 << 22)
+
 /* gamt regs */
 #define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
 #define   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW  0x67F1427F /* max/min for LRA1/2 */
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 942d32256c53..5eec4ce965a5 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -761,6 +761,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 		I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
 			   I915_READ(INF_UNIT_LEVEL_CLKGATE) |
 			   CGPSF_CLKGATE_DIS);
+
+	/* WaForwardProgressSoftReset:icl */
+	I915_WRITE(GEN10_SCRATCH_LNCF2,
+		   I915_READ(GEN10_SCRATCH_LNCF2) |
+		   PMFLUSHDONE_LNICRSDROP |
+		   PMFLUSH_GAPL3UNBLOCK |
+		   PMFLUSHDONE_LNEBLK);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)

From 77dfedb5be03779f9a5d83e323a1b36e32090105 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 11 May 2018 13:11:45 +0100
Subject: [PATCH 0699/1286] drm/i915/execlists: Use rmb() to order CSB reads
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We assume that the CSB is written using the normal ringbuffer
coherency protocols, as outlined in kernel/events/ring_buffer.c:

    *   (HW)                              (DRIVER)
    *
    *   if (LOAD ->data_tail) {            LOAD ->data_head
    *                      (A)             smp_rmb()       (C)
    *      STORE $data                     LOAD $data
    *      smp_wmb()       (B)             smp_mb()        (D)
    *      STORE ->data_head               STORE ->data_tail
    *   }

So we assume that the HW fulfils its ordering requirements (B), and so
we should use a complementary rmb (C) to ensure that our read of its
WRITE pointer is completed before we start accessing the data.

The final mb (D) is implied by the uncached mmio we perform to inform
the HW of our READ pointer.
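
For illustration only (not part of the patch), a minimal sketch of the
driver-side sequence with the new barrier, reusing the names from the hunk
below; process_csb() and the wrap at GEN8_CSB_ENTRIES stand in for the
surrounding loop:

	head = execlists->csb_head;           /* our READ pointer                 */
	tail = READ_ONCE(buf[write_idx]);     /* HW's WRITE pointer from the HWSP */
	rmb();                                /* (C): pairs with the HW's wmb (B) */
	while (head != tail) {
		if (++head == GEN8_CSB_ENTRIES)
			head = 0;
		process_csb(READ_ONCE(buf[2 * head]));  /* LOAD $data */
	}
	/* writing the READ pointer back via uncached mmio provides (D) */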

References: https://bugs.freedesktop.org/show_bug.cgi?id=105064
References: https://bugs.freedesktop.org/show_bug.cgi?id=105888
References: https://bugs.freedesktop.org/show_bug.cgi?id=106185
Fixes: 767a983ab255 ("drm/i915/execlists: Read the context-status HEAD from the HWSP")
References: 61bf9719fa17 ("drm/i915/cnl: Use mmio access to context status buffer")
Suggested-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Rafael Antognolli <rafael.antognolli@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Timo Aaltonen <tjaalton@ubuntu.com>
Tested-by: Timo Aaltonen <tjaalton@ubuntu.com>
Acked-by: Michel Thierry <michel.thierry@intel.com>
Acked-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180511121147.31915-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 243d40369e6a..7c6164d14bd8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1009,6 +1009,7 @@ static void execlists_submission_tasklet(unsigned long data)
 
 			head = execlists->csb_head;
 			tail = READ_ONCE(buf[write_idx]);
+			rmb(); /* Hopefully paired with a wmb() in HW */
 		}
 		GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
 			  engine->name,

From e71a82d8c1fa28ab048227df929e4f07d98f1656 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 11 May 2018 13:11:46 +0100
Subject: [PATCH 0700/1286] Revert "drm/i915/cnl: Use mmio access to context
 status buffer"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In the previous patch (to include an rmb() after reading the CSB WRITE
pointer from the HWSP) we believe we have fixed the underlying bug, and
so can re-enable using the HWSP on Cannonlake.

This reverts commit 61bf9719fa17 ("drm/i915/cnl: Use mmio access to
context status buffer").

References: https://bugs.freedesktop.org/show_bug.cgi?id=105888
References: https://bugs.freedesktop.org/show_bug.cgi?id=106185
References: 61bf9719fa17 ("drm/i915/cnl: Use mmio access to context status buffer")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Rafael Antognolli <rafael.antognolli@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Timo Aaltonen <tjaalton@ubuntu.com>
Tested-by: Timo Aaltonen <tjaalton@ubuntu.com>
Acked-by: Michel Thierry <michel.thierry@intel.com>
Acked-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180511121147.31915-2-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 70325e0824e3..8303e05b0c7d 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -470,9 +470,6 @@ static bool csb_force_mmio(struct drm_i915_private *i915)
 	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
 		return true;
 
-	if (IS_CANNONLAKE(i915))
-		return true;
-
 	return false;
 }
 

From e896d29a548d04371ce746f7d02a8488ff93d812 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 11 May 2018 14:52:07 +0100
Subject: [PATCH 0701/1286] drm/i915/oa: Check that OA is disabled before
 unpinning

Before we unpin the buffer used for OA reports and return it to the
system, we need to be sure that the HW has finished writing into it.
For lack of a better idea, poll OACONTROL to check it is switched off.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=106379
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Tested-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180511135207.12880-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_perf.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index d9341415df40..019bd2d073ad 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1960,11 +1960,19 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 static void gen7_oa_disable(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(GEN7_OACONTROL, 0);
+	if (intel_wait_for_register(dev_priv,
+				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
+				    50))
+		DRM_ERROR("wait for OA to be disabled timed out\n");
 }
 
 static void gen8_oa_disable(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(GEN8_OACONTROL, 0);
+	if (intel_wait_for_register(dev_priv,
+				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
+				    50))
+		DRM_ERROR("wait for OA to be disabled timed out\n");
 }
 
 /**

From f79401b477bc22914e4c37ea39c611117bd10b19 Mon Sep 17 00:00:00 2001
From: Matthew Auld <matthew.auld@intel.com>
Date: Fri, 11 May 2018 10:51:40 +0100
Subject: [PATCH 0702/1286] drm/i915/selftests: scrub 64K

We write all 4K page entries, even when using 64K pages. In order to
verify that the HW isn't cheating by using the 4K PTE instead of the 64K
PTE, we want to remove all the surplus entries. If the HW skipped the
64K PTE, it will read/write into the scratch page instead - which we
detect as missing results during selftests.
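
As a rough sketch of the idea (illustrative names; the actual loop is in the
hunk below): a 64K page spans 16 consecutive 4K PTEs, so only the first entry
of each group of 16 is meaningful and the 15 surplus entries are pointed back
at the scratch page:

	/* keep PTE 0, 16, 32, ... and scrub the 15 surplus entries in between */
	for (i = 1; i < num_ptes_written; i += 16)
		memset64(vaddr + i, scratch_pte_encode, 15);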

v2: much improved commentary (Chris)

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Changbin Du <changbin.du@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180511095140.25590-1-matthew.auld@intel.com
---
 drivers/gpu/drm/i915/i915_gem_gtt.c         | 21 +++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_gtt.h         |  1 +
 drivers/gpu/drm/i915/selftests/huge_pages.c |  3 +++
 3 files changed, 25 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c01d6dbe269a..996ab2ad6c45 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1162,6 +1162,27 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 			vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
 			kunmap_atomic(vaddr);
 			page_size = I915_GTT_PAGE_SIZE_64K;
+
+			/*
+			 * We write all 4K page entries, even when using 64K
+			 * pages. In order to verify that the HW isn't cheating
+			 * by using the 4K PTE instead of the 64K PTE, we want
+			 * to remove all the surplus entries. If the HW skipped
+			 * the 64K PTE, it will read/write into the scratch page
+			 * instead - which we detect as missing results during
+			 * selftests.
+			 */
+			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
+				u16 i;
+
+				encode = pte_encode | vma->vm->scratch_page.daddr;
+				vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
+
+				for (i = 1; i < index; i += 16)
+					memset64(vaddr + i, encode, 15);
+
+				kunmap_atomic(vaddr);
+			}
 		}
 
 		vma->page_sizes.gtt |= page_size;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1db0dedb4059..aec4f73574f4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -342,6 +342,7 @@ struct i915_address_space {
 	void (*clear_pages)(struct i915_vma *vma);
 
 	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
+	I915_SELFTEST_DECLARE(bool scrub_64K);
 };
 
 #define i915_is_ggtt(V) (!(V)->file)
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index d7c8ef8e6764..91c72911be3c 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1757,6 +1757,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 		goto out_unlock;
 	}
 
+	if (ctx->ppgtt)
+		ctx->ppgtt->base.scrub_64K = true;
+
 	err = i915_subtests(tests, ctx);
 
 out_unlock:

From 0c79f9cb77eae28d48a4f9fc1b3341aacbbd260c Mon Sep 17 00:00:00 2001
From: Michel Thierry <michel.thierry@intel.com>
Date: Thu, 10 May 2018 13:07:08 -0700
Subject: [PATCH 0703/1286] drm/i915/gen9: Add WaClearHIZ_WM_CHICKEN3 for bxt
 and glk

Factor in clear values wherever required while updating destination
min/max.

References: HSDES#1604444184
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Cc: mesa-dev@lists.freedesktop.org
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180510200708.18097-1-michel.thierry@intel.com
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/i915/i915_reg.h          | 3 +++
 drivers/gpu/drm/i915/intel_workarounds.c | 4 ++++
 2 files changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 14491782aa9e..f11bb213ec07 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7259,6 +7259,9 @@ enum {
 #define SLICE_ECO_CHICKEN0			_MMIO(0x7308)
 #define   PIXEL_MASK_CAMMING_DISABLE		(1 << 14)
 
+#define GEN9_WM_CHICKEN3			_MMIO(0x5588)
+#define   GEN9_FACTOR_IN_CLR_VAL_HIZ		(1 << 9)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		_MMIO(0x9030)
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 5eec4ce965a5..2df3538ceba5 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -270,6 +270,10 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
 
+	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
+	if (IS_GEN9_LP(dev_priv))
+		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+
 	return 0;
 }
 

From b99f514f5dfa38e04ef0b628d82a97772945cae7 Mon Sep 17 00:00:00 2001
From: Changbin Du <changbin.du@intel.com>
Date: Thu, 19 Apr 2018 12:12:37 +0800
Subject: [PATCH 0704/1286] drm/i915/gvt: Remove disable_warn_untrack and print
 untracked mmio with debug level

The disable_warn_untrack flag never prevents gvt from printing untracked
mmio errors. We were disturbed by this error storm, and the fix was just
to add the offending registers to the list with no essential new change.

This message is only useful when enabling a new platform during
development. So lower the message level to debug and then remove
disable_warn_untrack.

Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
---
 drivers/gpu/drm/i915/gvt/gvt.h      |  1 -
 drivers/gpu/drm/i915/gvt/handlers.c | 11 +++--------
 drivers/gpu/drm/i915/gvt/mmio.c     |  2 --
 3 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 6ec888822a0f..05d15a095310 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -99,7 +99,6 @@ struct intel_vgpu_fence {
 struct intel_vgpu_mmio {
 	void *vreg;
 	void *sreg;
-	bool disable_warn_untrack;
 };
 
 #define INTEL_GVT_MAX_BAR_NUM 4
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a33c1c3e4a21..26c924bd7b21 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -191,6 +191,8 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
 	unsigned int max_fence = vgpu_fence_sz(vgpu);
 
 	if (fence_num >= max_fence) {
+		gvt_vgpu_err("access oob fence reg %d/%d\n",
+			     fence_num, max_fence);
 
 		/* When guest access oob fence regs without access
 		 * pv_info first, we treat guest not supporting GVT,
@@ -200,11 +202,6 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
 			enter_failsafe_mode(vgpu,
 					GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
-		if (!vgpu->mmio.disable_warn_untrack) {
-			gvt_vgpu_err("found oob fence register access\n");
-			gvt_vgpu_err("total fence %d, access fence %d\n",
-				     max_fence, fence_num);
-		}
 		memset(p_data, 0, bytes);
 		return -EINVAL;
 	}
@@ -3092,9 +3089,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 	 */
 	mmio_info = find_mmio_info(gvt, offset);
 	if (!mmio_info) {
-		if (!vgpu->mmio.disable_warn_untrack)
-			gvt_vgpu_err("untracked MMIO %08x len %d\n",
-				     offset, bytes);
+		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
 		goto default_rw;
 	}
 
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 11b71b33f1c0..e4960aff68bd 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -244,8 +244,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 
 		/* set the bit 0:2(Core C-State ) to C0 */
 		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
-
-		vgpu->mmio.disable_warn_untrack = false;
 	} else {
 #define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
 		/* only reset the engine related, so starting with 0x44200

From 3d8b9e258b9dbbeb0cdeb1cf5885e40d63d564ab Mon Sep 17 00:00:00 2001
From: Zhao Yan <yan.y.zhao@intel.com>
Date: Tue, 8 May 2018 14:52:30 +0800
Subject: [PATCH 0705/1286] drm/i915/gvt: let NOPID be the default value of
 force_to_nonpriv registers

Each ring has a NOPID register, and guest drivers currently use it as the
default value for force_to_nonpriv registers.

Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c |  9 ++++++++-
 drivers/gpu/drm/i915/gvt/handlers.c   | 22 ++++++++++++++--------
 2 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 9ec2cd982705..737cc824344d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -817,8 +817,15 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 {
 	struct intel_gvt *gvt = s->vgpu->gvt;
 	unsigned int data = cmd_val(s, index + 1);
+	u32 ring_base;
+	u32 nopid;
+	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 
-	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+	ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+	nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
+
+	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
+			data != nopid) {
 		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
 			offset, data);
 		return -EPERM;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 26c924bd7b21..bf2fa606afcd 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -474,21 +474,27 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
 	unsigned int offset, void *p_data, unsigned int bytes)
 {
 	u32 reg_nonpriv = *(u32 *)p_data;
+	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+	u32 ring_base;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ret = -EINVAL;
 
-	if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
-		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
-			vgpu->id, offset, bytes);
+	if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
+		gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
+			vgpu->id, ring_id, offset, bytes);
 		return ret;
 	}
 
-	if (in_whitelist(reg_nonpriv)) {
+	ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+	if (in_whitelist(reg_nonpriv) ||
+		reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
 		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
 			bytes);
-	} else {
-		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
-			vgpu->id, reg_nonpriv);
-	}
+	} else
+		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
+			vgpu->id, reg_nonpriv, offset);
+
 	return ret;
 }
 

From 0438a1059877396319b90da289f1473c9c973cd8 Mon Sep 17 00:00:00 2001
From: Zhao Yan <yan.y.zhao@intel.com>
Date: Tue, 8 May 2018 14:52:42 +0800
Subject: [PATCH 0706/1286] drm/i915/gvt: do not return error on handling
 force_to_nonpriv registers

Returning an error will cause the VM to hang and enter failsafe mode.
However, we don't want that to happen when a wrong force_to_nonpriv
register write is detected.
Therefore, we just omit the wrong write or patch it to the default value.

v2: only return 0 on detecting an LRI write of registers outside the whitelist,
but still return an error on other error conditions. (zhenyu wang)

Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
Reviewed-by: Zhang Yulei <yulei.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 3 ++-
 drivers/gpu/drm/i915/gvt/handlers.c   | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 737cc824344d..536cb691a543 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -828,7 +828,8 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 			data != nopid) {
 		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
 			offset, data);
-		return -EPERM;
+		patch_value(s, cmd_ptr(s, index), nopid);
+		return 0;
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bf2fa606afcd..4b6532fb789a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -495,7 +495,7 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
 		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
 			vgpu->id, reg_nonpriv, offset);
 
-	return ret;
+	return 0;
 }
 
 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,

From cb8ba171ae6c1e4f5fa027162c06d50fc2b43055 Mon Sep 17 00:00:00 2001
From: Zhao Yan <yan.y.zhao@intel.com>
Date: Tue, 8 May 2018 14:52:50 +0800
Subject: [PATCH 0707/1286] drm/i915/gvt: let force_to_nonpriv cmd handler only
 valid for LRI cmd

cmd_reg_handler() is called for the LRM, PIPE_CTRL, SRM... commands.
For LRM and SRM we cannot get the write data in a simple way.
On the other hand, the force_to_nonpriv registers are only written via LRI
in current drivers, so we don't want to bother the handler with those
memory access commands; just print a message for them instead.

Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 536cb691a543..36c6180e5769 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -813,14 +813,22 @@ static inline bool is_force_nonpriv_mmio(unsigned int offset)
 }
 
 static int force_nonpriv_reg_handler(struct parser_exec_state *s,
-				     unsigned int offset, unsigned int index)
+		unsigned int offset, unsigned int index, char *cmd)
 {
 	struct intel_gvt *gvt = s->vgpu->gvt;
-	unsigned int data = cmd_val(s, index + 1);
+	unsigned int data;
 	u32 ring_base;
 	u32 nopid;
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 
+	if (!strcmp(cmd, "lri"))
+		data = cmd_val(s, index + 1);
+	else {
+		gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
+			offset, cmd);
+		return -EINVAL;
+	}
+
 	ring_base = dev_priv->engine[s->ring_id]->mmio_base;
 	nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
 
@@ -877,7 +885,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		return -EINVAL;
 
 	if (is_force_nonpriv_mmio(offset) &&
-		force_nonpriv_reg_handler(s, offset, index))
+		force_nonpriv_reg_handler(s, offset, index, cmd))
 		return -EPERM;
 
 	if (offset == i915_mmio_reg_offset(DERRMR) ||

From 41e403d04e7050c8d88682939febcdbe117d4c82 Mon Sep 17 00:00:00 2001
From: Weinan Li <weinan.z.li@intel.com>
Date: Wed, 21 Mar 2018 15:40:32 +0800
Subject: [PATCH 0708/1286] Revert "drm/i915/gvt: set max priority for gvt
 context"

This reverts commit 11474e9091cf2002e948647fd9f63a7f027e488a.

Previously there were issues that would block host preemption, so instead
of disabling it we used the workaround "setting max priority for gvt
context" to avoid the gvt context being preempted by the host. Now that
the issues have been cleared, revert this patch to re-enable host
preemption.

v2:
- refine description (Zhenyu)

Signed-off-by: Weinan Li <weinan.z.li@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ffb45a9ee228..c2d183b91500 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1156,9 +1156,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	if (IS_ERR(s->shadow_ctx))
 		return PTR_ERR(s->shadow_ctx);
 
-	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
-		s->shadow_ctx->sched.priority = INT_MAX;
-
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
 	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",

From e8929999fa718da5758ff877592f33fea368ca8a Mon Sep 17 00:00:00 2001
From: Andrzej Hajda <a.hajda@samsung.com>
Date: Mon, 7 May 2018 11:29:28 +0200
Subject: [PATCH 0709/1286] drm/exynos/dsi: remove mode_set callback

The callback was used only to copy the provided mode to the context for
later use. Since the mode is always available from the crtc atomic state,
this code can be removed.
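
For reference, the standard drm_display_mode relations used by the new inline
computations (the same arithmetic as the removed exynos_dsi_mode_set() below):

	hfront_porch = m->hsync_start - m->hdisplay;
	hback_porch  = m->htotal - m->hsync_end;
	hsync_len    = m->hsync_end - m->hsync_start;
	vfront_porch = m->vsync_start - m->vdisplay;
	vback_porch  = m->vtotal - m->vsync_end;
	vsync_len    = m->vsync_end - m->vsync_start;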

Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_dsi.c | 40 +++++++------------------
 1 file changed, 10 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 7904ffa9abfb..eae44fd714f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -270,7 +270,6 @@ struct exynos_dsi {
 	u32 lanes;
 	u32 mode_flags;
 	u32 format;
-	struct videomode vm;
 
 	int state;
 	struct drm_property *brightness;
@@ -881,30 +880,30 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
 
 static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
 {
-	struct videomode *vm = &dsi->vm;
+	struct drm_display_mode *m = &dsi->encoder.crtc->state->adjusted_mode;
 	unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
 	u32 reg;
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
 		reg = DSIM_CMD_ALLOW(0xf)
-			| DSIM_STABLE_VFP(vm->vfront_porch)
-			| DSIM_MAIN_VBP(vm->vback_porch);
+			| DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
+			| DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
 		exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg);
 
-		reg = DSIM_MAIN_HFP(vm->hfront_porch)
-			| DSIM_MAIN_HBP(vm->hback_porch);
+		reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
+			| DSIM_MAIN_HBP(m->htotal - m->hsync_end);
 		exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg);
 
-		reg = DSIM_MAIN_VSA(vm->vsync_len)
-			| DSIM_MAIN_HSA(vm->hsync_len);
+		reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
+			| DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
 		exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg);
 	}
-	reg =  DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) |
-		DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol);
+	reg =  DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
+		DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
 
 	exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
 
-	dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+	dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
 }
 
 static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
@@ -1485,26 +1484,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
 	return 0;
 }
 
-static void exynos_dsi_mode_set(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
-	struct videomode *vm = &dsi->vm;
-	struct drm_display_mode *m = adjusted_mode;
-
-	vm->hactive = m->hdisplay;
-	vm->vactive = m->vdisplay;
-	vm->vfront_porch = m->vsync_start - m->vdisplay;
-	vm->vback_porch = m->vtotal - m->vsync_end;
-	vm->vsync_len = m->vsync_end - m->vsync_start;
-	vm->hfront_porch = m->hsync_start - m->hdisplay;
-	vm->hback_porch = m->htotal - m->hsync_end;
-	vm->hsync_len = m->hsync_end - m->hsync_start;
-}
-
 static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
-	.mode_set = exynos_dsi_mode_set,
 	.enable = exynos_dsi_enable,
 	.disable = exynos_dsi_disable,
 };

From c9ac371d4b5982d2f179d42bb99781e510d55f50 Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Tue, 8 May 2018 17:14:25 -0700
Subject: [PATCH 0710/1286] drm: Fix render node numbering regression from
 control node removal.

drm_minor_alloc() does multiplication on this enum, so the removal
ended up moving render nodes down from 128 base to 64.  This caused
Mesa's surfaceless backend to be unable to open the render nodes,
since it was still looking up at 128.
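
For illustration, the arithmetic that makes the enum order ABI (a sketch
assuming minors are carved out in blocks of 64 per type, as drm_minor_alloc()
does):

	/* PRIMARY -> 0..63, CONTROL -> 64..127, RENDER -> 128..191 */
	base = 64 * type;	/* keeping DRM_MINOR_RENDER at index 2 restores renderD128 */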

v2: Add a comment warning the next person.

Signed-off-by: Eric Anholt <eric@anholt.net>
Fixes: 0d49f303e8a7 ("drm: remove all control node code")
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20180509001425.12574-1-eric@anholt.net
---
 include/drm/drm_file.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 91a65a360079..027ac16da3d1 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -47,8 +47,12 @@ struct device;
  * header include loops we need it here for now.
  */
 
+/* Note that the order of this enum is ABI (it determines
+ * /dev/dri/renderD* numbers).
+ */
 enum drm_minor_type {
 	DRM_MINOR_PRIMARY,
+	DRM_MINOR_CONTROL,
 	DRM_MINOR_RENDER,
 };
 

From 4db518e4e8286ca93bd5399f26549eafc87607ea Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Fri, 11 May 2018 13:11:47 +0100
Subject: [PATCH 0711/1286] drm/i915/execlists: Relax CSB force-mmio for VT-d
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The original switch to use CSB from the HWSP was plagued by the effect
of read ordering on VT-d; we would read the WRITE pointer from the HWSP
before it had completed writing the CSB contents. The mystery comes down
to the lack of rmb() for correct ordering with respect to the writes
from HW, and with that resolved we can remove the VT-d special casing.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180511121147.31915-3-chris@chris-wilson.co.uk
Tested-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_engine_cs.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 8303e05b0c7d..6bfd7e3ed152 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -458,14 +458,6 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
 
 static bool csb_force_mmio(struct drm_i915_private *i915)
 {
-	/*
-	 * IOMMU adds unpredictable latency causing the CSB write (from the
-	 * GPU into the HWSP) to only be visible some time after the interrupt
-	 * (missed breadcrumb syndrome).
-	 */
-	if (intel_vtd_active())
-		return true;
-
 	/* Older GVT emulation depends upon intercepting CSB mmio */
 	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
 		return true;

From 110ab11d413881395773df29e8bdf5bd3a2164ee Mon Sep 17 00:00:00 2001
From: Dave Airlie <airlied@redhat.com>
Date: Thu, 3 May 2018 12:10:21 +1000
Subject: [PATCH 0712/1286] drm/virtio: add define for second capset to the
 virgl code.

Although the kernel doesn't use this, qemu imports these headers
and it's best to keep them consistent.

This define is also something userspace may want to use.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180503021021.10694-1-airlied@gmail.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 include/uapi/linux/virtio_gpu.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 4b04ead26cd9..f43c3c6171ff 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -260,6 +260,7 @@ struct virtio_gpu_cmd_submit {
 };
 
 #define VIRTIO_GPU_CAPSET_VIRGL 1
+#define VIRTIO_GPU_CAPSET_VIRGL2 2
 
 /* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
 struct virtio_gpu_get_capset_info {

From 68266f1c08db731fa7c3a0903bf890fc76ce9345 Mon Sep 17 00:00:00 2001
From: Souptick Joarder <jrdr.linux@gmail.com>
Date: Tue, 17 Apr 2018 19:08:44 +0530
Subject: [PATCH 0713/1286] gpu: drm: qxl: Adding new typedef vm_fault_t

Use the new return type vm_fault_t for the fault handler. For
now, this is just documenting that the function returns
a VM_FAULT value rather than an errno. Once all instances
are converted, vm_fault_t will become a distinct type.

Reference id -> 1c8f422059ae ("mm: change return type to
vm_fault_t")

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180417133844.GA30256@jordon-HP-15-Notebook-PC
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/qxl/qxl_ttm.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index ee2340e31f06..86a1fb32f6db 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -105,16 +105,16 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
 static struct vm_operations_struct qxl_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
-static int qxl_ttm_fault(struct vm_fault *vmf)
+static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
-	int r;
+	vm_fault_t ret;
 
 	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
-	r = ttm_vm_ops->fault(vmf);
-	return r;
+	ret = ttm_vm_ops->fault(vmf);
+	return ret;
 }
 
 int qxl_mmap(struct file *filp, struct vm_area_struct *vma)

From 0c591a40af1b369cc11dce4d558dd71bebbdc090 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 12 May 2018 09:49:57 +0100
Subject: [PATCH 0714/1286] drm/i915: Mark up nested spinlocks

When we process the outstanding requests upon banning a context, we need
to acquire both the engine and the client's timeline, nesting the locks.
This requires explicit markup as the two timelines are now of the same
class, since commit a89d1f921c15 ("drm/i915: Split i915_gem_timeline into
individual timelines").
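
For reference, a minimal sketch of the annotation (same names as the hunk
below): both timelines share a lockdep class, so the inner acquisition needs
an explicit nesting level to keep lockdep from reporting a false
recursive-locking deadlock:

	spin_lock_irqsave(&engine->timeline.lock, flags);
	spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
	/* ... skip the hung context's remaining requests ... */
	spin_unlock(&timeline->lock);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);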

Testcase: igt/gem_eio/banned
Fixes: a89d1f921c15 ("drm/i915: Split i915_gem_timeline into individual timelines")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180512084957.9829-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 89bf5d67cb74..0a2070112b66 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3119,7 +3119,7 @@ static void engine_skip_context(struct i915_request *request)
 	GEM_BUG_ON(timeline == &engine->timeline);
 
 	spin_lock_irqsave(&engine->timeline.lock, flags);
-	spin_lock(&timeline->lock);
+	spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
 
 	list_for_each_entry_continue(request, &engine->timeline.requests, link)
 		if (request->ctx == hung_ctx)

From 01f83786f9ab9c8883ce634cb9a0de51086ad7ea Mon Sep 17 00:00:00 2001
From: Jani Nikula <jani.nikula@intel.com>
Date: Mon, 14 May 2018 15:28:05 +0300
Subject: [PATCH 0715/1286] drm/i915: Update DRIVER_DATE to 20180514

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 57fb3aa09db0..34c125e2d90c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -85,8 +85,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20180413"
-#define DRIVER_TIMESTAMP	1523611258
+#define DRIVER_DATE		"20180514"
+#define DRIVER_TIMESTAMP	1526300884
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions

From 0eeef69022b4ea503106f5f695fd5d8ae2c72706 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Mon, 9 Apr 2018 14:55:17 -0500
Subject: [PATCH 0716/1286] drm/amd/display: Updated HDR Static Metadata to
 directly take info packet raw

Updated HDR Static Metadata to directly take info packet raw

Updating Infopacket does not require Passive

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 91 +------------------
 drivers/gpu/drm/amd/display/dc/dc_stream.h    |  4 +-
 2 files changed, 5 insertions(+), 90 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index eb8f4792198c..e1036e409877 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2315,97 +2315,12 @@ static void set_hdr_static_info_packet(
 		struct dc_info_packet *info_packet,
 		struct dc_stream_state *stream)
 {
-	uint16_t i = 0;
-	enum signal_type signal = stream->signal;
-	uint32_t data;
+	/* HDR Static Metadata info packet for HDR10 */
 
-	if (!stream->hdr_static_metadata.hdr_supported)
+	if (!stream->hdr_static_metadata.valid)
 		return;
 
-	if (dc_is_hdmi_signal(signal)) {
-		info_packet->valid = true;
-
-		info_packet->hb0 = 0x87;
-		info_packet->hb1 = 0x01;
-		info_packet->hb2 = 0x1A;
-		i = 1;
-	} else if (dc_is_dp_signal(signal)) {
-		info_packet->valid = true;
-
-		info_packet->hb0 = 0x00;
-		info_packet->hb1 = 0x87;
-		info_packet->hb2 = 0x1D;
-		info_packet->hb3 = (0x13 << 2);
-		i = 2;
-	}
-
-	data = stream->hdr_static_metadata.is_hdr;
-	info_packet->sb[i++] = data ? 0x02 : 0x00;
-	info_packet->sb[i++] = 0x00;
-
-	data = stream->hdr_static_metadata.chromaticity_green_x / 2;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.chromaticity_green_y / 2;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.chromaticity_blue_x / 2;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.chromaticity_blue_y / 2;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.chromaticity_red_x / 2;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.chromaticity_red_y / 2;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.chromaticity_white_point_x / 2;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.chromaticity_white_point_y / 2;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.max_luminance;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.min_luminance;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.maximum_content_light_level;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	data = stream->hdr_static_metadata.maximum_frame_average_light_level;
-	info_packet->sb[i++] = data & 0xFF;
-	info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
-	if (dc_is_hdmi_signal(signal)) {
-		uint32_t checksum = 0;
-
-		checksum += info_packet->hb0;
-		checksum += info_packet->hb1;
-		checksum += info_packet->hb2;
-
-		for (i = 1; i <= info_packet->hb2; i++)
-			checksum += info_packet->sb[i];
-
-		info_packet->sb[0] = 0x100 - checksum;
-	} else if (dc_is_dp_signal(signal)) {
-		info_packet->sb[0] = 0x01;
-		info_packet->sb[1] = 0x1A;
-	}
+	*info_packet = stream->hdr_static_metadata;
 }
 
 static void set_vsc_info_packet(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 2971cd07e093..08f1a45ed042 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -58,7 +58,7 @@ struct dc_stream_state {
 
 	struct freesync_context freesync_ctx;
 
-	struct dc_hdr_static_metadata hdr_static_metadata;
+	struct dc_info_packet hdr_static_metadata;
 	struct dc_transfer_func *out_transfer_func;
 	struct colorspace_transform gamut_remap_matrix;
 	struct csc_transform csc_color_matrix;
@@ -113,8 +113,8 @@ struct dc_stream_update {
 	struct rect src;
 	struct rect dst;
 	struct dc_transfer_func *out_transfer_func;
-	struct dc_hdr_static_metadata *hdr_static_metadata;
 	enum color_transfer_func color_output_tf;
+	struct dc_info_packet *hdr_static_metadata;
 	unsigned int *abm_level;
 	unsigned long long *periodic_fn_vsync_delta;
 };

From 85b25034608e861ce60b771b988967ea039a06c6 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Sun, 25 Mar 2018 16:41:06 -0400
Subject: [PATCH 0717/1286] drm/amd/display: Get rid of unused input_tf

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 -
 drivers/gpu/drm/amd/display/dc/core/dc.c          | 3 ---
 drivers/gpu/drm/amd/display/dc/core/dc_debug.c    | 4 ----
 drivers/gpu/drm/amd/display/dc/dc.h               | 5 -----
 4 files changed, 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2514d7b3b66e..aa8e25a9b09e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4034,7 +4034,6 @@ static bool commit_planes_to_stream(
 		flip_addr[i].address = plane_states[i]->address;
 		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
 		plane_info[i].color_space = plane_states[i]->color_space;
-		plane_info[i].input_tf = plane_states[i]->input_tf;
 		plane_info[i].format = plane_states[i]->format;
 		plane_info[i].plane_size = plane_states[i]->plane_size;
 		plane_info[i].rotation = plane_states[i]->rotation;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8f09f3ab0c29..e59357724eac 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1018,9 +1018,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
 	if (u->plane_info->color_space != u->surface->color_space)
 		update_flags->bits.color_space_change = 1;
 
-	if (u->plane_info->input_tf != u->surface->input_tf)
-		update_flags->bits.input_tf_change = 1;
-
 	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
 		update_flags->bits.horizontal_mirror_change = 1;
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index a3c87611220d..267c76766dea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -155,7 +155,6 @@ void pre_surface_trace(
 				"plane_state->tiling_info.gfx8.pipe_config = %d;\n"
 				"plane_state->tiling_info.gfx8.array_mode = %d;\n"
 				"plane_state->color_space = %d;\n"
-				"plane_state->input_tf = %d;\n"
 				"plane_state->dcc.enable = %d;\n"
 				"plane_state->format = %d;\n"
 				"plane_state->rotation = %d;\n"
@@ -163,7 +162,6 @@ void pre_surface_trace(
 				plane_state->tiling_info.gfx8.pipe_config,
 				plane_state->tiling_info.gfx8.array_mode,
 				plane_state->color_space,
-				plane_state->input_tf,
 				plane_state->dcc.enable,
 				plane_state->format,
 				plane_state->rotation,
@@ -203,7 +201,6 @@ void update_surface_trace(
 		if (update->plane_info) {
 			SURFACE_TRACE(
 					"plane_info->color_space = %d;\n"
-					"plane_info->input_tf = %d;\n"
 					"plane_info->format = %d;\n"
 					"plane_info->plane_size.grph.surface_pitch = %d;\n"
 					"plane_info->plane_size.grph.surface_size.height = %d;\n"
@@ -213,7 +210,6 @@ void update_surface_trace(
 					"plane_info->rotation = %d;\n"
 					"plane_info->stereo_format = %d;\n",
 					update->plane_info->color_space,
-					update->plane_info->input_tf,
 					update->plane_info->format,
 					update->plane_info->plane_size.grph.surface_pitch,
 					update->plane_info->plane_size.grph.surface_size.height,
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 23349148c7a4..6a47da30b281 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -333,7 +333,6 @@ enum {
 	TRANSFER_FUNC_POINTS = 1025
 };
 
-// Moved here from color module for linux
 enum color_transfer_func {
 	transfer_func_unknown,
 	transfer_func_srgb,
@@ -420,7 +419,6 @@ union surface_update_flags {
 		/* Medium updates */
 		uint32_t dcc_change:1;
 		uint32_t color_space_change:1;
-		uint32_t input_tf_change:1;
 		uint32_t horizontal_mirror_change:1;
 		uint32_t per_pixel_alpha_change:1;
 		uint32_t rotation_change:1;
@@ -470,7 +468,6 @@ struct dc_plane_state {
 	struct dc_hdr_static_metadata hdr_static_ctx;
 
 	enum dc_color_space color_space;
-	enum color_transfer_func input_tf;
 
 	enum surface_pixel_format format;
 	enum dc_rotation_angle rotation;
@@ -500,7 +497,6 @@ struct dc_plane_info {
 	enum dc_rotation_angle rotation;
 	enum plane_stereo_format stereo_format;
 	enum dc_color_space color_space;
-	enum color_transfer_func input_tf;
 	unsigned int sdr_white_level;
 	bool horizontal_mirror;
 	bool visible;
@@ -527,7 +523,6 @@ struct dc_surface_update {
 	 * null means no updates
 	 */
 	struct dc_gamma *gamma;
-	enum color_transfer_func color_input_tf;
 	struct dc_transfer_func *in_transfer_func;
 
 	struct csc_transform *input_csc_color_matrix;

From 477c000ece26a588752c9d1ed9904097e95de8c9 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Sun, 25 Mar 2018 16:55:05 -0400
Subject: [PATCH 0718/1286] drm/amd/display: Remove unused fields

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 6a47da30b281..5b81ae5acdf4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -361,9 +361,6 @@ struct dc_hdr_static_metadata {
 	uint32_t max_luminance;
 	uint32_t maximum_content_light_level;
 	uint32_t maximum_frame_average_light_level;
-
-	bool hdr_supported;
-	bool is_hdr;
 };
 
 enum dc_transfer_func_type {

From 5c6161162a556e2260d3c9f61f2c02bd82ee1ae9 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Mon, 26 Mar 2018 16:14:31 -0400
Subject: [PATCH 0719/1286] drm/amd/display: Do not use os types

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/basics/fixpt31_32.c    | 158 +++++++++---------
 .../gpu/drm/amd/display/include/fixed31_32.h  |  40 +++--
 2 files changed, 98 insertions(+), 100 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 8a9bba879207..7191c3213743 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -26,13 +26,13 @@
 #include "dm_services.h"
 #include "include/fixed31_32.h"
 
-static inline uint64_t abs_i64(
-	int64_t arg)
+static inline unsigned long long abs_i64(
+	long long arg)
 {
 	if (arg > 0)
-		return (uint64_t)arg;
+		return (unsigned long long)arg;
 	else
-		return (uint64_t)(-arg);
+		return (unsigned long long)(-arg);
 }
 
 /*
@@ -40,12 +40,12 @@ static inline uint64_t abs_i64(
  * result = dividend / divisor
  * *remainder = dividend % divisor
  */
-static inline uint64_t complete_integer_division_u64(
-	uint64_t dividend,
-	uint64_t divisor,
-	uint64_t *remainder)
+static inline unsigned long long complete_integer_division_u64(
+	unsigned long long dividend,
+	unsigned long long divisor,
+	unsigned long long *remainder)
 {
-	uint64_t result;
+	unsigned long long result;
 
 	ASSERT(divisor);
 
@@ -65,29 +65,29 @@ static inline uint64_t complete_integer_division_u64(
 	(FRACTIONAL_PART_MASK & (x))
 
 struct fixed31_32 dal_fixed31_32_from_fraction(
-	int64_t numerator,
-	int64_t denominator)
+	long long numerator,
+	long long denominator)
 {
 	struct fixed31_32 res;
 
 	bool arg1_negative = numerator < 0;
 	bool arg2_negative = denominator < 0;
 
-	uint64_t arg1_value = arg1_negative ? -numerator : numerator;
-	uint64_t arg2_value = arg2_negative ? -denominator : denominator;
+	unsigned long long arg1_value = arg1_negative ? -numerator : numerator;
+	unsigned long long arg2_value = arg2_negative ? -denominator : denominator;
 
-	uint64_t remainder;
+	unsigned long long remainder;
 
 	/* determine integer part */
 
-	uint64_t res_value = complete_integer_division_u64(
+	unsigned long long res_value = complete_integer_division_u64(
 		arg1_value, arg2_value, &remainder);
 
 	ASSERT(res_value <= LONG_MAX);
 
 	/* determine fractional part */
 	{
-		uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+		unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
 
 		do {
 			remainder <<= 1;
@@ -103,14 +103,14 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
 
 	/* round up LSB */
 	{
-		uint64_t summand = (remainder << 1) >= arg2_value;
+		unsigned long long summand = (remainder << 1) >= arg2_value;
 
 		ASSERT(res_value <= LLONG_MAX - summand);
 
 		res_value += summand;
 	}
 
-	res.value = (int64_t)res_value;
+	res.value = (long long)res_value;
 
 	if (arg1_negative ^ arg2_negative)
 		res.value = -res.value;
@@ -119,7 +119,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
 }
 
 struct fixed31_32 dal_fixed31_32_from_int_nonconst(
-	int64_t arg)
+	long long arg)
 {
 	struct fixed31_32 res;
 
@@ -132,7 +132,7 @@ struct fixed31_32 dal_fixed31_32_from_int_nonconst(
 
 struct fixed31_32 dal_fixed31_32_shl(
 	struct fixed31_32 arg,
-	uint8_t shift)
+	unsigned char shift)
 {
 	struct fixed31_32 res;
 
@@ -181,16 +181,16 @@ struct fixed31_32 dal_fixed31_32_mul(
 	bool arg1_negative = arg1.value < 0;
 	bool arg2_negative = arg2.value < 0;
 
-	uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
-	uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
+	unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value;
+	unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value;
 
-	uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
-	uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+	unsigned long long arg1_int = GET_INTEGER_PART(arg1_value);
+	unsigned long long arg2_int = GET_INTEGER_PART(arg2_value);
 
-	uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
-	uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+	unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+	unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value);
 
-	uint64_t tmp;
+	unsigned long long tmp;
 
 	res.value = arg1_int * arg2_int;
 
@@ -200,22 +200,22 @@ struct fixed31_32 dal_fixed31_32_mul(
 
 	tmp = arg1_int * arg2_fra;
 
-	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
 
 	res.value += tmp;
 
 	tmp = arg2_int * arg1_fra;
 
-	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
 
 	res.value += tmp;
 
 	tmp = arg1_fra * arg2_fra;
 
 	tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
-		(tmp >= (uint64_t)dal_fixed31_32_half.value);
+		(tmp >= (unsigned long long)dal_fixed31_32_half.value);
 
-	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
 
 	res.value += tmp;
 
@@ -230,13 +230,13 @@ struct fixed31_32 dal_fixed31_32_sqr(
 {
 	struct fixed31_32 res;
 
-	uint64_t arg_value = abs_i64(arg.value);
+	unsigned long long arg_value = abs_i64(arg.value);
 
-	uint64_t arg_int = GET_INTEGER_PART(arg_value);
+	unsigned long long arg_int = GET_INTEGER_PART(arg_value);
 
-	uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
+	unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value);
 
-	uint64_t tmp;
+	unsigned long long tmp;
 
 	res.value = arg_int * arg_int;
 
@@ -246,20 +246,20 @@ struct fixed31_32 dal_fixed31_32_sqr(
 
 	tmp = arg_int * arg_fra;
 
-	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
 
 	res.value += tmp;
 
-	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
 
 	res.value += tmp;
 
 	tmp = arg_fra * arg_fra;
 
 	tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
-		(tmp >= (uint64_t)dal_fixed31_32_half.value);
+		(tmp >= (unsigned long long)dal_fixed31_32_half.value);
 
-	ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
 
 	res.value += tmp;
 
@@ -288,7 +288,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
 
 	struct fixed31_32 res = dal_fixed31_32_one;
 
-	int32_t n = 27;
+	int n = 27;
 
 	struct fixed31_32 arg_norm = arg;
 
@@ -299,7 +299,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
 			arg_norm,
 			dal_fixed31_32_mul_int(
 				dal_fixed31_32_two_pi,
-				(int32_t)div64_s64(
+				(int)div64_s64(
 					arg_norm.value,
 					dal_fixed31_32_two_pi.value)));
 	}
@@ -343,7 +343,7 @@ struct fixed31_32 dal_fixed31_32_cos(
 
 	struct fixed31_32 res = dal_fixed31_32_one;
 
-	int32_t n = 26;
+	int n = 26;
 
 	do {
 		res = dal_fixed31_32_sub(
@@ -370,7 +370,7 @@ struct fixed31_32 dal_fixed31_32_cos(
 static struct fixed31_32 fixed31_32_exp_from_taylor_series(
 	struct fixed31_32 arg)
 {
-	uint32_t n = 9;
+	unsigned int n = 9;
 
 	struct fixed31_32 res = dal_fixed31_32_from_fraction(
 		n + 2,
@@ -409,7 +409,7 @@ struct fixed31_32 dal_fixed31_32_exp(
 	if (dal_fixed31_32_le(
 		dal_fixed31_32_ln2_div_2,
 		dal_fixed31_32_abs(arg))) {
-		int32_t m = dal_fixed31_32_round(
+		int m = dal_fixed31_32_round(
 			dal_fixed31_32_div(
 				arg,
 				dal_fixed31_32_ln2));
@@ -429,7 +429,7 @@ struct fixed31_32 dal_fixed31_32_exp(
 		if (m > 0)
 			return dal_fixed31_32_shl(
 				fixed31_32_exp_from_taylor_series(r),
-				(uint8_t)m);
+				(unsigned char)m);
 		else
 			return dal_fixed31_32_div_int(
 				fixed31_32_exp_from_taylor_series(r),
@@ -482,50 +482,50 @@ struct fixed31_32 dal_fixed31_32_pow(
 			arg2));
 }
 
-int32_t dal_fixed31_32_floor(
+int dal_fixed31_32_floor(
 	struct fixed31_32 arg)
 {
-	uint64_t arg_value = abs_i64(arg.value);
+	unsigned long long arg_value = abs_i64(arg.value);
 
 	if (arg.value >= 0)
-		return (int32_t)GET_INTEGER_PART(arg_value);
+		return (int)GET_INTEGER_PART(arg_value);
 	else
-		return -(int32_t)GET_INTEGER_PART(arg_value);
+		return -(int)GET_INTEGER_PART(arg_value);
 }
 
-int32_t dal_fixed31_32_round(
+int dal_fixed31_32_round(
 	struct fixed31_32 arg)
 {
-	uint64_t arg_value = abs_i64(arg.value);
+	unsigned long long arg_value = abs_i64(arg.value);
 
-	const int64_t summand = dal_fixed31_32_half.value;
+	const long long summand = dal_fixed31_32_half.value;
 
-	ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
 
 	arg_value += summand;
 
 	if (arg.value >= 0)
-		return (int32_t)GET_INTEGER_PART(arg_value);
+		return (int)GET_INTEGER_PART(arg_value);
 	else
-		return -(int32_t)GET_INTEGER_PART(arg_value);
+		return -(int)GET_INTEGER_PART(arg_value);
 }
 
-int32_t dal_fixed31_32_ceil(
+int dal_fixed31_32_ceil(
 	struct fixed31_32 arg)
 {
-	uint64_t arg_value = abs_i64(arg.value);
+	unsigned long long arg_value = abs_i64(arg.value);
 
-	const int64_t summand = dal_fixed31_32_one.value -
+	const long long summand = dal_fixed31_32_one.value -
 		dal_fixed31_32_epsilon.value;
 
-	ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
 
 	arg_value += summand;
 
 	if (arg.value >= 0)
-		return (int32_t)GET_INTEGER_PART(arg_value);
+		return (int)GET_INTEGER_PART(arg_value);
 	else
-		return -(int32_t)GET_INTEGER_PART(arg_value);
+		return -(int)GET_INTEGER_PART(arg_value);
 }
 
 /* this function is a generic helper to translate fixed point value to
@@ -535,15 +535,15 @@ int32_t dal_fixed31_32_ceil(
  * part in 32 bits. It is used in hw programming (scaler)
  */
 
-static inline uint32_t ux_dy(
-	int64_t value,
-	uint32_t integer_bits,
-	uint32_t fractional_bits)
+static inline unsigned int ux_dy(
+	long long value,
+	unsigned int integer_bits,
+	unsigned int fractional_bits)
 {
 	/* 1. create mask of integer part */
-	uint32_t result = (1 << integer_bits) - 1;
+	unsigned int result = (1 << integer_bits) - 1;
 	/* 2. mask out fractional part */
-	uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
+	unsigned int fractional_part = FRACTIONAL_PART_MASK & value;
 	/* 3. shrink fixed point integer part to be of integer_bits width*/
 	result &= GET_INTEGER_PART(value);
 	/* 4. make space for fractional part to be filled in after integer */
@@ -554,13 +554,13 @@ static inline uint32_t ux_dy(
 	return result | fractional_part;
 }
 
-static inline uint32_t clamp_ux_dy(
-	int64_t value,
-	uint32_t integer_bits,
-	uint32_t fractional_bits,
-	uint32_t min_clamp)
+static inline unsigned int clamp_ux_dy(
+	long long value,
+	unsigned int integer_bits,
+	unsigned int fractional_bits,
+	unsigned int min_clamp)
 {
-	uint32_t truncated_val = ux_dy(value, integer_bits, fractional_bits);
+	unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
 
 	if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
 		return (1 << (integer_bits + fractional_bits)) - 1;
@@ -570,35 +570,35 @@ static inline uint32_t clamp_ux_dy(
 		return min_clamp;
 }
 
-uint32_t dal_fixed31_32_u2d19(
+unsigned int dal_fixed31_32_u2d19(
 	struct fixed31_32 arg)
 {
 	return ux_dy(arg.value, 2, 19);
 }
 
-uint32_t dal_fixed31_32_u0d19(
+unsigned int dal_fixed31_32_u0d19(
 	struct fixed31_32 arg)
 {
 	return ux_dy(arg.value, 0, 19);
 }
 
-uint32_t dal_fixed31_32_clamp_u0d14(
+unsigned int dal_fixed31_32_clamp_u0d14(
 	struct fixed31_32 arg)
 {
 	return clamp_ux_dy(arg.value, 0, 14, 1);
 }
 
-uint32_t dal_fixed31_32_clamp_u0d10(
+unsigned int dal_fixed31_32_clamp_u0d10(
 	struct fixed31_32 arg)
 {
 	return clamp_ux_dy(arg.value, 0, 10, 1);
 }
 
-int32_t dal_fixed31_32_s4d19(
+int dal_fixed31_32_s4d19(
 	struct fixed31_32 arg)
 {
 	if (arg.value < 0)
-		return -(int32_t)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
+		return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
 	else
 		return ux_dy(arg.value, 4, 19);
 }
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 0de258622c12..16cbdb43d856 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -26,8 +26,6 @@
 #ifndef __DAL_FIXED31_32_H__
 #define __DAL_FIXED31_32_H__
 
-#include "os_types.h"
-
 #define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
 
 /*
@@ -44,7 +42,7 @@
  */
 
 struct fixed31_32 {
-	int64_t value;
+	long long value;
 };
 
 /*
@@ -73,15 +71,15 @@ static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
  * result = numerator / denominator
  */
 struct fixed31_32 dal_fixed31_32_from_fraction(
-	int64_t numerator,
-	int64_t denominator);
+	long long numerator,
+	long long denominator);
 
 /*
  * @brief
  * result = arg
  */
-struct fixed31_32 dal_fixed31_32_from_int_nonconst(int64_t arg);
-static inline struct fixed31_32 dal_fixed31_32_from_int(int64_t arg)
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(long long arg);
+static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
 {
 	if (__builtin_constant_p(arg)) {
 		struct fixed31_32 res;
@@ -213,7 +211,7 @@ static inline struct fixed31_32 dal_fixed31_32_clamp(
  */
 struct fixed31_32 dal_fixed31_32_shl(
 	struct fixed31_32 arg,
-	uint8_t shift);
+	unsigned char shift);
 
 /*
  * @brief
@@ -221,7 +219,7 @@ struct fixed31_32 dal_fixed31_32_shl(
  */
 static inline struct fixed31_32 dal_fixed31_32_shr(
 	struct fixed31_32 arg,
-	uint8_t shift)
+	unsigned char shift)
 {
 	struct fixed31_32 res;
 	res.value = arg.value >> shift;
@@ -246,7 +244,7 @@ struct fixed31_32 dal_fixed31_32_add(
  * result = arg1 + arg2
  */
 static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1,
-						       int32_t arg2)
+						       int arg2)
 {
 	return dal_fixed31_32_add(arg1,
 				  dal_fixed31_32_from_int(arg2));
@@ -265,7 +263,7 @@ struct fixed31_32 dal_fixed31_32_sub(
  * result = arg1 - arg2
  */
 static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
-						       int32_t arg2)
+						       int arg2)
 {
 	return dal_fixed31_32_sub(arg1,
 				  dal_fixed31_32_from_int(arg2));
@@ -291,7 +289,7 @@ struct fixed31_32 dal_fixed31_32_mul(
  * result = arg1 * arg2
  */
 static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1,
-						       int32_t arg2)
+						       int arg2)
 {
 	return dal_fixed31_32_mul(arg1,
 				  dal_fixed31_32_from_int(arg2));
@@ -309,7 +307,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
  * result = arg1 / arg2
  */
 static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1,
-						       int64_t arg2)
+						       long long arg2)
 {
 	return dal_fixed31_32_from_fraction(arg1.value,
 					    dal_fixed31_32_from_int(arg2).value);
@@ -434,21 +432,21 @@ struct fixed31_32 dal_fixed31_32_pow(
  * @brief
  * result = floor(arg) := greatest integer lower than or equal to arg
  */
-int32_t dal_fixed31_32_floor(
+int dal_fixed31_32_floor(
 	struct fixed31_32 arg);
 
 /*
  * @brief
  * result = round(arg) := integer nearest to arg
  */
-int32_t dal_fixed31_32_round(
+int dal_fixed31_32_round(
 	struct fixed31_32 arg);
 
 /*
  * @brief
  * result = ceil(arg) := lowest integer greater than or equal to arg
  */
-int32_t dal_fixed31_32_ceil(
+int dal_fixed31_32_ceil(
 	struct fixed31_32 arg);
 
 /* the following two function are used in scaler hw programming to convert fixed
@@ -457,20 +455,20 @@ int32_t dal_fixed31_32_ceil(
  * fractional
  */
 
-uint32_t dal_fixed31_32_u2d19(
+unsigned int dal_fixed31_32_u2d19(
 	struct fixed31_32 arg);
 
-uint32_t dal_fixed31_32_u0d19(
+unsigned int dal_fixed31_32_u0d19(
 	struct fixed31_32 arg);
 
 
-uint32_t dal_fixed31_32_clamp_u0d14(
+unsigned int dal_fixed31_32_clamp_u0d14(
 	struct fixed31_32 arg);
 
-uint32_t dal_fixed31_32_clamp_u0d10(
+unsigned int dal_fixed31_32_clamp_u0d10(
 	struct fixed31_32 arg);
 
-int32_t dal_fixed31_32_s4d19(
+int dal_fixed31_32_s4d19(
 	struct fixed31_32 arg);
 
 #endif

From 586f27a3c2b04e041c2d51c6c2374fbfa3075407 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Mon, 26 Mar 2018 16:19:18 -0400
Subject: [PATCH 0720/1286] drm/amd/display: csc_transform to dc_csc_transform

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h               | 4 ++--
 drivers/gpu/drm/amd/display/dc/dc_hw_types.h      | 5 +++++
 drivers/gpu/drm/amd/display/dc/dc_stream.h        | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c  | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  | 2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h       | 2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 5 -----
 drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h       | 2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | 2 +-
 9 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5b81ae5acdf4..588672cbfbf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -457,7 +457,7 @@ struct dc_plane_state {
 	struct dc_gamma *gamma_correction;
 	struct dc_transfer_func *in_transfer_func;
 	struct dc_bias_and_scale *bias_and_scale;
-	struct csc_transform input_csc_color_matrix;
+	struct dc_csc_transform input_csc_color_matrix;
 	struct fixed31_32 coeff_reduction_factor;
 	uint32_t sdr_white_level;
 
@@ -522,7 +522,7 @@ struct dc_surface_update {
 	struct dc_gamma *gamma;
 	struct dc_transfer_func *in_transfer_func;
 
-	struct csc_transform *input_csc_color_matrix;
+	struct dc_csc_transform *input_csc_color_matrix;
 	struct fixed31_32 *coeff_reduction_factor;
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index b83a7dc2f5a9..b1f70579d61b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -423,6 +423,11 @@ enum dc_gamma_type {
 	GAMMA_CS_TFM_1D = 3,
 };
 
+struct dc_csc_transform {
+	uint16_t matrix[12];
+	bool enable_adjustment;
+};
+
 struct dc_gamma {
 	struct kref refcount;
 	enum dc_gamma_type type;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 08f1a45ed042..ed3c39f132fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -61,7 +61,7 @@ struct dc_stream_state {
 	struct dc_info_packet hdr_static_metadata;
 	struct dc_transfer_func *out_transfer_func;
 	struct colorspace_transform gamut_remap_matrix;
-	struct csc_transform csc_color_matrix;
+	struct dc_csc_transform csc_color_matrix;
 
 	enum dc_color_space output_color_space;
 	enum dc_dither_option dither_option;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 5f40a7374c02..c008a71ebc4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -264,7 +264,7 @@ void dpp1_cnv_setup (
 		struct dpp *dpp_base,
 		enum surface_pixel_format format,
 		enum expansion_mode mode,
-		struct csc_transform input_csc_color_matrix,
+		struct dc_csc_transform input_csc_color_matrix,
 		enum dc_color_space input_color_space)
 {
 	uint32_t pixel_format;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 9b5ff76a8027..3fccf9959305 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1451,7 +1451,7 @@ void dpp1_cnv_setup (
 		struct dpp *dpp_base,
 		enum surface_pixel_format format,
 		enum expansion_mode mode,
-		struct csc_transform input_csc_color_matrix,
+		struct dc_csc_transform input_csc_color_matrix,
 		enum dc_color_space input_color_space);
 
 void dpp1_full_bypass(struct dpp *dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 99995608b620..bb7af1b1c7b3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -117,7 +117,7 @@ struct dpp_funcs {
 			struct dpp *dpp_base,
 			enum surface_pixel_format format,
 			enum expansion_mode mode,
-			struct csc_transform input_csc_color_matrix,
+			struct dc_csc_transform input_csc_color_matrix,
 			enum dc_color_space input_color_space);
 
 	void (*dpp_full_bypass)(struct dpp *dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 93da44527d2e..9fe73028d588 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -140,11 +140,6 @@ enum opp_regamma {
 	OPP_REGAMMA_USER
 };
 
-struct csc_transform {
-	uint16_t matrix[12];
-	bool enable_adjustment;
-};
-
 struct dc_bias_and_scale {
 	uint16_t scale_red;
 	uint16_t bias_red;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
index 2109eac20a3d..b2fa4c4cd920 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
@@ -87,7 +87,7 @@ struct ipp_funcs {
 		struct input_pixel_processor *ipp,
 		enum surface_pixel_format format,
 		enum expansion_mode mode,
-		struct csc_transform input_csc_color_matrix,
+		struct dc_csc_transform input_csc_color_matrix,
 		enum dc_color_space input_color_space);
 
 	/* DCE function to setup IPP.  TODO: see if we can consolidate to setup */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index c5b3623bcbd9..fecc80c47c26 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -252,7 +252,7 @@ struct transform_funcs {
 			struct transform *xfm_base,
 			enum surface_pixel_format format,
 			enum expansion_mode mode,
-			struct csc_transform input_csc_color_matrix,
+			struct dc_csc_transform input_csc_color_matrix,
 			enum dc_color_space input_color_space);
 
 	void (*ipp_full_bypass)(struct transform *xfm_base);

From a2e8f540c4efa7a8f180c910d202469b3ed4f5ba Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Mon, 9 Apr 2018 14:57:47 -0500
Subject: [PATCH 0721/1286] drm/amd/display: Refactor color module

Remove some unnecessary TF definitions from update structures

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc_stream.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index ed3c39f132fd..d7e6d53bb383 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -67,7 +67,6 @@ struct dc_stream_state {
 	enum dc_dither_option dither_option;
 
 	enum view_3d_format view_format;
-	enum color_transfer_func output_tf;
 
 	bool ignore_msa_timing_param;
 
@@ -113,9 +112,9 @@ struct dc_stream_update {
 	struct rect src;
 	struct rect dst;
 	struct dc_transfer_func *out_transfer_func;
-	enum color_transfer_func color_output_tf;
 	struct dc_info_packet *hdr_static_metadata;
 	unsigned int *abm_level;
+
 	unsigned long long *periodic_fn_vsync_delta;
 };
 

From 28177772cbf693a6960e92bba1f08a0e78acb048 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Mon, 26 Mar 2018 16:29:51 -0400
Subject: [PATCH 0722/1286] drm/amd/display: move color_transfer_func to color
 mod

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 588672cbfbf2..0f566a1ba35b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -333,19 +333,6 @@ enum {
 	TRANSFER_FUNC_POINTS = 1025
 };
 
-enum color_transfer_func {
-	transfer_func_unknown,
-	transfer_func_srgb,
-	transfer_func_bt709,
-	transfer_func_pq2084,
-	transfer_func_pq2084_interim,
-	transfer_func_linear_0_1,
-	transfer_func_linear_0_125,
-	transfer_func_dolbyvision,
-	transfer_func_gamma_22,
-	transfer_func_gamma_26
-};
-
 struct dc_hdr_static_metadata {
 	/* display chromaticities and white point in units of 0.00001 */
 	unsigned int chromaticity_green_x;
@@ -693,6 +680,7 @@ struct dc_cursor {
 	struct dc_cursor_attributes attributes;
 };
 
+
 /*******************************************************************************
  * Interrupt interfaces
  ******************************************************************************/

From 754e3673201eb192be68b8a07f2e448d75f69dfe Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Tue, 27 Mar 2018 23:12:21 -0400
Subject: [PATCH 0723/1286] drm/amd/display: Fix structure initialization of
 hdmi_info_packet

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index e1036e409877..8d7bc1fa9ffe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1853,11 +1853,13 @@ static void set_avi_info_frame(
 	unsigned int cn0_cn1_value = 0;
 	uint8_t *check_sum = NULL;
 	uint8_t byte_index = 0;
-	union hdmi_info_packet hdmi_info = {0};
+	union hdmi_info_packet hdmi_info;
 	union display_content_support support = {0};
 	unsigned int vic = pipe_ctx->stream->timing.vic;
 	enum dc_timing_3d_format format;
 
+	memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
+
 	color_space = pipe_ctx->stream->output_color_space;
 	if (color_space == COLOR_SPACE_UNKNOWN)
 		color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?

From e43a432c018a9a2c2641e1f8c08a836cc83982cd Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Tue, 27 Mar 2018 16:43:56 -0400
Subject: [PATCH 0724/1286] drm/amd/display: Have DC manage its own allocation
 of gamma

Creating a plane now also allocates its gamma and input transfer function.
Creating a stream now also allocates its output transfer function.

This also fixes an issue where gamma was not applied: the OS may call
SetGamma before the surface is committed, so the gamma needs to be stored
in the target and applied later.
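
A condensed view of the new ownership model (drawn from the hunks below):
the plane and stream constructors allocate the gamma and transfer
functions, and amdgpu_dm only fills in fields on the already-allocated
objects.

    /* dc_surface.c: plane construction allocates gamma + input TF
     * (identity gamma, bypass transfer function by default). */
    plane_state->gamma_correction = dc_create_gamma();
    plane_state->gamma_correction->is_identity = true;
    plane_state->in_transfer_func = dc_create_transfer_func();
    plane_state->in_transfer_func->type = TF_TYPE_BYPASS;

    /* dc_stream.c: stream construction allocates the output TF. */
    stream->out_transfer_func = dc_create_transfer_func();
    stream->out_transfer_func->type = TF_TYPE_BYPASS;

    /* amdgpu_dm: callers no longer allocate, they only set fields. */
    stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
    stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;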

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c         | 6 ++----
 drivers/gpu/drm/amd/display/dc/core/dc_stream.c           | 6 ++++--
 drivers/gpu/drm/amd/display/dc/core/dc_surface.c          | 8 +++++++-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 5 ++---
 4 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index aa8e25a9b09e..18f221b0349d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2200,7 +2200,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 					     const struct drm_connector *connector)
 {
 	struct dc_crtc_timing *timing_out = &stream->timing;
-	struct dc_transfer_func *tf = dc_create_transfer_func();
 
 	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
 
@@ -2244,9 +2243,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 
 	stream->output_color_space = get_output_color_space(timing_out);
 
-	tf->type = TF_TYPE_PREDEFINED;
-	tf->tf = TRANSFER_FUNCTION_SRGB;
-	stream->out_transfer_func = tf;
+	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
+	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index ce0747ed0f00..3b2ddbd8c054 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -101,14 +101,16 @@ static void construct(struct dc_stream_state *stream,
 	stream->status.link = stream->sink->link;
 
 	update_stream_signal(stream);
+
+	stream->out_transfer_func = dc_create_transfer_func();
+	stream->out_transfer_func->type = TF_TYPE_BYPASS;
 }
 
 static void destruct(struct dc_stream_state *stream)
 {
 	dc_sink_release(stream->sink);
 	if (stream->out_transfer_func != NULL) {
-		dc_transfer_func_release(
-				stream->out_transfer_func);
+		dc_transfer_func_release(stream->out_transfer_func);
 		stream->out_transfer_func = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ade5b8ee9c3c..959387705965 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -38,6 +38,12 @@
 static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
 {
 	plane_state->ctx = ctx;
+
+	plane_state->gamma_correction = dc_create_gamma();
+	plane_state->gamma_correction->is_identity = true;
+
+	plane_state->in_transfer_func = dc_create_transfer_func();
+	plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
 }
 
 static void destruct(struct dc_plane_state *plane_state)
@@ -175,7 +181,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
 	kref_put(&tf->refcount, dc_transfer_func_free);
 }
 
-struct dc_transfer_func *dc_create_transfer_func(void)
+struct dc_transfer_func *dc_create_transfer_func()
 {
 	struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index f3341a2399fa..a6cf9ade9131 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -956,9 +956,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
 		tf = plane_state->in_transfer_func;
 
 	if (plane_state->gamma_correction &&
-		plane_state->gamma_correction->is_identity)
-		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
-	else if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
+		!plane_state->gamma_correction->is_identity
+			&& dce_use_lut(plane_state->format))
 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
 
 	if (tf == NULL)

From 84ffa80123f56f80145dc638f21dfcbedda5610d Mon Sep 17 00:00:00 2001
From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
Date: Thu, 29 Mar 2018 17:04:12 -0400
Subject: [PATCH 0725/1286] drm/amd/display: Fix dim display on DCE11

Before programming the input gamma, check that we're not using the
identity correction.

Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 78bf4fae9e0d..52427ae42e0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -280,7 +280,9 @@ dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
 	build_prescale_params(&prescale_params, plane_state);
 	ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
 
-	if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
+	if (plane_state->gamma_correction &&
+			!plane_state->gamma_correction->is_identity &&
+			dce_use_lut(plane_state->format))
 		ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
 
 	if (tf == NULL) {

From e405c2173e8c1c8e2e823592581ba3f774e7811c Mon Sep 17 00:00:00 2001
From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
Date: Wed, 4 Apr 2018 16:01:30 -0400
Subject: [PATCH 0726/1286] drm/amd/display: Fix memleak on input transfer
 function

Input transfer function creation is now done when the plane is created.
This is done within the following change:

    Author: Anthony Koo <Anthony.Koo@amd.com>
        drm/amd/display: Have DC manage its own allocation of gamma

Therefore, we no longer need to create it when filling in the plane
attributes.
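
For context, this is the now-removed allocation (see the hunk below); with
the plane constructor already allocating in_transfer_func, this assignment
would have overwritten and leaked the existing object:

    /* Removed from fill_plane_attributes(): redundant allocation that
     * clobbered the transfer function created with the plane. */
    input_tf = dc_create_transfer_func();
    if (input_tf == NULL)
    	return -ENOMEM;
    dc_plane_state->in_transfer_func = input_tf;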

Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 18f221b0349d..265f0166f688 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2011,7 +2011,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
 	const struct amdgpu_framebuffer *amdgpu_fb =
 		to_amdgpu_framebuffer(plane_state->fb);
 	const struct drm_crtc *crtc = plane_state->crtc;
-	struct dc_transfer_func *input_tf;
 	int ret = 0;
 
 	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
@@ -2025,13 +2024,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
 	if (ret)
 		return ret;
 
-	input_tf = dc_create_transfer_func();
-
-	if (input_tf == NULL)
-		return -ENOMEM;
-
-	dc_plane_state->in_transfer_func = input_tf;
-
 	/*
 	 * Always set input transfer function, since plane state is refreshed
 	 * every time.

From eb40c86a83211827338e4a58b8e925a45a19d633 Mon Sep 17 00:00:00 2001
From: Nico Sneck <nicosneck@hotmail.com>
Date: Sat, 7 Apr 2018 15:13:04 +0000
Subject: [PATCH 0727/1286] drm/radeon: add PX quirk for Asus K73TK

With this quirk applied, the dGPU turns on correctly.

Signed-off-by: Nico Sneck <nicosneck@hotmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/radeon/radeon_device.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 90e17e29e12a..59c8a6647ff2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -140,6 +140,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
 	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
 	 */
 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
 	{ 0, 0, 0, 0, 0 },
 };
 

From 1bb5afd768b950e9ddcb62b3c31bce8bed3ef774 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Sun, 8 Apr 2018 14:39:18 +0800
Subject: [PATCH 0728/1286] drm/amdgpu: fix null pointer panic with direct fw
 loading on gpu reset

When the system uses direct firmware loading, the psp context structure is
never initialized, so it cannot be used to execute a mode1 reset.
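
A minimal sketch of the resulting behaviour (matching the hunk below):
psp_gpu_reset() returns early when PSP firmware loading is not in use,
since adev->psp was never set up in that case.

    int psp_gpu_reset(struct amdgpu_device *adev)
    {
    	/* With direct fw loading there is no PSP context to reset through. */
    	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
    		return 0;

    	return psp_mode1_reset(&adev->psp);
    }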

[  434.601474] amdgpu 0000:0c:00.0: GPU reset begin!
[  434.694326] amdgpu 0000:0c:00.0: GPU reset
[  434.743152] BUG: unable to handle kernel NULL pointer dereference at
0000000000000058
[  434.838474] IP: psp_gpu_reset+0xc/0x30 [amdgpu]
[  434.893532] PGD 406ed9067
[  434.893533] P4D 406ed9067
[  434.926376] PUD 400b46067
[  434.959217] PMD 0
[  435.033379] Oops: 0000 [#1] SMP
[  435.072573] Modules linked in: amdgpu(OE) chash(OE) gpu_sched(OE) ttm(OE)
drm_kms_helper(OE) drm(OE) fb_sys_fops syscopyarea sysfillrect sysimgblt
rpcsec_gss_krb5 auth_rpcgss nfsv4 nfs lockd grace fscache snd_hda_codec_realtek
snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_codec
snd_hda_core snd_hwdep snd_pcm edac_mce_amd snd_seq_midi snd_seq_midi_event
kvm_amd snd_rawmidi kvm irqbypass crct10dif_pclmul crc32_pclmul snd_seq
ghash_clmulni_intel snd_seq_device pcbc snd_timer eeepc_wmi aesni_intel snd
asus_wmi aes_x86_64 sparse_keymap crypto_simd glue_helper joydev soundcore
wmi_bmof cryptd video i2c_piix4 shpchp 8250_dw i2c_designware_platform mac_hid
i2c_designware_core sunrpc parport_pc ppdev lp parport autofs4 hid_generic igb
usbhid dca ptp mxm_wmi pps_core ahci hid i2c_algo_bit
[  435.931754]  libahci wmi

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 19e71f4a8ac2..c7d43e064fc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -505,6 +505,9 @@ failed:
 
 int psp_gpu_reset(struct amdgpu_device *adev)
 {
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+		return 0;
+
 	return psp_mode1_reset(&adev->psp);
 }
 

From f73f9e35a2a7c8cee3691a4b7313bbc3b95eec6b Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 4 Apr 2018 12:36:57 +0800
Subject: [PATCH 0729/1286] drm/amd/pp: Refine
 pp_atomfwctrl_get_vbios_bootup_values

In order to share pp_atomfwctrl_get_vbios_bootup_values across ASICs with
different BIOS_CLKID definitions, do not call
pp_atomfwctrl_get_clk_information_by_clkid from within
pp_atomfwctrl_get_vbios_bootup_values; let the ASIC-specific code query
the clocks itself.
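
A condensed view of the resulting split (drawn from the hunks below): the
bootup-values helper no longer fills ulSocClk/ulDCEFClk, and the vega10
code queries them explicitly through the now-exported helper.

    /* vega10_hwmgr.c: query the clocks with vega10's own BIOS_CLKID values. */
    pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
    		SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
    pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
    		SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);

    data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
    data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;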

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 9 +--------
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 2 ++
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 6 ++++++
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 0adaf36b6d68..c97b0e5ba43b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -488,7 +488,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency)
+int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 	struct atom_get_smu_clock_info_parameters_v3_1   parameters;
@@ -515,7 +515,6 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 {
 	struct atom_firmware_info_v3_1 *info = NULL;
 	uint16_t ix;
-	uint32_t frequency = 0;
 
 	ix = GetIndexIntoMasterDataTable(firmwareinfo);
 	info = (struct atom_firmware_info_v3_1 *)
@@ -538,12 +537,6 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 	boot_values->ulSocClk   = 0;
 	boot_values->ulDCEFClk   = 0;
 
-	if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
-		boot_values->ulSocClk   = frequency;
-
-	if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
-		boot_values->ulDCEFClk   = frequency;
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 8df1e84f27c9..fe10aa4db5e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -230,6 +230,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 			struct pp_atomfwctrl_bios_boot_up_values *boot_values);
 int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
 			struct pp_atomfwctrl_smc_dpm_parameters *param);
+int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
+					BIOS_CLKID id, uint32_t *frequency);
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index c9fb4b2cf5c6..ba299424f8f6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2481,6 +2481,12 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
 		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
 		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
+		pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
+				SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
+
+		pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
+				SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
+
 		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
 		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
 		if (0 != boot_up_values.usVddc) {

From e6636ae1b7aab30a1fb4ea7805b5b6b2494eca71 Mon Sep 17 00:00:00 2001
From: Evan Quan <evan.quan@amd.com>
Date: Tue, 10 Apr 2018 12:30:59 +0800
Subject: [PATCH 0730/1286] drm/amdgpu: add MP1 and THM hw ip base reg offset

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h          | 2 ++
 drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 3 ++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 21272ce74b56..7e5defbfc3b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1391,6 +1391,7 @@ enum amd_hw_ip_block_type {
 	ATHUB_HWIP,
 	NBIO_HWIP,
 	MP0_HWIP,
+	MP1_HWIP,
 	UVD_HWIP,
 	VCN_HWIP = UVD_HWIP,
 	VCE_HWIP,
@@ -1400,6 +1401,7 @@ enum amd_hw_ip_block_type {
 	SMUIO_HWIP,
 	PWR_HWIP,
 	NBIF_HWIP,
+	THM_HWIP,
 	MAX_HWIP
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 4c45db7f1157..45aafca7f315 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -38,6 +38,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
 		adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
 		adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
 		adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+		adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
 		adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
 		adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
 		adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
@@ -49,7 +50,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
 		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
 		adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
 		adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
-
+		adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
 	}
 	return 0;
 }

From b8a5559112714bb328330dbf2a4a1912e8c7a462 Mon Sep 17 00:00:00 2001
From: Evan Quan <evan.quan@amd.com>
Date: Tue, 10 Apr 2018 12:32:16 +0800
Subject: [PATCH 0731/1286] drm/amd/pp: use soc15 common macros instead of
 vega10 specific

pp_soc15.h is vega10 specific. Update powerplay code to use soc15 common
macros defined in soc15_common.h.
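
Illustrative before/after of the conversion (register names taken from the
smu10 hunk below):

    /* Before: vega10-specific offset lookup plus a CGS register read. */
    reg = soc15_get_register_offset(THM_HWID, 0,
    		mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
    reg_value = cgs_read_register(hwmgr->device, reg);

    /* After: the common soc15 macro resolves the offset from the per-IP
     * reg_offset table on the amdgpu device. */
    struct amdgpu_device *adev = hwmgr->adev;
    reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);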

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c |   7 +-
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c    |  16 +--
 .../amd/powerplay/hwmgr/vega10_powertune.c    |  50 ++++----
 .../drm/amd/powerplay/hwmgr/vega10_thermal.c  | 107 +++++++-----------
 .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    |   1 -
 .../drm/amd/powerplay/hwmgr/vega12_thermal.c  |  37 +++---
 drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h  |  52 ---------
 .../drm/amd/powerplay/smumgr/smu10_smumgr.c   |  37 +++---
 .../drm/amd/powerplay/smumgr/vega10_smumgr.c  |  50 +++-----
 .../drm/amd/powerplay/smumgr/vega12_smumgr.c  |  56 ++++-----
 10 files changed, 133 insertions(+), 280 deletions(-)
 delete mode 100644 drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 055358b95fdf..6ba3b1fa57aa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -34,7 +34,7 @@
 #include "rv_ppsmc.h"
 #include "smu10_hwmgr.h"
 #include "power_state.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 
 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID     5
 #define SMU10_MINIMUM_ENGINE_CLOCK         800   /* 8Mhz, the low boundary of engine clock allowed on this chip */
@@ -947,9 +947,8 @@ static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simpl
 
 static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
-			mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
-	uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset);
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
 	int cur_temp =
 		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index ba299424f8f6..f6427c88f6a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -36,7 +36,7 @@
 #include "smu9.h"
 #include "smu9_driver_if.h"
 #include "vega10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "pppcielanes.h"
 #include "vega10_hwmgr.h"
 #include "vega10_processpptables.h"
@@ -754,7 +754,6 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	uint32_t config_telemetry = 0;
 	struct pp_atomfwctrl_voltage_table vol_table;
 	struct amdgpu_device *adev = hwmgr->adev;
-	uint32_t reg;
 
 	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
 	if (data == NULL)
@@ -860,10 +859,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 			advanceFanControlParameters.usFanPWMMinLimit *
 			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
 
-	reg = soc15_get_register_offset(DF_HWID, 0,
-			mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
-			mmDF_CS_AON0_DramBaseAddress0);
-	data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+	data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
 			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
 			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
 	PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
@@ -3808,11 +3804,12 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
 static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 			      void *value, int *size)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t sclk_idx, mclk_idx, activity_percent = 0;
 	struct vega10_hwmgr *data = hwmgr->backend;
 	struct vega10_dpm_table *dpm_table = &data->dpm_table;
 	int ret = 0;
-	uint32_t reg, val_vid;
+	uint32_t val_vid;
 
 	switch (idx) {
 	case AMDGPU_PP_SENSOR_GFX_SCLK:
@@ -3862,10 +3859,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		}
 		break;
 	case AMDGPU_PP_SENSOR_VDDGFX:
-		reg = soc15_get_register_offset(SMUIO_HWID, 0,
-			mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX,
-			mmSMUSVI0_PLANE0_CURRENTVID);
-		val_vid = (cgs_read_register(hwmgr->device, reg) &
+		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
 			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
 			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
 		*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 203a6918395b..a9efd8554fbc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -27,7 +27,7 @@
 #include "vega10_ppsmc.h"
 #include "vega10_inc.h"
 #include "pp_debug.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 
 static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
 {
@@ -888,36 +888,36 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
 	if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
 		if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
 			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
-			data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
-			data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
+			data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
+			data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
 		}
 
 		if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
 			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
-			data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
-			data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
+			data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
+			data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
 		}
 
 		if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
 			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
-			data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
-			data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
+			data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
+			data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
 		}
 
 		if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
 			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
-			data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
-			data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
+			data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
+			data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
 		}
 
 		if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
 			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
-			data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
-			data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
+			data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
+			data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
 		}
 	}
@@ -933,17 +933,15 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	uint32_t num_se = 0, count, data;
-	uint32_t reg;
 
 	num_se = adev->gfx.config.max_shader_engines;
 
 	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
-		cgs_write_register(hwmgr->device, reg, data);
+		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 
 		result =  vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -958,7 +956,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
 		if (0 != result)
 			break;
 	}
-	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	vega10_didt_set_mask(hwmgr, true);
@@ -986,17 +984,15 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	uint32_t num_se = 0, count, data;
-	uint32_t reg;
 
 	num_se = adev->gfx.config.max_shader_engines;
 
 	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
-		cgs_write_register(hwmgr->device, reg, data);
+		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 
 		result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -1005,7 +1001,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
 		if (0 != result)
 			break;
 	}
-	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	vega10_didt_set_mask(hwmgr, true);
@@ -1049,17 +1045,15 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = hwmgr->adev;
 	int result;
 	uint32_t num_se = 0, count, data;
-	uint32_t reg;
 
 	num_se = adev->gfx.config.max_shader_engines;
 
 	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
-		cgs_write_register(hwmgr->device, reg, data);
+		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 		result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1070,7 +1064,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
 		if (0 != result)
 			break;
 	}
-	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	vega10_didt_set_mask(hwmgr, true);
@@ -1099,7 +1093,6 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 	int result;
 	uint32_t num_se = 0;
 	uint32_t count, data;
-	uint32_t reg;
 
 	num_se = adev->gfx.config.max_shader_engines;
 
@@ -1108,10 +1101,9 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 	vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
 	for (count = 0; count < num_se; count++) {
 		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
-		cgs_write_register(hwmgr->device, reg, data);
+		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
 		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1120,7 +1112,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 		if (0 != result)
 			break;
 	}
-	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	vega10_didt_set_mask(hwmgr, true);
@@ -1165,14 +1157,12 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
-	uint32_t reg;
 	int result;
 
 	adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
-	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index 9f18226a56ea..aa044c1955fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -25,7 +25,7 @@
 #include "vega10_hwmgr.h"
 #include "vega10_ppsmc.h"
 #include "vega10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "pp_debug.h"
 
 static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
@@ -89,6 +89,7 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
 
 int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct vega10_hwmgr *data = hwmgr->backend;
 	uint32_t tach_period;
 	uint32_t crystal_clock_freq;
@@ -100,10 +101,8 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
 	if (data->smu_features[GNLD_FAN_CONTROL].supported) {
 		result = vega10_get_current_rpm(hwmgr, speed);
 	} else {
-		uint32_t reg = soc15_get_register_offset(THM_HWID, 0,
-				mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
 		tach_period =
-			CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+			REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
 					  CG_TACH_STATUS,
 					  TACH_PERIOD);
 
@@ -127,26 +126,23 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
 */
 int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 {
-	uint32_t reg;
-
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	if (hwmgr->fan_ctrl_is_in_default_mode) {
 		hwmgr->fan_ctrl_default_mode =
-			CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+			REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
 				CG_FDO_CTRL2, FDO_PWM_MODE);
 		hwmgr->tmin =
-			CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+			REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
 				CG_FDO_CTRL2, TMIN);
 		hwmgr->fan_ctrl_is_in_default_mode = false;
 	}
 
-	cgs_write_register(hwmgr->device, reg,
-			CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
 				CG_FDO_CTRL2, TMIN, 0));
-	cgs_write_register(hwmgr->device, reg,
-			CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
 				CG_FDO_CTRL2, FDO_PWM_MODE, mode));
 
 	return 0;
@@ -159,18 +155,15 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 */
 int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg;
-
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	if (!hwmgr->fan_ctrl_is_in_default_mode) {
-		cgs_write_register(hwmgr->device, reg,
-			CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+		WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
 				CG_FDO_CTRL2, FDO_PWM_MODE,
 				hwmgr->fan_ctrl_default_mode));
-		cgs_write_register(hwmgr->device, reg,
-			CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+		WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
 				CG_FDO_CTRL2, TMIN,
 				hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT));
 		hwmgr->fan_ctrl_is_in_default_mode = true;
@@ -257,10 +250,10 @@ int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
 int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
 		uint32_t speed)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t duty100;
 	uint32_t duty;
 	uint64_t tmp64;
-	uint32_t reg;
 
 	if (hwmgr->thermal_controller.fanInfo.bNoFan)
 		return 0;
@@ -271,10 +264,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
 	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
 		vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
 
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1);
-
-	duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
 				    CG_FDO_CTRL1, FMAX_DUTY100);
 
 	if (duty100 == 0)
@@ -284,10 +274,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
 	do_div(tmp64, 100);
 	duty = (uint32_t)tmp64;
 
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0);
-	cgs_write_register(hwmgr->device, reg,
-		CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+		REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
 			CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
 
 	return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
@@ -317,10 +305,10 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
 */
 int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t tach_period;
 	uint32_t crystal_clock_freq;
 	int result = 0;
-	uint32_t reg;
 
 	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
 	    (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
@@ -333,10 +321,8 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
 	if (!result) {
 		crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
 		tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
-		reg = soc15_get_register_offset(THM_HWID, 0,
-				mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
-		cgs_write_register(hwmgr->device, reg,
-				CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+		WREG32_SOC15(THM, 0, mmCG_TACH_STATUS,
+				REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
 					CG_TACH_STATUS, TACH_PERIOD,
 					tach_period));
 	}
@@ -350,13 +336,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
 */
 int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int temp;
-	uint32_t reg;
 
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmCG_MULT_THERMAL_STATUS_BASE_IDX,  mmCG_MULT_THERMAL_STATUS);
-
-	temp = cgs_read_register(hwmgr->device, reg);
+	temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
 
 	temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
 			CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -379,11 +362,12 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 		struct PP_TemperatureRange *range)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 	int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	uint32_t val, reg;
+	uint32_t val;
 
 	if (low < range->min)
 		low = range->min;
@@ -393,20 +377,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 	if (low > high)
 		return -EINVAL;
 
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
+	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
 
-	val = cgs_read_register(hwmgr->device, reg);
-
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
 	val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
 			(~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
 			(~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
 
-	cgs_write_register(hwmgr->device, reg, val);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
 
 	return 0;
 }
@@ -418,21 +399,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 */
 static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
-		reg = soc15_get_register_offset(THM_HWID, 0,
-				mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL);
-		cgs_write_register(hwmgr->device, reg,
-			CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+		WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
 				CG_TACH_CTRL, EDGE_PER_REV,
 				hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1));
 	}
 
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
-	cgs_write_register(hwmgr->device, reg,
-		CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+		REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
 			CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28));
 
 	return 0;
@@ -445,9 +422,9 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
 */
 static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct vega10_hwmgr *data = hwmgr->backend;
 	uint32_t val = 0;
-	uint32_t reg;
 
 	if (data->smu_features[GNLD_FW_CTF].supported) {
 		if (data->smu_features[GNLD_FW_CTF].enabled)
@@ -465,8 +442,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
 	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
 	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
 
-	reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
-	cgs_write_register(hwmgr->device, reg, val);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
 
 	return 0;
 }
@@ -477,8 +453,8 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
 */
 int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct vega10_hwmgr *data = hwmgr->backend;
-	uint32_t reg;
 
 	if (data->smu_features[GNLD_FW_CTF].supported) {
 		if (!data->smu_features[GNLD_FW_CTF].enabled)
@@ -493,8 +469,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
 		data->smu_features[GNLD_FW_CTF].enabled = false;
 	}
 
-	reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 6a85238ae20f..7dca75cdf722 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -34,7 +34,6 @@
 #include "atomfirmware.h"
 #include "cgs_common.h"
 #include "vega12_inc.h"
-#include "pp_soc15.h"
 #include "pppcielanes.h"
 #include "vega12_hwmgr.h"
 #include "vega12_processpptables.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index df0fa815cd6e..cfd9e6ccb790 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -26,7 +26,7 @@
 #include "vega12_smumgr.h"
 #include "vega12_ppsmc.h"
 #include "vega12_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "pp_debug.h"
 
 static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
@@ -147,13 +147,10 @@ int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
 */
 int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int temp = 0;
-	uint32_t reg;
 
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmCG_MULT_THERMAL_STATUS_BASE_IDX,  mmCG_MULT_THERMAL_STATUS);
-
-	temp = cgs_read_register(hwmgr->device, reg);
+	temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
 
 	temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
 			CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -175,11 +172,12 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 		struct PP_TemperatureRange *range)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 	int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-	uint32_t val, reg;
+	uint32_t val;
 
 	if (low < range->min)
 		low = range->min;
@@ -189,18 +187,15 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 	if (low > high)
 		return -EINVAL;
 
-	reg = soc15_get_register_offset(THM_HWID, 0,
-			mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
+	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
 
-	val = cgs_read_register(hwmgr->device, reg);
-
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-	val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
 	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
 
-	cgs_write_register(hwmgr->device, reg, val);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
 
 	return 0;
 }
@@ -212,15 +207,14 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
 */
 static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t val = 0;
-	uint32_t reg;
 
 	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
 	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
 	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
 
-	reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
-	cgs_write_register(hwmgr->device, reg, val);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
 
 	return 0;
 }
@@ -231,10 +225,9 @@ static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
 */
 int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
deleted file mode 100644
index 214f370c5efd..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef PP_SOC15_H
-#define PP_SOC15_H
-
-#include "soc15_hw_ip.h"
-#include "vega10_ip_offset.h"
-
-inline static uint32_t soc15_get_register_offset(
-		uint32_t hw_id,
-		uint32_t inst,
-		uint32_t segment,
-		uint32_t offset)
-{
-	uint32_t reg = 0;
-
-	if (hw_id == THM_HWID)
-		reg = THM_BASE.instance[inst].segment[segment] + offset;
-	else if (hw_id == NBIF_HWID)
-		reg = NBIF_BASE.instance[inst].segment[segment] + offset;
-	else if (hw_id == MP1_HWID)
-		reg = MP1_BASE.instance[inst].segment[segment] + offset;
-	else if (hw_id == DF_HWID)
-		reg = DF_BASE.instance[inst].segment[segment] + offset;
-	else if (hw_id == GC_HWID)
-		reg = GC_BASE.instance[inst].segment[segment] + offset;
-	else if (hw_id == SMUIO_HWID)
-		reg = SMUIO_BASE.instance[inst].segment[segment] + offset;
-	return reg;
-}
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index bc53f2beda30..9adea7263774 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -23,7 +23,7 @@
 
 #include "smumgr.h"
 #include "smu10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "smu10_smumgr.h"
 #include "ppatomctrl.h"
 #include "rv_ppsmc.h"
@@ -49,48 +49,41 @@
 
 static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
 
 	phm_wait_for_register_unequal(hwmgr, reg,
 			0, MP1_C2PMSG_90__CONTENT_MASK);
 
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 }
 
 static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
 		uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, msg);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
 
 	return 0;
 }
 
 static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
 static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	smu10_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
 	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -104,17 +97,13 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 		uint16_t msg, uint32_t parameter)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	smu10_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-	cgs_write_register(hwmgr->device, reg, parameter);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
 
 	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 4aafb043bcb0..14ac6d15c7a7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -23,7 +23,7 @@
 
 #include "smumgr.h"
 #include "vega10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "vega10_smumgr.h"
 #include "vega10_hwmgr.h"
 #include "vega10_ppsmc.h"
@@ -54,18 +54,13 @@
 
 static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
 {
-	uint32_t mp1_fw_flags, reg;
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t mp1_fw_flags;
 
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
-
-	cgs_write_register(hwmgr->device, reg,
+	WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
 			(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
 
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
-	mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
+	mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
 
 	if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
 		return true;
@@ -81,11 +76,11 @@ static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
  */
 static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 	uint32_t ret;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
 
 	ret = phm_wait_for_register_unequal(hwmgr, reg,
 			0, MP1_C2PMSG_90__CONTENT_MASK);
@@ -93,7 +88,7 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
 	if (ret)
 		pr_err("No response from smu\n");
 
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 }
 
 /*
@@ -105,11 +100,9 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
 static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
 		uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, msg);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
 
 	return 0;
 }
@@ -122,14 +115,12 @@ static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
  */
 static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t ret;
 
 	vega10_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
 	vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -150,18 +141,14 @@ static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 		uint16_t msg, uint32_t parameter)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t ret;
 
 	vega10_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-	cgs_write_register(hwmgr->device, reg, parameter);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
 
 	vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -174,12 +161,9 @@ static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 
 static int vega10_get_argument(struct pp_hwmgr *hwmgr)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
 static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 651a3f28734b..7d9b40e8b1bf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -23,7 +23,7 @@
 
 #include "smumgr.h"
 #include "vega12_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
 #include "vega12_smumgr.h"
 #include "vega12_ppsmc.h"
 #include "vega12/smu9_driver_if.h"
@@ -44,18 +44,13 @@
 
 static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
 {
-	uint32_t mp1_fw_flags, reg;
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t mp1_fw_flags;
 
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
-
-	cgs_write_register(hwmgr->device, reg,
+	WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
 			(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
 
-	reg = soc15_get_register_offset(NBIF_HWID, 0,
-			mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
-	mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
+	mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
 
 	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
 				MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
@@ -72,15 +67,15 @@ static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
  */
 static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	uint32_t reg;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
 
 	phm_wait_for_register_unequal(hwmgr, reg,
 			0, MP1_C2PMSG_90__CONTENT_MASK);
 
-	return cgs_read_register(hwmgr->device, reg);
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 }
 
 /*
@@ -92,11 +87,9 @@ static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
 int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
 		uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, msg);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
 
 	return 0;
 }
@@ -109,13 +102,11 @@ int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
  */
 int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	vega12_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
 	vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -135,17 +126,13 @@ int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 		uint16_t msg, uint32_t parameter)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	vega12_wait_for_response(hwmgr);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-	cgs_write_register(hwmgr->device, reg, 0);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-	cgs_write_register(hwmgr->device, reg, parameter);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
 
 	vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
 
@@ -166,11 +153,9 @@ int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 int vega12_send_msg_to_smc_with_parameter_without_waiting(
 		struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
-	cgs_write_register(hwmgr->device, reg, parameter);
+	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
 
 	return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
 }
@@ -183,12 +168,9 @@ int vega12_send_msg_to_smc_with_parameter_without_waiting(
  */
 int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
 {
-	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	reg = soc15_get_register_offset(MP1_HWID, 0,
-			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-
-	*arg = cgs_read_register(hwmgr->device, reg);
+	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 
 	return 0;
 }

From c11d8afe10228e4621acfcb8f302255ea8567a1e Mon Sep 17 00:00:00 2001
From: Evan Quan <evan.quan@amd.com>
Date: Tue, 10 Apr 2018 13:05:49 +0800
Subject: [PATCH 0732/1286] drm/amd/pp: fix the wrong engine clock readout in
 deep sleep

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 13 ++++---------
 drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h   |  1 +
 2 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f6427c88f6a7..c90502bcc2b2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3805,7 +3805,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 			      void *value, int *size)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
-	uint32_t sclk_idx, mclk_idx, activity_percent = 0;
+	uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
 	struct vega10_hwmgr *data = hwmgr->backend;
 	struct vega10_dpm_table *dpm_table = &data->dpm_table;
 	int ret = 0;
@@ -3813,14 +3813,9 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
 	switch (idx) {
 	case AMDGPU_PP_SENSOR_GFX_SCLK:
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
-		sclk_idx = smum_get_argument(hwmgr);
-		if (sclk_idx <  dpm_table->gfx_table.count) {
-			*((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
-			*size = 4;
-		} else {
-			ret = -EINVAL;
-		}
+		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
+		sclk_mhz = smum_get_argument(hwmgr);
+		*((uint32_t *)value) = sclk_mhz * 100;
 		break;
 	case AMDGPU_PP_SENSOR_GFX_MCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index c3ed737ab951..715b5a168831 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -131,6 +131,7 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_RunAcgInOpenLoop               0x5E
 #define PPSMC_MSG_InitializeAcg                  0x5F
 #define PPSMC_MSG_GetCurrPkgPwr                  0x61
+#define PPSMC_MSG_GetAverageGfxclkActualFrequency 0x63
 #define PPSMC_MSG_SetPccThrottleLevel            0x67
 #define PPSMC_MSG_UpdatePkgPwrPidAlpha           0x68
 #define PPSMC_Message_Count                      0x69

From f1018f50d48395b4a189bf8ea9af1e4441209cfd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 5 Apr 2018 14:46:41 +0200
Subject: [PATCH 0733/1286] drm/amdgpu: use ctx bytes_moved
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use the ctx.bytes_moved value that ttm_bo_validate() fills in, instead of
sampling the global (inaccurate) counter before and after the call.
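
A minimal sketch of the two accounting schemes (names as in the hunk
below; not a complete function):

	/* old: sample the device-global counter around the validate call.
	 * The counter is shared by all submissions, so moves triggered by
	 * other contexts in the meantime get misattributed. */
	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	bytes_moved = atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved;
	p->bytes_moved += bytes_moved;

	/* new: TTM accumulates the bytes moved for this operation in the
	 * ttm_operation_ctx, so read it back directly. */
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	p->bytes_moved += ctx.bytes_moved;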

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d7d7ce1507ec..de69ab12bb55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -412,7 +412,6 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		struct amdgpu_bo_list_entry *candidate = p->evictable;
 		struct amdgpu_bo *bo = candidate->robj;
 		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-		u64 initial_bytes_moved, bytes_moved;
 		bool update_bytes_moved_vis;
 		uint32_t other;
 
@@ -436,18 +435,15 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 			continue;
 
 		/* Good we can try to move this BO somewhere else */
-		amdgpu_ttm_placement_from_domain(bo, other);
 		update_bytes_moved_vis =
 			adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 			bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
-		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+		amdgpu_ttm_placement_from_domain(bo, other);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
-			initial_bytes_moved;
-		p->bytes_moved += bytes_moved;
+		p->bytes_moved += ctx.bytes_moved;
 		if (update_bytes_moved_vis)
-			p->bytes_moved_vis += bytes_moved;
+			p->bytes_moved_vis += ctx.bytes_moved;
 
 		if (unlikely(r))
 			break;

From 5422a28fe86f9f77480471385e0a416c27a9ca72 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 5 Apr 2018 16:42:03 +0200
Subject: [PATCH 0734/1286] drm/amdgpu: fix and cleanup cpu visible VRAM
 handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The detection of whether a BO was placed in CPU-visible VRAM was
incorrect: it only compared bo->tbo.mem.start against the visible-VRAM
page limit, which is not meaningful for a BO split across several
drm_mm nodes.

Fix it and merge it with the correct, node-walking detection already
used in amdgpu_ttm.c.
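
Reduced to its essence, the check that the new
amdgpu_bo_in_cpu_visible_vram() helper is built around looks like the
sketch below (the full helper is in the amdgpu_object.h hunk):

	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;
	bool visible = false;

	/* A split BO may have, say, one node at a page below fpfn (CPU
	 * visible) and another far above it; only walking every node finds
	 * the visible chunk, while a single bo->tbo.mem.start comparison
	 * cannot. */
	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++) {
		if (node->start < fpfn) {
			visible = true;
			break;
		}
	}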

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c     |  6 ++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 21 +++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 19 +++----------------
 3 files changed, 26 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index de69ab12bb55..68af2f878bc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -382,8 +382,7 @@ retry:
 
 	p->bytes_moved += ctx.bytes_moved;
 	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	    amdgpu_bo_in_cpu_visible_vram(bo))
 		p->bytes_moved_vis += ctx.bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains &&
@@ -437,8 +436,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		/* Good we can try to move this BO somewhere else */
 		update_bytes_moved_vis =
 			adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-			bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
+			amdgpu_bo_in_cpu_visible_vram(bo);
 		amdgpu_ttm_placement_from_domain(bo, other);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		p->bytes_moved += ctx.bytes_moved;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 546f77cb7882..3bee13344065 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -195,6 +195,27 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
 	}
 }
 
+/**
+ * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
+ */
+static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+	struct drm_mm_node *node = bo->tbo.mem.mm_node;
+	unsigned long pages_left;
+
+	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+		return false;
+
+	for (pages_left = bo->tbo.mem.num_pages; pages_left;
+	     pages_left -= node->size, node++)
+		if (node->start < fpfn)
+			return true;
+
+	return false;
+}
+
 /**
  * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 205da3ff9cd0..ab73300e6c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -223,20 +223,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		if (!adev->mman.buffer_funcs_enabled) {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
-			unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
-			struct drm_mm_node *node = bo->mem.mm_node;
-			unsigned long pages_left;
-
-			for (pages_left = bo->mem.num_pages;
-			     pages_left;
-			     pages_left -= node->size, node++) {
-				if (node->start < fpfn)
-					break;
-			}
-
-			if (!pages_left)
-				goto gtt;
+			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+			   amdgpu_bo_in_cpu_visible_vram(abo)) {
 
 			/* Try evicting to the CPU inaccessible part of VRAM
 			 * first, but only set GTT as busy placement, so this
@@ -245,12 +233,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 			 */
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 							 AMDGPU_GEM_DOMAIN_GTT);
-			abo->placements[0].fpfn = fpfn;
+			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 			abo->placements[0].lpfn = 0;
 			abo->placement.busy_placement = &abo->placements[1];
 			abo->placement.num_busy_placement = 1;
 		} else {
-gtt:
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 		}
 		break;

From 45a2d58e84e6d28c2d9ae8e68bd815d9a98ad52e Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Tue, 10 Apr 2018 16:08:44 -0400
Subject: [PATCH 0735/1286] drm/amd/display: Fix 64-bit division in
 hwss_edp_power_control

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 52427ae42e0f..68a182ce53c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -857,7 +857,7 @@ void hwss_edp_power_control(
 					dm_get_elapse_time_in_ns(
 							ctx,
 							current_ts,
-							link->link_trace.time_stamp.edp_poweroff) / 1000000;
+							div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
 			unsigned long long wait_time_ms = 0;
 
 			/* max 500ms from LCDVDD off to on */

From 2fa417324abd635294c298f9f3119743055bf5b9 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Mon, 9 Apr 2018 14:27:46 -0400
Subject: [PATCH 0736/1286] drm/amd/display: Remove PRE_VEGA flag

This is now enabled by default upstream, so the flag is no longer needed.

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ---
 drivers/gpu/drm/amd/display/Kconfig        | 8 --------
 2 files changed, 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index abc33464959e..62d6505ade84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2093,9 +2093,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_POLARIS12:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
-		return amdgpu_dc != 0;
-#endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 5b124a67404c..d5d4586e6176 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -9,14 +9,6 @@ config DRM_AMD_DC
 	  support for AMDGPU. This adds required support for Vega and
 	  Raven ASICs.
 
-config DRM_AMD_DC_PRE_VEGA
-	bool "DC support for Polaris and older ASICs"
-	default y
-	help
-	  Choose this option to enable the new DC support for older asics
-	  by default. This includes Polaris, Carrizo, Tonga, Bonaire,
-	  and Hawaii.
-
 config DRM_AMD_DC_FBC
 	bool "AMD FBC - Enable Frame Buffer Compression"
 	depends on DRM_AMD_DC

From db4b37975888cf22e39f2cabc6590167faabaeaa Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Mon, 9 Apr 2018 14:04:56 -0400
Subject: [PATCH 0737/1286] drm/amd/display: Don't spam debug messages

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/include/logger_types.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 4f332e80cecc..b608a0830801 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -32,7 +32,7 @@
 
 #define DC_LOG_ERROR(...) DRM_ERROR(__VA_ARGS__)
 #define DC_LOG_WARNING(...) DRM_WARN(__VA_ARGS__)
-#define DC_LOG_DEBUG(...) DRM_INFO(__VA_ARGS__)
+#define DC_LOG_DEBUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)

From d6014e776ceb5da2d86ce405f692522f2b0370f2 Mon Sep 17 00:00:00 2001
From: Shirish S <shirish.s@amd.com>
Date: Wed, 28 Mar 2018 12:22:22 +0530
Subject: [PATCH 0738/1286] drm/amd/display: remove dummy is_blanked() to
 optimise boot time

The is_blanked() hook is a dummy for the underlay pipe that always
reports "not blanked", so the wait-for-blank loop never breaks out
early and burns ~300ms at boot.

This patch removes the dummy hook and adds the missing NULL checks at
the call sites.
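
Schematically, the wait loop in hwss_wait_for_blank_complete() behaves
like this (a sketch; the exact per-iteration delay is omitted):

	for (counter = 0; counter < 100; counter++) {
		if (tg->funcs->is_blanked(tg))	/* underlay stub: always false */
			break;
		/* short per-iteration delay; with the stub the loop never
		 * breaks early, so all 100 iterations run (~300ms total) */
	}

With the stub removed, a NULL funcs->is_blanked now short-circuits the
wait entirely.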

Signed-off-by: Shirish S <shirish.s@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c      | 3 +++
 drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c             | 3 ++-
 .../drm/amd/display/dc/dce110/dce110_timing_generator_v.c  | 7 -------
 3 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index ebc96b720083..481f6928a9c0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -230,6 +230,9 @@ bool hwss_wait_for_blank_complete(
 {
 	int counter;
 
+	/* Not applicable if the pipe is not primary, save 300ms of boot time */
+	if (!tg->funcs->is_blanked)
+		return true;
 	for (counter = 0; counter < 100; counter++) {
 		if (tg->funcs->is_blanked(tg))
 			break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 487724345d9d..0275d6d60da4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -53,7 +53,8 @@ void dce_pipe_control_lock(struct dc *dc,
 	struct dce_hwseq *hws = dc->hwseq;
 
 	/* Not lock pipe when blank */
-	if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
+	if (lock && pipe->stream_res.tg->funcs->is_blanked &&
+	    pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
 		return;
 
 	val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index 8ad04816e7d3..a3cef60380ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -648,12 +648,6 @@ static void dce110_timing_generator_v_disable_vga(
 	return;
 }
 
-static bool dce110_tg_v_is_blanked(struct timing_generator *tg)
-{
-	/* Signal comes from the primary pipe, underlay is never blanked. */
-	return false;
-}
-
 /** ********************************************************************************************
  *
  * DCE11 Timing Generator Constructor / Destructor
@@ -670,7 +664,6 @@ static const struct timing_generator_funcs dce110_tg_v_funcs = {
 		.set_early_control = dce110_timing_generator_v_set_early_control,
 		.wait_for_state = dce110_timing_generator_v_wait_for_state,
 		.set_blank = dce110_timing_generator_v_set_blank,
-		.is_blanked = dce110_tg_v_is_blanked,
 		.set_colors = dce110_timing_generator_v_set_colors,
 		.set_overscan_blank_color =
 				dce110_timing_generator_v_set_overscan_color_black,

From 45313e5f1c72962a21df58af52a421e4c076b2d4 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Tue, 10 Apr 2018 10:58:43 +0800
Subject: [PATCH 0739/1286] drm/amd/pp: Move duplicated macro definitions to hwmgr.h

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c        | 4 ----
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c      | 4 ----
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h               | 2 ++
 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c        | 3 ---
 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c      | 3 ---
 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c   | 3 ---
 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 2 --
 drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c     | 2 --
 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c     | 4 ----
 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c    | 2 --
 10 files changed, 2 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 14332159227e..21c021ba0f49 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -61,10 +61,6 @@
 #define SMC_CG_IND_START            0xc0030000
 #define SMC_CG_IND_END              0xc0040000
 
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
 #define MEM_FREQ_LOW_LATENCY        25000
 #define MEM_FREQ_HIGH_LATENCY       80000
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index c90502bcc2b2..26c56025d56c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -51,10 +51,6 @@
 #include "smuio/smuio_9_0_offset.h"
 #include "smuio/smuio_9_0_sh_mask.h"
 
-#define VOLTAGE_SCALE  4
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-
 #define HBM_MEMORY_CHANNEL_WIDTH    128
 
 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index e450ec74d6ed..9b3dd7dce4e2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -38,6 +38,8 @@ struct phm_fan_speed_info;
 struct pp_atomctrl_voltage_table;
 
 #define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
 
 enum DISPLAY_GAP {
 	DISPLAY_GAP_VBLANK_OR_WM = 0,   /* Wait for vblank or MCHG watermark. */
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index c28b95fd1c85..2a93f3a8e4f0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -61,9 +61,6 @@
 
 #define SMC_RAM_END 0x40000
 
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1    625
-#define VOLTAGE_VID_OFFSET_SCALE2    100
 #define CISLAND_MINIMUM_ENGINE_CLOCK 800
 #define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index dae3422366b3..53df9405f43a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -53,10 +53,7 @@
 
 #define FIJI_SMC_SIZE 0x20000
 
-#define VOLTAGE_SCALE 4
 #define POWERTUNE_DEFAULT_SET_MAX    1
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
 #define VDDC_VDDCI_DELTA            300
 #define MC_CG_ARB_FREQ_F1           0x0b
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index bc05e355012d..415f691c3fa9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -60,10 +60,7 @@
 
 #define ICELAND_SMC_SIZE               0x20000
 
-#define VOLTAGE_SCALE 4
 #define POWERTUNE_DEFAULT_SET_MAX    1
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
 #define MC_CG_ARB_FREQ_F1           0x0b
 #define VDDC_VDDCI_DELTA            200
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index d9192286099d..a8c6524f07e4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -52,8 +52,6 @@
 #include "dce/dce_10_0_sh_mask.h"
 
 #define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
 #define POWERTUNE_DEFAULT_SET_MAX    1
 #define VDDC_VDDCI_DELTA            200
 #define MC_CG_ARB_FREQ_F1           0x0b
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 9adea7263774..0a563f6fe9ea 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -33,8 +33,6 @@
 #include "pp_debug.h"
 
 
-#define VOLTAGE_SCALE 4
-
 #define BUFFER_SIZE                 80000
 #define MAX_STRING_SIZE             15
 #define BUFFER_SIZETWO              131072
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 94ba304ff52e..782b19fc2e70 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -55,11 +55,7 @@
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 
-
-#define VOLTAGE_SCALE 4
 #define POWERTUNE_DEFAULT_SET_MAX    1
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
 #define MC_CG_ARB_FREQ_F1           0x0b
 #define VDDC_VDDCI_DELTA            200
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 14ac6d15c7a7..e84669c448a3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -35,8 +35,6 @@
 #define AVFS_EN_MSB		1568
 #define AVFS_EN_LSB		1568
 
-#define VOLTAGE_SCALE	4
-
 /* Microcode file is stored in this buffer */
 #define BUFFER_SIZE                 80000
 #define MAX_STRING_SIZE             15

From 29ae1118d85e8435b12fca512410dbd39920cce9 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Tue, 10 Apr 2018 10:58:43 +0800
Subject: [PATCH 0740/1286] drm/amd/pp: Remove unnecessary forward declaration

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c    | 84 +++++++++----------
 1 file changed, 41 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 26c56025d56c..127c550e8bb1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -75,8 +75,6 @@ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
-static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, uint32_t mask);
 
 static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
 
@@ -4095,6 +4093,47 @@ static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 	}
 }
 
+static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+
+	switch (type) {
+	case PP_SCLK:
+		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
+		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+
+		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
+			"Failed to upload boot level to lowest!",
+			return -EINVAL);
+
+		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
+			"Failed to upload dpm max level to highest!",
+			return -EINVAL);
+		break;
+
+	case PP_MCLK:
+		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
+		data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+
+		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
+			"Failed to upload boot level to lowest!",
+			return -EINVAL);
+
+		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
+			"Failed to upload dpm max level to highest!",
+			return -EINVAL);
+
+		break;
+
+	case PP_PCIE:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 				enum amd_dpm_forced_level level)
 {
@@ -4381,47 +4420,6 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
-static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
-		enum pp_clock_type type, uint32_t mask)
-{
-	struct vega10_hwmgr *data = hwmgr->backend;
-
-	switch (type) {
-	case PP_SCLK:
-		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
-		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
-
-		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
-			"Failed to upload boot level to lowest!",
-			return -EINVAL);
-
-		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
-			"Failed to upload dpm max level to highest!",
-			return -EINVAL);
-		break;
-
-	case PP_MCLK:
-		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
-		data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
-
-		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
-			"Failed to upload boot level to lowest!",
-			return -EINVAL);
-
-		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
-			"Failed to upload dpm max level to highest!",
-			return -EINVAL);
-
-		break;
-
-	case PP_PCIE:
-	default:
-		break;
-	}
-
-	return 0;
-}
-
 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, char *buf)
 {

From 819a23f83e3b2513cffbef418458a47ca02c36b3 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Tue, 10 Apr 2018 17:17:22 +0800
Subject: [PATCH 0741/1286] drm/amdgpu: Add APU support in vi_set_uvd_clocks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix the issue where setting the UVD clock failed on CZ/ST,
which led to a 1s delay at boot.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Shirish S <shirish.s@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 46 +++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 1b4ee249b95a..51acd7c3d2a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -728,33 +728,57 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
 		return r;
 
 	tmp = RREG32_SMC(cntl_reg);
-	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
-		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
+
+	if (adev->flags & AMD_IS_APU)
+		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
+	else
+		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
+				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
 	tmp |= dividers.post_divider;
 	WREG32_SMC(cntl_reg, tmp);
 
 	for (i = 0; i < 100; i++) {
-		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
-			break;
+		tmp = RREG32_SMC(status_reg);
+		if (adev->flags & AMD_IS_APU) {
+			if (tmp & 0x10000)
+				break;
+		} else {
+			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
+				break;
+		}
 		mdelay(10);
 	}
 	if (i == 100)
 		return -ETIMEDOUT;
-
 	return 0;
 }
 
+#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
+#define ixGNB_CLK1_STATUS   0xD822010C
+#define ixGNB_CLK2_DFS_CNTL 0xD8220110
+#define ixGNB_CLK2_STATUS   0xD822012C
+
 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
 {
 	int r;
 
-	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
-	if (r)
-		return r;
+	if (adev->flags & AMD_IS_APU) {
+		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
+		if (r)
+			return r;
 
-	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
-	if (r)
-		return r;
+		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
+		if (r)
+			return r;
+	} else {
+		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
+		if (r)
+			return r;
+
+		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
+		if (r)
+			return r;
+	}
 
 	return 0;
 }

From 08ebb6e9f4fd7098c28e0ebbb42847cf0488ebb8 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Tue, 10 Apr 2018 17:49:56 +0800
Subject: [PATCH 0742/1286] drm/amdgpu: Add APU support in vi_set_vce_clocks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

1. Fix setting the VCE clocks failing on CZ/ST,
   which led to a 1s delay at boot.
2. Remove the workaround in vce_v3_0.c.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Shirish S <shirish.s@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c |  4 ++--
 drivers/gpu/drm/amd/amdgpu/vi.c       | 31 +++++++++++++++++++++------
 2 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 428d1928e44e..ac9617269a2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -467,8 +467,8 @@ static int vce_v3_0_hw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	vce_v3_0_override_vce_clock_gating(adev, true);
-	if (!(adev->flags & AMD_IS_APU))
-		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
+	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
 	for (i = 0; i < adev->vce.num_rings; i++)
 		adev->vce.ring[i].ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 51acd7c3d2a9..4034a2863226 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -757,6 +757,8 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
 #define ixGNB_CLK1_STATUS   0xD822010C
 #define ixGNB_CLK2_DFS_CNTL 0xD8220110
 #define ixGNB_CLK2_STATUS   0xD822012C
+#define ixGNB_CLK3_DFS_CNTL 0xD8220130
+#define ixGNB_CLK3_STATUS   0xD822014C
 
 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
 {
@@ -788,6 +790,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 	int r, i;
 	struct atom_clock_dividers dividers;
 	u32 tmp;
+	u32 reg_ctrl;
+	u32 reg_status;
+	u32 status_mask;
+	u32 reg_mask;
+
+	if (adev->flags & AMD_IS_APU) {
+		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
+		reg_status = ixGNB_CLK3_STATUS;
+		status_mask = 0x00010000;
+		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
+	} else {
+		reg_ctrl = ixCG_ECLK_CNTL;
+		reg_status = ixCG_ECLK_STATUS;
+		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
+		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
+	}
 
 	r = amdgpu_atombios_get_clock_dividers(adev,
 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
@@ -796,24 +814,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 		return r;
 
 	for (i = 0; i < 100; i++) {
-		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+		if (RREG32_SMC(reg_status) & status_mask)
 			break;
 		mdelay(10);
 	}
+
 	if (i == 100)
 		return -ETIMEDOUT;
 
-	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
-	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
-		CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
+	tmp = RREG32_SMC(reg_ctrl);
+	tmp &= ~reg_mask;
 	tmp |= dividers.post_divider;
-	WREG32_SMC(ixCG_ECLK_CNTL, tmp);
+	WREG32_SMC(reg_ctrl, tmp);
 
 	for (i = 0; i < 100; i++) {
-		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+		if (RREG32_SMC(reg_status) & status_mask)
 			break;
 		mdelay(10);
 	}
+
 	if (i == 100)
 		return -ETIMEDOUT;
 

From 61279073b1d35ea29bf546c7751bda09610ab5ef Mon Sep 17 00:00:00 2001
From: Kenneth Feng <kenneth.feng@amd.com>
Date: Mon, 9 Apr 2018 14:53:51 +0800
Subject: [PATCH 0743/1286] amd/powerplay: implement the
 vega12_force_clock_level interface

Implement pp_dpm_sclk/pp_dpm_mclk in sysfs to force the
gfxclk/uclk dpm level on Vega12.
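
As a rough, illustrative sketch (not part of this patch): the level mask
derived from what userspace writes to pp_dpm_sclk/pp_dpm_mclk is turned
into boot/max DPM levels with the same ffs()/fls() pattern the existing
force_clock_level handlers use. A minimal userspace analogue of that
mapping, assuming a 32-bit mask:

  /* lowest set bit -> boot level, highest set bit -> max level */
  #include <stdio.h>
  #include <stdint.h>

  static unsigned int lowest_level(uint32_t mask)  /* ffs(mask) - 1 */
  {
          return mask ? (unsigned int)__builtin_ctz(mask) : 0;
  }

  static unsigned int highest_level(uint32_t mask) /* fls(mask) - 1 */
  {
          return mask ? 31u - (unsigned int)__builtin_clz(mask) : 0;
  }

  int main(void)
  {
          uint32_t mask = (1u << 2) | (1u << 3) | (1u << 5); /* levels 2, 3, 5 */

          printf("boot level %u, max level %u\n",
                 lowest_level(mask), highest_level(mask)); /* prints 2 and 5 */
          return 0;
  }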

Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 42 ++++++++++++++++++-
 1 file changed, 41 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 7dca75cdf722..df234db6485e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -991,15 +991,55 @@ static uint32_t vega12_find_highest_dpm_level(
 
 static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 {
+	struct vega12_hwmgr *data = hwmgr->backend;
+	if (data->smc_state_table.gfx_boot_level !=
+			data->dpm_table.gfx_table.dpm_state.soft_min_level) {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSoftMinByFreq,
+			PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
+		data->dpm_table.gfx_table.dpm_state.soft_min_level =
+				data->smc_state_table.gfx_boot_level;
+	}
+
+	if (data->smc_state_table.mem_boot_level !=
+			data->dpm_table.mem_table.dpm_state.soft_min_level) {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSoftMinByFreq,
+			PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
+		data->dpm_table.mem_table.dpm_state.soft_min_level =
+				data->smc_state_table.mem_boot_level;
+	}
+
 	return 0;
+
 }
 
 static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 {
+	struct vega12_hwmgr *data = hwmgr->backend;
+	if (data->smc_state_table.gfx_max_level !=
+		data->dpm_table.gfx_table.dpm_state.soft_max_level) {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSoftMaxByFreq,
+			/* plus the vale by 1 to align the resolution */
+			PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
+		data->dpm_table.gfx_table.dpm_state.soft_max_level =
+				data->smc_state_table.gfx_max_level;
+	}
+
+	if (data->smc_state_table.mem_max_level !=
+		data->dpm_table.mem_table.dpm_state.soft_max_level) {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetSoftMaxByFreq,
+			/* plus the vale by 1 to align the resolution */
+			PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
+		data->dpm_table.mem_table.dpm_state.soft_max_level =
+				data->smc_state_table.mem_max_level;
+	}
+
 	return 0;
 }
 
-
 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
 	struct vega12_hwmgr *data =

From 564be2fc2b1ddb6cbef2bd77f83e91c9e4a1063f Mon Sep 17 00:00:00 2001
From: Kenneth Feng <kenneth.feng@amd.com>
Date: Wed, 4 Apr 2018 15:17:22 +0800
Subject: [PATCH 0744/1286] drm/amd/powerplay: Get more than 8 level gfxclk
 states

Allow Vega12 to expose more than 8 gfx dpm levels.

Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index bc98b1df3b65..e81ded1ec198 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -33,7 +33,7 @@
 #define WaterMarksExist  1
 #define WaterMarksLoaded 2
 
-#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS   8
+#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS   16
 #define VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS   8
 #define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS  8
 #define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS     4

From 0bc8f3d29b188b273e92cd895da3b5c31e86434f Mon Sep 17 00:00:00 2001
From: Kenneth Feng <kenneth.feng@amd.com>
Date: Tue, 10 Apr 2018 17:05:36 +0800
Subject: [PATCH 0745/1286] drm/amd/powerplay: initialize the dpm initial
 enabled state

To expose the right dpm levels to sysfs.

Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index df234db6485e..3e1ed0aca29c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -545,6 +545,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -564,6 +565,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -584,6 +586,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -604,6 +607,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 		return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -624,6 +628,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -644,6 +649,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 		return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -665,6 +671,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -685,6 +692,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -705,6 +713,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -725,6 +734,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			return -EINVAL);
 
 		dpm_table->dpm_levels[i].value = clock;
+		dpm_table->dpm_levels[i].enabled = true;
 	}
 
 	vega12_init_dpm_state(&(dpm_table->dpm_state));

From 18081c2003915dadc3507b79cf6453f997948ded Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 4 Apr 2018 18:33:15 +0800
Subject: [PATCH 0746/1286] drm/amd/pp: Remove dead function in smu7_smumgr.c

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 10 ----------
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h |  1 -
 2 files changed, 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 41fab2df994e..8b9518a64121 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -231,16 +231,6 @@ int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
-{
-	if (!smu7_is_smc_ram_running(hwmgr))
-		return -EINVAL;
-
-	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
-	return 0;
-}
-
-
 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
 {
 	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 126d300259ba..39c9bfda0ab4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -67,7 +67,6 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
 int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
 						uint16_t msg, uint32_t parameter);
 int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
-int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr);
 
 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
 int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,

From 89a111476676add9ded0286fc7606508b5efb101 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 4 Apr 2018 18:41:08 +0800
Subject: [PATCH 0747/1286] drm/amd/pp: Remove useless smu7 running state check

Only check the SMC running state before starting the SMU.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 8b9518a64121..fb32a3fcc278 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -167,10 +167,6 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
 	int ret;
 
-	if (!smu7_is_smc_ram_running(hwmgr))
-		return -EINVAL;
-
-
 	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
 
 	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
@@ -199,10 +195,6 @@ int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
 
 int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
 {
-	if (!smu7_is_smc_ram_running(hwmgr)) {
-		return -EINVAL;
-	}
-
 	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
 
 	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);

From 5452cf44d691edada697108f883c78edb40dc281 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Fri, 9 Mar 2018 13:39:47 +0100
Subject: [PATCH 0748/1286] drm/ttm: keep a reference to transfer pipelined BOs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make sure the transferred BO is never destroyed before the transfer BO.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo_util.c | 48 +++++++++++++++++++------------
 1 file changed, 29 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ebbae6067ab..f3bf545a79cf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -39,6 +39,11 @@
 #include <linux/module.h>
 #include <linux/reservation.h>
 
+struct ttm_transfer_obj {
+	struct ttm_buffer_object base;
+	struct ttm_buffer_object *bo;
+};
+
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
 	ttm_bo_mem_put(bo, &bo->mem);
@@ -454,7 +459,11 @@ EXPORT_SYMBOL(ttm_bo_move_memcpy);
 
 static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
 {
-	kfree(bo);
+	struct ttm_transfer_obj *fbo;
+
+	fbo = container_of(bo, struct ttm_transfer_obj, base);
+	ttm_bo_unref(&fbo->bo);
+	kfree(fbo);
 }
 
 /**
@@ -475,14 +484,15 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
 static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 				      struct ttm_buffer_object **new_obj)
 {
-	struct ttm_buffer_object *fbo;
+	struct ttm_transfer_obj *fbo;
 	int ret;
 
 	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
 	if (!fbo)
 		return -ENOMEM;
 
-	*fbo = *bo;
+	fbo->base = *bo;
+	fbo->bo = ttm_bo_reference(bo);
 
 	/**
 	 * Fix up members that we shouldn't copy directly:
@@ -490,25 +500,25 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 */
 
 	atomic_inc(&bo->bdev->glob->bo_count);
-	INIT_LIST_HEAD(&fbo->ddestroy);
-	INIT_LIST_HEAD(&fbo->lru);
-	INIT_LIST_HEAD(&fbo->swap);
-	INIT_LIST_HEAD(&fbo->io_reserve_lru);
-	mutex_init(&fbo->wu_mutex);
-	fbo->moving = NULL;
-	drm_vma_node_reset(&fbo->vma_node);
-	atomic_set(&fbo->cpu_writers, 0);
+	INIT_LIST_HEAD(&fbo->base.ddestroy);
+	INIT_LIST_HEAD(&fbo->base.lru);
+	INIT_LIST_HEAD(&fbo->base.swap);
+	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
+	mutex_init(&fbo->base.wu_mutex);
+	fbo->base.moving = NULL;
+	drm_vma_node_reset(&fbo->base.vma_node);
+	atomic_set(&fbo->base.cpu_writers, 0);
 
-	kref_init(&fbo->list_kref);
-	kref_init(&fbo->kref);
-	fbo->destroy = &ttm_transfered_destroy;
-	fbo->acc_size = 0;
-	fbo->resv = &fbo->ttm_resv;
-	reservation_object_init(fbo->resv);
-	ret = reservation_object_trylock(fbo->resv);
+	kref_init(&fbo->base.list_kref);
+	kref_init(&fbo->base.kref);
+	fbo->base.destroy = &ttm_transfered_destroy;
+	fbo->base.acc_size = 0;
+	fbo->base.resv = &fbo->base.ttm_resv;
+	reservation_object_init(fbo->base.resv);
+	ret = reservation_object_trylock(fbo->base.resv);
 	WARN_ON(!ret);
 
-	*new_obj = fbo;
+	*new_obj = &fbo->base;
 	return 0;
 }
 

From 5eeae247d227c448d4db8f60ce184ddb0e0feca0 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 10 Apr 2018 10:15:26 -0500
Subject: [PATCH 0749/1286] drm/amdgpu/gfx9: cache DB_DEBUG2 and make it
 available to userspace
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Userspace needs to query this value to work around a hw bug in
certain cases.
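
As a hedged illustration only (not part of this patch): userspace could
read the value back through libdrm's register-read helper, which goes
through the same read-register query path this patch extends. The dword
offset below is a placeholder; the real one comes from the GC 9.0
register headers:

  #include <stdio.h>
  #include <stdint.h>
  #include <fcntl.h>
  #include <amdgpu.h>

  #define DB_DEBUG2_DWORD_OFFSET 0x0 /* placeholder offset */

  int main(void)
  {
          uint32_t major, minor, val = 0;
          amdgpu_device_handle dev;
          int fd = open("/dev/dri/renderD128", O_RDWR);

          if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
                  return 1;

          /* broadcast instance, no special flags */
          if (!amdgpu_read_mm_registers(dev, DB_DEBUG2_DWORD_OFFSET, 1,
                                        0xffffffff, 0, &val))
                  printf("DB_DEBUG2 = 0x%08x\n", val);

          amdgpu_device_deinitialize(dev);
          return 0;
  }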

Acked-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h   | 2 ++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 +
 drivers/gpu/drm/amd/amdgpu/soc15.c    | 3 +++
 3 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7e5defbfc3b9..7eb0e4846a76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -867,6 +867,8 @@ struct amdgpu_gfx_config {
 
 	/* gfx configure feature */
 	uint32_t double_offchip_lds_buf;
+	/* cached value of DB_DEBUG2 */
+	uint32_t db_debug2;
 };
 
 struct amdgpu_cu_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9d39fd5b1822..66bd6c1c82c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1600,6 +1600,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
 
 	gfx_v9_0_setup_rb(adev);
 	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
+	adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
 
 	/* XXX SH_MEM regs */
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 2e9ebe8db5cc..65e781f05c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -287,6 +287,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
 	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
+	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
 };
 
 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -315,6 +316,8 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
 	} else {
 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
 			return adev->gfx.config.gb_addr_config;
+		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
+			return adev->gfx.config.db_debug2;
 		return RREG32(reg_offset);
 	}
 }

From 642ad57058baaa2c105925a75c153bb486877513 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Thu, 12 Apr 2018 10:51:51 -0400
Subject: [PATCH 0750/1286] Revert "drm/amd/display: fix dereferencing possible
 ERR_PTR()"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit cd2d6c92a8e39d7e50a5af9fcc67d07e6a89e91d.

Cc: Shirish S <shirish.s@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 265f0166f688..0c29f3b97398 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4941,9 +4941,6 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
 			return -EDEADLK;
 
 		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-
 		if (crtc->primary == plane && crtc_state->active) {
 			if (!plane_state->fb)
 				return -EINVAL;

From 23b9ad21b262b9a85e9b85813e4adfcfb0dd96b3 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Thu, 12 Apr 2018 10:51:52 -0400
Subject: [PATCH 0751/1286] Revert "drm/amd/display: disable CRTCs with NULL FB
 on their primary plane (V2)"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This seems to cause flickering and lock-ups for a wide range of users.
Revert until we've found a proper fix for the flickering and lock-ups.

This reverts commit 36cc549d59864b7161f0e23d710c1c4d1b9cf022.

Cc: Shirish S <shirish.s@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 28 -------------------
 1 file changed, 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0c29f3b97398..2368ade4bae0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4925,30 +4925,6 @@ static int dm_update_planes_state(struct dc *dc,
 	return ret;
 }
 
-static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
-					  struct drm_crtc *crtc)
-{
-	struct drm_plane *plane;
-	struct drm_crtc_state *crtc_state;
-
-	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
-
-	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
-		struct drm_plane_state *plane_state =
-			drm_atomic_get_plane_state(state, plane);
-
-		if (IS_ERR(plane_state))
-			return -EDEADLK;
-
-		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
-		if (crtc->primary == plane && crtc_state->active) {
-			if (!plane_state->fb)
-				return -EINVAL;
-		}
-	}
-	return 0;
-}
-
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
@@ -4972,10 +4948,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		goto fail;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-		ret = dm_atomic_check_plane_state_fb(state, crtc);
-		if (ret)
-			goto fail;
-
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed)
 			continue;

From c73a3626619018adfa2bb0fa1e64310be8e73152 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Wed, 11 Apr 2018 17:57:13 -0500
Subject: [PATCH 0752/1286] drm/amdgpu/powerplay: fix smu7_get_memory_type for
 fiji

Fiji uses a different register than other smu7 ASICs, but
we already have this info in the base driver, so just
use that.

Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 21c021ba0f49..97b7c2333f19 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4150,13 +4150,9 @@ static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-	uint32_t temp;
+	struct amdgpu_device *adev = hwmgr->adev;
 
-	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
-	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
-			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
-			 MC_SEQ_MISC0_GDDR5_SHIFT));
+	data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
 
 	return 0;
 }

From 9da00630188da6e8ad1596c2b58809c833b16154 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Wed, 11 Apr 2018 18:09:39 -0500
Subject: [PATCH 0753/1286] drm/amdgpu/powerplay: rename
 smu7_upload_mc_firmware

It doesn't actually upload any firmware, it just
checks the version. The actual upload happens in
the gmc modules.

Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 97b7c2333f19..ed43dd39b5d6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4071,7 +4071,7 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
@@ -4200,7 +4200,7 @@ static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
 {
 	int tmp_result, result = 0;
 
-	smu7_upload_mc_firmware(hwmgr);
+	smu7_check_mc_firmware(hwmgr);
 
 	tmp_result = smu7_read_clock_registers(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),

From 828536385ab0d25b5ddd7153347df04ea3a6961d Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 27 Mar 2018 11:58:14 -0500
Subject: [PATCH 0754/1286] drm/amdgpu: add emit_reg_write_reg_wait ring
 callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This callback writes a value to a register and then reads
back another register and waits for a value in a single
operation.

Provide a helper function using two operations for engines
that don't support this operation.

Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 20 ++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h |  7 +++++++
 3 files changed, 28 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7eb0e4846a76..c25ee750c362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1806,6 +1806,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
 #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
+#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
 #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d5f526f38e50..49cad08b5c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -459,6 +459,26 @@ void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 	spin_unlock(&adev->ring_lru_list_lock);
 }
 
+/**
+ * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
+ *
+ * @adev: amdgpu_device pointer
+ * @reg0: register to write
+ * @reg1: register to wait on
+ * @ref: reference value to write/wait on
+ * @mask: mask to wait on
+ *
+ * Helper for rings that don't support write and wait in a
+ * single oneshot packet.
+ */
+void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
+						uint32_t reg0, uint32_t reg1,
+						uint32_t ref, uint32_t mask)
+{
+	amdgpu_ring_emit_wreg(ring, reg0, ref);
+	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
 /*
  * Debugfs info
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 1a5911882657..08fcdf6f7b53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -154,6 +154,9 @@ struct amdgpu_ring_funcs {
 	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
 	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
 			      uint32_t val, uint32_t mask);
+	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
+					uint32_t reg0, uint32_t reg1,
+					uint32_t ref, uint32_t mask);
 	void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
 	/* priority functions */
 	void (*set_priority) (struct amdgpu_ring *ring,
@@ -228,6 +231,10 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
 			int *blacklist, int num_blacklist,
 			bool lru_pipe_order, struct amdgpu_ring **ring);
 void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
+						uint32_t reg0, uint32_t val0,
+						uint32_t reg1, uint32_t val1);
+
 static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
 {
 	int i = 0;

From 10ed3c3190d38f189ed6857cecca1a2eb6de33a3 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 27 Mar 2018 15:07:50 -0500
Subject: [PATCH 0755/1286] drm/amdgpu/gfx9: add emit_reg_write_reg_wait ring
 callback (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds support for writing and reading back in a single
oneshot packet.  This is needed to send a tlb invalidation
and wait for ack in a single operation.

v2: squash the gfx ring stall fix

Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Emily Deng <Emily.Deng@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 66bd6c1c82c0..583f6f616dd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4138,6 +4138,15 @@ static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
 }
 
+static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+						  uint32_t reg0, uint32_t reg1,
+						  uint32_t ref, uint32_t mask)
+{
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+	gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, ref, mask, 0x20);
+}
+
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
@@ -4459,6 +4468,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 	.emit_tmz = gfx_v9_0_ring_emit_tmz,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -4493,6 +4503,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.set_priority = gfx_v9_0_ring_set_priority_compute,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -4523,6 +4534,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 };
 
 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)

From 4dfe7d7b4e3ba16fc377a48a221bfe8172bc50e1 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 27 Mar 2018 16:51:41 -0500
Subject: [PATCH 0756/1286] drm/amdgpu/sdma4: add emit_reg_write_reg_wait ring
 callback (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds support for writing and reading back in a single
oneshot packet.  This is needed to send a tlb invalidation
and wait for ack in a single operation.

v2: squash sdma hang fix into this patch

Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Emily Deng <Emily.Deng@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2c618a1be03e..03a36cbe7557 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1611,6 +1611,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
 	.pad_ib = sdma_v4_0_ring_pad_ib,
 	.emit_wreg = sdma_v4_0_ring_emit_wreg,
 	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)

From 1ab0c9a75f66293a8ea719cc96ae4141218eb0e4 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 27 Mar 2018 17:05:19 -0500
Subject: [PATCH 0757/1286] drm/amdgpu/uvd7: add emit_reg_write_reg_wait ring
 callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds support for writing and reading back using the
helper since the engine doesn't have a oneshot packet.

Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index eddc57f3b72a..280c0826e183 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1702,6 +1702,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
 	.end_use = amdgpu_uvd_ring_end_use,
 	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
 	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)

From 3fa0b1cbc0a57a21c1688601f6b9c340441ba3b6 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 27 Mar 2018 17:06:33 -0500
Subject: [PATCH 0758/1286] drm/amdgpu/vce4: add emit_reg_write_reg_wait ring
 callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds support for writing and reading back using the
helper since the engine doesn't have a oneshot packet.

Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 73fd48d6c756..8fd1b742985a 100755
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1081,6 +1081,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
 	.end_use = amdgpu_vce_ring_end_use,
 	.emit_wreg = vce_v4_0_emit_wreg,
 	.emit_reg_wait = vce_v4_0_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
 static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)

From f58b85e3ec0e3d3ddeff6eb16ace23a42516ae70 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 27 Mar 2018 17:06:52 -0500
Subject: [PATCH 0759/1286] drm/amdgpu/vcn1: add emit_reg_write_reg_wait ring
 callback
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds support for writing and reading back using the
helper since the engine doesn't have a oneshot packet.

Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 8c132673bc79..d9a15338db7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1139,6 +1139,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
 	.end_use = amdgpu_vcn_ring_end_use,
 	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
 	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)

From f8bc903707ae87342b97528037e27bf190051c11 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 27 Mar 2018 17:10:56 -0500
Subject: [PATCH 0760/1286] drm/amdgpu/gmc9: use
 amdgpu_ring_emit_reg_write_reg_wait in gpu tlb flush
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use amdgpu_ring_emit_reg_write_reg_wait.  On engines that support it,
it provides a write and wait in a single packet which avoids a missed
ack if a world switch happens between the request and waiting for the
ack.

Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 070946e1e4a7..aeaed7fe9ced 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -385,11 +385,9 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
 			      upper_32_bits(pd_addr));
 
-	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
-
-	/* wait for the invalidate to complete */
-	amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
-				  1 << vmid, 1 << vmid);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+					    hub->vm_inv_eng0_ack + eng,
+					    req, 1 << vmid);
 
 	return pd_addr;
 }

From ebdef28ebbcf767d9fa687acb1d02d97d834c628 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexdeucher@gmail.com>
Date: Fri, 6 Apr 2018 14:54:09 -0500
Subject: [PATCH 0761/1286] drm/amdgpu/gmc: steal the appropriate amount of
 vram for fw hand-over (v3)

Steal 9 MB for vga emulation and fb if vga is enabled; otherwise,
steal enough to cover the current display size as set by the vbios.

If no memory is used (e.g., a secondary or headless card), skip
the stolen memory reservation.
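
For rough scale (illustrative only, mirroring the viewport math in the
non-VGA path below, which assumes a 32bpp pre-OS framebuffer): the
reservation for a given mode is roughly width * height * 4 bytes:

  #include <stdio.h>

  int main(void)
  {
          unsigned int width = 1920, height = 1080; /* example pre-OS mode */
          unsigned long size = (unsigned long)width * height * 4;

          printf("%lu bytes (~%lu MiB)\n", size, size >> 20); /* just under 8 MiB */
          return 0;
  }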

v2: skip reservation if vram is limited, address Christian's comments
v3: squash in fix from Harry

Reviewed-and-Tested-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com> (v2)
Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 ++++---
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c   | 23 ++++++++++-
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c   | 23 ++++++++++-
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c   | 23 ++++++++++-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c   | 53 ++++++++++++++++++++++---
 5 files changed, 118 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ab73300e6c7f..2be04acf4efb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1441,12 +1441,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM,
-				    &adev->stolen_vga_memory,
-				    NULL, NULL);
-	if (r)
-		return r;
+	if (adev->gmc.stolen_size) {
+		r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_VRAM,
+					    &adev->stolen_vga_memory,
+					    NULL, NULL);
+		if (r)
+			return r;
+	}
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5617cf62c566..24e1ea36b454 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -825,6 +825,25 @@ static int gmc_v6_0_late_init(void *handle)
 		return 0;
 }
 
+static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+	unsigned size;
+
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+	} else {
+		u32 viewport = RREG32(mmVIEWPORT_SIZE);
+		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+			4);
+	}
+	/* return 0 if the pre-OS buffer uses up most of vram */
+	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+		return 0;
+	return size;
+}
+
 static int gmc_v6_0_sw_init(void *handle)
 {
 	int r;
@@ -851,8 +870,6 @@ static int gmc_v6_0_sw_init(void *handle)
 
 	adev->gmc.mc_mask = 0xffffffffffULL;
 
-	adev->gmc.stolen_size = 256 * 1024;
-
 	adev->need_dma32 = false;
 	dma_bits = adev->need_dma32 ? 32 : 40;
 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
@@ -878,6 +895,8 @@ static int gmc_v6_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
+
 	r = amdgpu_bo_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 80054f36e487..93861f9c7773 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -964,6 +964,25 @@ static int gmc_v7_0_late_init(void *handle)
 		return 0;
 }
 
+static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+	unsigned size;
+
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+	} else {
+		u32 viewport = RREG32(mmVIEWPORT_SIZE);
+		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+			4);
+	}
+	/* return 0 if the pre-OS buffer uses up most of vram */
+	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+		return 0;
+	return size;
+}
+
 static int gmc_v7_0_sw_init(void *handle)
 {
 	int r;
@@ -998,8 +1017,6 @@ static int gmc_v7_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-	adev->gmc.stolen_size = 256 * 1024;
-
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 40-bits.
 	 * IGP - can handle 40-bits
@@ -1030,6 +1047,8 @@ static int gmc_v7_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);
+
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d71d4cb68f9c..fbd8f56c70f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1055,6 +1055,25 @@ static int gmc_v8_0_late_init(void *handle)
 		return 0;
 }
 
+static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+	unsigned size;
+
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+	} else {
+		u32 viewport = RREG32(mmVIEWPORT_SIZE);
+		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+			4);
+	}
+	/* return 0 if the pre-OS buffer uses up most of vram */
+	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+		return 0;
+	return size;
+}
+
 #define mmMC_SEQ_MISC0_FIJI 0xA71
 
 static int gmc_v8_0_sw_init(void *handle)
@@ -1096,8 +1115,6 @@ static int gmc_v8_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-	adev->gmc.stolen_size = 256 * 1024;
-
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 40-bits.
 	 * IGP - can handle 40-bits
@@ -1128,6 +1145,8 @@ static int gmc_v8_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
+
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index aeaed7fe9ced..3071f51d6ca6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -57,6 +57,14 @@
 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
 
+/* add these here since we already include dce12 headers and these are for DCN */
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
+
 /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
 #define AMDGPU_NUM_OF_VMIDS			8
 
@@ -791,6 +799,43 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
 	return amdgpu_gart_table_vram_alloc(adev);
 }
 
+static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+#if 0
+	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+#endif
+	unsigned size;
+
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+	} else {
+		u32 viewport;
+
+		switch (adev->asic_type) {
+		case CHIP_RAVEN:
+			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+			size = (REG_GET_FIELD(viewport,
+					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+				REG_GET_FIELD(viewport,
+					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+				4);
+			break;
+		case CHIP_VEGA10:
+		case CHIP_VEGA12:
+		default:
+			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
+			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+				4);
+			break;
+		}
+	}
+	/* return 0 if the pre-OS buffer uses up most of vram */
+	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+		return 0;
+	return size;
+}
+
 static int gmc_v9_0_sw_init(void *handle)
 {
 	int r;
@@ -842,12 +887,6 @@ static int gmc_v9_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 
-	/*
-	 * It needs to reserve 8M stolen memory for vega10
-	 * TODO: Figure out how to avoid that...
-	 */
-	adev->gmc.stolen_size = 8 * 1024 * 1024;
-
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 44-bits.
 	 * IGP - can handle 44-bits
@@ -872,6 +911,8 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
+
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
 	if (r)

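For a sense of scale of the reservation computed above (assuming a
hypothetical 1920x1080 pre-OS framebuffer at 4 bytes per pixel):
1920 * 1080 * 4 = 8,294,400 bytes, i.e. roughly 8 MB, whereas an enabled
VGA emulator always reserves the fixed 9 MB. In either case the helper
returns 0 instead when the reservation would leave less than 8 MB of
VRAM free.
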
From 6f752ec2c20c6a575da29d5b297980f376830e6b Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Date: Fri, 6 Apr 2018 14:54:10 -0500
Subject: [PATCH 0762/1286] drm/amdgpu: Free VGA stolen memory as soon as
 possible.

Reserved VRAM is used to avoid overwriting the pre-OS framebuffer.
Once our display stack takes over we don't need the reserved
VRAM anymore.

v2:
Remove the comment, we actually know why we need to reserve the stolen VRAM.
Fix return type for amdgpu_ttm_late_init.
v3:
Return 0 in amdgpu_bo_late_init, rebase on changes to previous patch.
v4: rebase
v5:
For GMC9 always reserve just 9M and keep the stolen memory around
until the GART table corruption on S3 resume is resolved.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
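
In short, after this patch the lifetime of the stolen reservation is:
gmc_vX_0_sw_init() sizes it via get_vbios_fb_size(), amdgpu_ttm_init()
pins it as adev->stolen_vga_memory, and gmc_vX_0_late_init() ->
amdgpu_bo_late_init() -> amdgpu_ttm_late_init() frees it once the
driver's own display stack is scanning out (gmc_vX_0 stands in for the
v6/v7/v8 variants). GMC9 is the exception: its late_init call stays
commented out and the buffer is only freed in gmc_v9_0_sw_fini() until
the S3 GART corruption is resolved.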
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  7 ++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  6 ++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |  1 +
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  2 ++
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  2 ++
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  2 ++
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      | 26 ++++++++++++++++++++++
 8 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9e23d6f6f3f3..a160ef0332d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -852,6 +852,13 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
 	return amdgpu_ttm_init(adev);
 }
 
+int amdgpu_bo_late_init(struct amdgpu_device *adev)
+{
+	amdgpu_ttm_late_init(adev);
+
+	return 0;
+}
+
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
 	amdgpu_ttm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 3bee13344065..1e9fe85abcbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -251,6 +251,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
+int amdgpu_bo_late_init(struct amdgpu_device *adev);
 void amdgpu_bo_fini(struct amdgpu_device *adev);
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 				struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2be04acf4efb..29efaac6e3ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1517,13 +1517,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+void amdgpu_ttm_late_init(struct amdgpu_device *adev)
+{
+	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+}
+
 void amdgpu_ttm_fini(struct amdgpu_device *adev)
 {
 	if (!adev->mman.initialized)
 		return;
 
 	amdgpu_ttm_debugfs_fini(adev);
-	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
 	amdgpu_ttm_fw_reserve_vram_fini(adev);
 	if (adev->mman.aper_base_kaddr)
 		iounmap(adev->mman.aper_base_kaddr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6ea7de863041..e969c879d87e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -77,6 +77,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
 int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_late_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
 					bool enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 24e1ea36b454..79f9ac29019b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -819,6 +819,8 @@ static int gmc_v6_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_bo_late_init(adev);
+
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 93861f9c7773..7147bfe25a23 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -958,6 +958,8 @@ static int gmc_v7_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_bo_late_init(adev);
+
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index fbd8f56c70f3..4d970daa65f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1049,6 +1049,8 @@ static int gmc_v8_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_bo_late_init(adev);
+
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
 		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3071f51d6ca6..e6b00b507d4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -665,6 +665,11 @@ static int gmc_v9_0_late_init(void *handle)
 	unsigned i;
 	int r;
 
+	/*
+	 * TODO - Uncomment once GART corruption issue is fixed.
+	 */
+	/* amdgpu_bo_late_init(adev); */
+
 	for(i = 0; i < adev->num_rings; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		unsigned vmhub = ring->funcs->vmhub;
@@ -806,6 +811,13 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
 #endif
 	unsigned size;
 
+	/*
+	 * TODO Remove once GART corruption is resolved
+	 * Check related code in gmc_v9_0_sw_fini
+	 */
+	size = 9 * 1024 * 1024;
+
+#if 0
 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
 	} else {
@@ -833,6 +845,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	/* return 0 if the pre-OS buffer uses up most of vram */
 	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
 		return 0;
+
+#endif
 	return size;
 }
 
@@ -956,6 +970,18 @@ static int gmc_v9_0_sw_fini(void *handle)
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
 	gmc_v9_0_gart_fini(adev);
+
+	/*
+	 * TODO:
+	 * Currently there is a bug where some memory client outside
+	 * of the driver writes to the first 8M of VRAM on S3 resume,
+	 * this overwrites the GART which by default gets placed in the
+	 * first 8M and causes VM_FAULTS once GTT is accessed.
+	 * Keep the stolen memory reservation while this is not solved.
+	 * Also check the code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init.
+	 */
+	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+
 	amdgpu_bo_fini(adev);
 
 	return 0;

From 8ee3a52e3f35e064a3bf82f21dc74ddaf9843648 Mon Sep 17 00:00:00 2001
From: Emily Deng <Emily.Deng@amd.com>
Date: Mon, 16 Apr 2018 10:07:02 +0800
Subject: [PATCH 0763/1286] drm/gpu-sched: fix force APP kill hang(v4)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

issue:
VMC page faults occur if an app is force killed during a 3dmark
test. The cause is that in entity_fini() we manually signal all
the jobs in the entity's queue, which confuses the sync/dependency
mechanism:

1) A page fault occurs in SDMA's clear job, which operates on the
shadow buffer: the shadow buffer's GART table is cleaned by
ttm_bo_release since the fence in its reservation was fake signaled
by entity_fini() in the SIGKILL case.

2) A page fault occurs in a GFX job because during the lifetime
of the GFX job we manually fake signal all jobs from its entity
in entity_fini(); thus the unmapping/clear-PTE job that depends on
those result fences is satisfied, and SDMA starts clearing the PTEs,
which leads to a GFX page fault.

fix:
1) At least wait in entity_fini() for all already scheduled jobs to
complete if SIGKILL is the case.

2) If a fence is signaled and tries to clear some entity's dependency,
set this entity guilty to prevent its jobs from really running, since
the dependency was only fake signaled.

v2:
Split drm_sched_entity_fini() into two functions:
1) The first one does the waiting, removes the entity from the
runqueue and returns an error when the process was killed.
2) The second one then goes over the entity, installs it as
completion signal for the remaining jobs and signals all jobs
with an error code.

v3:
1) Replace fini1 and fini2 with better names.
2) Call the first part before the VM teardown in
amdgpu_driver_postclose_kms() and the second part
after the VM teardown.
3) Keep the original function drm_sched_entity_fini to
refine the code.

v4:
1) Rename entity->finished to entity->last_scheduled;
2) Rename drm_sched_entity_fini_job_cb() to
drm_sched_entity_kill_jobs_cb();
3) Pass NULL to drm_sched_entity_kill_jobs_cb() if -ENOENT;
4) Replace the type of entity->fini_status with "int";
5) Remove the check on entity->finished.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Emily Deng <Emily.Deng@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
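
A condensed sketch of the resulting call order on the amdgpu side
(fragments of amdgpu_driver_postclose_kms() as changed by the
amdgpu_kms.c hunk below; unrelated code elided):

	/* Stage 1: while the VM and its page tables still exist, wait for
	 * the jobs that were really scheduled and take the entities off
	 * their runqueues (drm_sched_entity_do_release()). */
	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
	...
	amdgpu_vm_fini(adev, &fpriv->vm);

	/* Stage 2: signal whatever is still queued with -ESRCH; freeing each
	 * job is chained to entity->last_scheduled so it only happens after
	 * the last really scheduled job finished (drm_sched_entity_cleanup()). */
	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);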
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 64 +++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c   |  5 +-
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 71 +++++++++++++++++++----
 include/drm/gpu_scheduler.h               |  7 +++
 5 files changed, 128 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c25ee750c362..ea1b28536bfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -681,6 +681,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 09d35051fdd6..eb80edfb1b0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -111,8 +111,9 @@ failed:
 	return r;
 }
 
-static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+static void amdgpu_ctx_fini(struct kref *ref)
 {
+	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned i, j;
 
@@ -125,13 +126,11 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
-	for (i = 0; i < adev->num_rings; i++)
-		drm_sched_entity_fini(&adev->rings[i]->sched,
-				      &ctx->rings[i].entity);
-
 	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
 
 	mutex_destroy(&ctx->lock);
+
+	kfree(ctx);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -170,12 +169,15 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+	u32 i;
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
 
-	amdgpu_ctx_fini(ctx);
+	for (i = 0; i < ctx->adev->num_rings; i++)
+		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+			&ctx->rings[i].entity);
 
-	kfree(ctx);
+	amdgpu_ctx_fini(ref);
 }
 
 static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
@@ -435,16 +437,62 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 	idr_init(&mgr->ctx_handles);
 }
 
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id, i;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+
+		if (!ctx->adev)
+			return;
+
+		for (i = 0; i < ctx->adev->num_rings; i++)
+			if (kref_read(&ctx->refcount) == 1)
+				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+						  &ctx->rings[i].entity);
+			else
+				DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+}
+
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id, i;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+
+		if (!ctx->adev)
+			return;
+
+		for (i = 0; i < ctx->adev->num_rings; i++)
+			if (kref_read(&ctx->refcount) == 1)
+				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+					&ctx->rings[i].entity);
+			else
+				DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+}
+
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id;
 
+	amdgpu_ctx_mgr_entity_cleanup(mgr);
+
 	idp = &mgr->ctx_handles;
 
 	idr_for_each_entry(idp, ctx, id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
 			DRM_ERROR("ctx %p is still alive\n", ctx);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bd9e723dbb2b..1ed379524117 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -913,8 +913,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		return;
 
 	pm_runtime_get_sync(dev->dev);
-
-	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 
 	if (adev->asic_type != CHIP_RAVEN) {
 		amdgpu_uvd_free_handles(adev, file_priv);
@@ -935,6 +934,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
 
 	amdgpu_vm_fini(adev, &fpriv->vm);
+	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+
 	if (pasid)
 		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
 	amdgpu_bo_unref(&pd);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 310275eaf128..44d21981bf3b 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -136,6 +136,8 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 	entity->rq = rq;
 	entity->sched = sched;
 	entity->guilty = guilty;
+	entity->fini_status = 0;
+	entity->last_scheduled = NULL;
 
 	spin_lock_init(&entity->rq_lock);
 	spin_lock_init(&entity->queue_lock);
@@ -197,19 +199,30 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
 	return true;
 }
 
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
+{
+	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+						 finish_cb);
+	drm_sched_fence_finished(job->s_fence);
+	WARN_ON(job->s_fence->parent);
+	dma_fence_put(&job->s_fence->finished);
+	job->sched->ops->free_job(job);
+}
+
+
 /**
  * Destroy a context entity
  *
  * @sched       Pointer to scheduler instance
  * @entity	The pointer to a valid scheduler entity
  *
- * Cleanup and free the allocated resources.
+ * Splitting drm_sched_entity_fini() into two functions, the first one does the waiting,
+ * removes the entity from the runqueue and returns an error when the process was killed.
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity)
 {
-	int r;
-
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return;
 	/**
@@ -217,13 +230,28 @@ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
 	 * queued IBs or discard them on SIGKILL
 	*/
 	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-		r = -ERESTARTSYS;
+		entity->fini_status = -ERESTARTSYS;
 	else
-		r = wait_event_killable(sched->job_scheduled,
+		entity->fini_status = wait_event_killable(sched->job_scheduled,
 					drm_sched_entity_is_idle(entity));
 	drm_sched_entity_set_rq(entity, NULL);
-	if (r) {
+}
+EXPORT_SYMBOL(drm_sched_entity_do_release);
+
+/**
+ * Destroy a context entity
+ *
+ * @sched       Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * The second one then goes over the entity and signals all jobs with an error code.
+ */
+void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+			   struct drm_sched_entity *entity)
+{
+	if (entity->fini_status) {
 		struct drm_sched_job *job;
+		int r;
 
 		/* Park the kernel for a moment to make sure it isn't processing
 		 * our enity.
@@ -241,13 +269,26 @@ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
 			struct drm_sched_fence *s_fence = job->s_fence;
 			drm_sched_fence_scheduled(s_fence);
 			dma_fence_set_error(&s_fence->finished, -ESRCH);
-			drm_sched_fence_finished(s_fence);
-			WARN_ON(s_fence->parent);
-			dma_fence_put(&s_fence->finished);
-			sched->ops->free_job(job);
+			r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+							drm_sched_entity_kill_jobs_cb);
+			if (r == -ENOENT)
+				drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n", r);
 		}
+
+		dma_fence_put(entity->last_scheduled);
+		entity->last_scheduled = NULL;
 	}
 }
+EXPORT_SYMBOL(drm_sched_entity_cleanup);
+
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+				struct drm_sched_entity *entity)
+{
+	drm_sched_entity_do_release(sched, entity);
+	drm_sched_entity_cleanup(sched, entity);
+}
 EXPORT_SYMBOL(drm_sched_entity_fini);
 
 static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -530,6 +571,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 		spin_unlock(&sched->job_list_lock);
 		fence = sched->ops->run_job(s_job);
 		atomic_inc(&sched->hw_rq_count);
+
+		dma_fence_put(s_job->entity->last_scheduled);
+		s_job->entity->last_scheduled = dma_fence_get(&s_fence->finished);
+
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &s_fence->cb,
@@ -556,6 +601,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       void *owner)
 {
 	job->sched = sched;
+	job->entity = entity;
 	job->s_priority = entity->rq - sched->sched_rq;
 	job->s_fence = drm_sched_fence_create(entity, owner);
 	if (!job->s_fence)
@@ -669,6 +715,9 @@ static int drm_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		drm_sched_fence_scheduled(s_fence);
 
+		dma_fence_put(entity->last_scheduled);
+		entity->last_scheduled = dma_fence_get(&s_fence->finished);
+
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &s_fence->cb,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index c053a32341bf..350a62c26b29 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -65,6 +65,8 @@ struct drm_sched_entity {
 	struct dma_fence		*dependency;
 	struct dma_fence_cb		cb;
 	atomic_t			*guilty; /* points to ctx's guilty */
+	int            fini_status;
+	struct dma_fence    *last_scheduled;
 };
 
 /**
@@ -119,6 +121,7 @@ struct drm_sched_job {
 	uint64_t			id;
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
+	struct drm_sched_entity  *entity;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
@@ -186,6 +189,10 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 			  struct drm_sched_entity *entity,
 			  struct drm_sched_rq *rq,
 			  uint32_t jobs, atomic_t *guilty);
+void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+			   struct drm_sched_entity *entity);
+void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+			   struct drm_sched_entity *entity);
 void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,

From a0701722b68e69443dd3dd7970a9f343b7560a2c Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 4 Apr 2018 14:11:45 +0800
Subject: [PATCH 0764/1286] Revert "drm/amd/powerply: fix power reading on
 Fiji"

We don't have the [50ms, 4sec] sampling period limit.
The SMU calculates the average GPU power in real time,
so we can read the average GPU power through an SMU message
or by reading a special register.

This reverts commit 462d8dcc9fec0d89f1ff6a1f93f1d4f670878c71.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index ed43dd39b5d6..5bccf895ba41 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3364,8 +3364,7 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
 			"Failed to start pm status log!",
 			return -1);
 
-	/* Sampling period from 50ms to 4sec */
-	msleep_interruptible(200);
+	msleep_interruptible(20);
 
 	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_PmStatusLogSample),

From b89c71d1eb1c43c6c61f6d74d7454702d367f18b Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 4 Apr 2018 14:17:09 +0800
Subject: [PATCH 0765/1286] drm/amd/pp: Refine get_gpu_power for VI

pkgpwr is the average GPU power over 100ms; it is calculated by
the firmware in real time.

1. We can send the SMU message PPSMC_MSG_GetCurrPkgPwr to read currentpkgpwr directly.

2. Fiji/Tonga/Bonaire/Hawaii do not support PPSMC_MSG_GetCurrPkgPwr.
   Send PPSMC_MSG_PmStatusLogStart/Sample to let the SMU write currentpkgpwr
   to ixSMU_PM_STATUS_94, so the driver can read pkgpwr from ixSMU_PM_STATUS_94.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
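
Condensed, the read path described above looks roughly as follows (a
sketch of the flow in the smu7_hwmgr.c hunk below; "power" and the
retry bound are illustrative locals):

	u32 power;
	int i;

	/* 1. ASICs that support it answer PPSMC_MSG_GetCurrPkgPwr directly. */
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
	power = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

	/* 2. Fiji/Tonga/Bonaire/Hawaii: fall back to the PM status log and
	 * poll until the firmware has written a sample to ixSMU_PM_STATUS_94. */
	if (power == 0) {
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				       ixSMU_PM_STATUS_94, 0);
		for (i = 0; i < 20 && power == 0; i++) {
			mdelay(1);
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
			power = cgs_read_ind_register(hwmgr->device,
						      CGS_IND_REG__SMC,
						      ixSMU_PM_STATUS_94);
		}
	}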
---
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  | 45 ++++++++++---------
 .../drm/amd/powerplay/smumgr/smu7_smumgr.c    | 10 +++--
 2 files changed, 31 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 5bccf895ba41..51867c702540 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3359,30 +3359,33 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
 		struct pp_gpu_power *query)
 {
-	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_PmStatusLogStart),
-			"Failed to start pm status log!",
-			return -1);
+	int i;
 
-	msleep_interruptible(20);
+	if (!query)
+		return -EINVAL;
 
-	PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_PmStatusLogSample),
-			"Failed to sample pm status log!",
-			return -1);
 
-	query->vddc_power = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC,
-			ixSMU_PM_STATUS_40);
-	query->vddci_power = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC,
-			ixSMU_PM_STATUS_49);
-	query->max_gpu_power = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC,
-			ixSMU_PM_STATUS_94);
-	query->average_gpu_power = cgs_read_ind_register(hwmgr->device,
-			CGS_IND_REG__SMC,
-			ixSMU_PM_STATUS_95);
+	memset(query, 0, sizeof *query);
+
+	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+	query->average_gpu_power = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+	if (query->average_gpu_power != 0)
+		return 0;
+
+	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+							ixSMU_PM_STATUS_94, 0);
+
+	for (i = 0; i < 20; i++) {
+		mdelay(1);
+		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+		query->average_gpu_power = cgs_read_ind_register(hwmgr->device,
+						CGS_IND_REG__SMC,
+						ixSMU_PM_STATUS_94);
+		if (query->average_gpu_power != 0)
+			break;
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index fb32a3fcc278..10a112376fd1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -171,8 +171,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 
 	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
 
-	if (ret != 1)
-		pr_info("\n failed to send pre message %x ret is %d \n",  msg, ret);
+	if (ret == 0xFE)
+		pr_debug("last message was not supported\n");
+	else if (ret != 1)
+		pr_info("\n last message was failed ret is %d\n", ret);
 
 	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
 
@@ -180,7 +182,9 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 
 	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
 
-	if (ret != 1)
+	if (ret == 0xFE)
+		pr_debug("message %x was not supported\n", msg);
+	else if (ret != 1)
 		pr_info("\n failed to send message %x ret is %d \n",  msg, ret);
 
 	return 0;

From 5b79d0482f3c1e8d5d78bd573a41e91dd9f0a5a1 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 4 Apr 2018 15:37:35 +0800
Subject: [PATCH 0766/1286] drm/amd/pp: Remove struct pp_gpu_power

Currently the SMU only calculates the average GPU power in real time.

For vddc/vddci/max power, the user needs to set a start time and an
end time, and the firmware then calculates the average vddc/vddci/max
power. But the type of the return values is not unified: for VI the
return type is uint, for Vega it is float.

So this struct is not suitable for all ASICs.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
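
For consumers of the sensor, a minimal usage sketch of the unified
uint32_t value (mirroring the debugfs hunk below; the low 8 bits carry
the fractional part, the upper bits the integer watts):

	uint32_t query = 0;
	int size = sizeof(query);

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				    (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n",
			   query >> 8, query & 0xff);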
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |  7 ++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c        | 22 ++++++------------
 .../gpu/drm/amd/include/kgd_pp_interface.h    |  7 ------
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  | 23 ++++++++-----------
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c    | 17 ++++++--------
 .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 13 ++++-------
 6 files changed, 29 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1ed379524117..efff211d7d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -701,9 +701,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 	}
 	case AMDGPU_INFO_SENSOR: {
-		struct pp_gpu_power query = {0};
-		int query_size = sizeof(query);
-
 		if (!adev->pm.dpm_enabled)
 			return -ENOENT;
 
@@ -746,10 +743,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			/* get average GPU power */
 			if (amdgpu_dpm_read_sensor(adev,
 						   AMDGPU_PP_SENSOR_GPU_POWER,
-						   (void *)&query, &query_size)) {
+						   (void *)&ui32, &ui32_size)) {
 				return -EINVAL;
 			}
-			ui32 = query.average_gpu_power >> 8;
+			ui32 >>= 8;
 			break;
 		case AMDGPU_INFO_SENSOR_VDDNB:
 			/* get VDDNB in millivolts */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index e5f60fc31516..744f105a2c75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1020,8 +1020,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	struct drm_device *ddev = adev->ddev;
-	struct pp_gpu_power query = {0};
-	int r, size = sizeof(query);
+	u32 query = 0;
+	int r, size = sizeof(u32);
 	unsigned uw;
 
 	/* Can't get power when the card is off */
@@ -1041,7 +1041,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 		return r;
 
 	/* convert to microwatts */
-	uw = (query.average_gpu_power >> 8) * 1000000;
+	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
 
 	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
 }
@@ -1752,7 +1752,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
 {
 	uint32_t value;
-	struct pp_gpu_power query = {0};
+	uint32_t query = 0;
 	int size;
 
 	/* sanity check PP is enabled */
@@ -1775,17 +1775,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
 		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
 		seq_printf(m, "\t%u mV (VDDNB)\n", value);
-	size = sizeof(query);
-	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
-		seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
-				query.vddc_power & 0xff);
-		seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
-				query.vddci_power & 0xff);
-		seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
-				query.max_gpu_power & 0xff);
-		seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
-				query.average_gpu_power & 0xff);
-	}
+	size = sizeof(uint32_t);
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
+		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
 	size = sizeof(value);
 	seq_printf(m, "\n");
 
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 5c840c022b52..1bec9072e36f 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -149,13 +149,6 @@ struct pp_states_info {
 	uint32_t states[16];
 };
 
-struct pp_gpu_power {
-	uint32_t vddc_power;
-	uint32_t vddci_power;
-	uint32_t max_gpu_power;
-	uint32_t average_gpu_power;
-};
-
 #define PP_GROUP_MASK        0xF0000000
 #define PP_GROUP_SHIFT       28
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 51867c702540..f5b3617364f1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3356,36 +3356,34 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
-		struct pp_gpu_power *query)
+static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
 {
 	int i;
+	u32 tmp = 0;
 
 	if (!query)
 		return -EINVAL;
 
-
-	memset(query, 0, sizeof *query);
-
 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
-	query->average_gpu_power = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+	tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
 
-	if (query->average_gpu_power != 0)
+	if (tmp != 0)
 		return 0;
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 							ixSMU_PM_STATUS_94, 0);
 
-	for (i = 0; i < 20; i++) {
+	for (i = 0; i < 10; i++) {
 		mdelay(1);
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
-		query->average_gpu_power = cgs_read_ind_register(hwmgr->device,
+		tmp = cgs_read_ind_register(hwmgr->device,
 						CGS_IND_REG__SMC,
 						ixSMU_PM_STATUS_94);
-		if (query->average_gpu_power != 0)
+		if (tmp != 0)
 			break;
 	}
+	*query = tmp;
 
 	return 0;
 }
@@ -3438,10 +3436,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		return 0;
 	case AMDGPU_PP_SENSOR_GPU_POWER:
-		if (*size < sizeof(struct pp_gpu_power))
-			return -EINVAL;
-		*size = sizeof(struct pp_gpu_power);
-		return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
+		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
 	case AMDGPU_PP_SENSOR_VDDGFX:
 		if ((data->vr_config & 0xff) == 0x2)
 			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 127c550e8bb1..0bbc5647d77d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3781,16 +3781,18 @@ static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
 }
 
 static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
-		struct pp_gpu_power *query)
+		uint32_t *query)
 {
 	uint32_t value;
 
+	if (!query)
+		return -EINVAL;
+
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
 	value = smum_get_argument(hwmgr);
 
-	/* power value is an integer */
-	memset(query, 0, sizeof *query);
-	query->average_gpu_power = value << 8;
+	/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
+	*query = value << 8;
 
 	return 0;
 }
@@ -3840,12 +3842,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GPU_POWER:
-		if (*size < sizeof(struct pp_gpu_power))
-			ret = -EINVAL;
-		else {
-			*size = sizeof(struct pp_gpu_power);
-			ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
-		}
+		ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
 		break;
 	case AMDGPU_PP_SENSOR_VDDGFX:
 		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 3e1ed0aca29c..782e2098824d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1113,8 +1113,7 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
 	return (mem_clk * 100);
 }
 
-static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
-		struct pp_gpu_power *query)
+static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
 {
 #if 0
 	uint32_t value;
@@ -1126,7 +1125,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
 
 	vega12_read_arg_from_smc(hwmgr, &value);
 	/* power value is an integer */
-	query->average_gpu_power = value << 8;
+	*query = value << 8;
 #endif
 	return 0;
 }
@@ -1235,12 +1234,8 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GPU_POWER:
-		if (*size < sizeof(struct pp_gpu_power))
-			ret = -EINVAL;
-		else {
-			*size = sizeof(struct pp_gpu_power);
-			ret = vega12_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
-		}
+		ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
+
 		break;
 	default:
 		ret = -EINVAL;

From 8db42a701326c8872d8634c7b4c0d045bf95f394 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 11 Apr 2018 18:11:49 +0800
Subject: [PATCH 0767/1286] drm/amd/pp: Clear smu response register before send
 smu message

The SMU firmware does not update the response register immediately
for some delayed tasks, so we may read back the stale value.

Therefore, clear the register before sending an SMU message.

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
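
With this change the SMC mailbox handshake in smu7_send_msg_to_smc()
(combined with the 0xFE handling added two patches earlier) is roughly:

	/* 1. Read the response of the previous message: 0xFE means the
	 * firmware did not support it, anything other than 1 is a failure. */
	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	/* 2. Clear the response register so a stale value left over from a
	 * delayed task cannot be mistaken for the reply to the new message. */
	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);

	/* 3. Post the new message and wait for a non-zero response. */
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);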
---
 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c   | 4 +---
 drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 1 +
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 2a93f3a8e4f0..2d4ec8ac3a08 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -208,9 +208,7 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
 	int ret;
 
-	if (!ci_is_smc_ram_running(hwmgr))
-		return -EINVAL;
-
+	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
 	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
 
 	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 10a112376fd1..64d33b775906 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -176,6 +176,7 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 	else if (ret != 1)
 		pr_info("\n last message was failed ret is %d\n", ret);
 
+	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
 	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
 
 	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

From 63c2f7ed7bb3e98b4b22d5b136f4749706f17d36 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Sun, 8 Apr 2018 16:57:55 +0800
Subject: [PATCH 0768/1286] drm/amd/pp: Move common code to smu_helper.c

Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  | 30 +------
 .../gpu/drm/amd/powerplay/hwmgr/smu_helper.c  | 82 +++++++++++++++++++
 .../gpu/drm/amd/powerplay/hwmgr/smu_helper.h  | 24 ++++++
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c    | 43 +---------
 4 files changed, 109 insertions(+), 70 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index f5b3617364f1..68aae09a886a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -793,32 +793,6 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static int smu7_get_voltage_dependency_table(
-			const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
-			struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
-	uint8_t i = 0;
-	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
-				"Voltage Lookup Table empty",
-				return -EINVAL);
-
-	dep_table->count = allowed_dep_table->count;
-	for (i=0; i<dep_table->count; i++) {
-		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
-		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
-		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
-		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
-		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
-		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
-		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
-		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
-		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
-		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
-	}
-
-	return 0;
-}
-
 static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -846,7 +820,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 		entries[i].vddc = dep_sclk_table->entries[i].vddc;
 	}
 
-	smu7_get_voltage_dependency_table(dep_sclk_table,
+	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
 		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
 
 	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
@@ -858,7 +832,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 		entries[i].vddc = dep_mclk_table->entries[i].vddc;
 	}
 
-	smu7_get_voltage_dependency_table(dep_mclk_table,
+	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
 		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 529be3cd768a..7c23741619b6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -624,3 +624,85 @@ void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
 
 	return NULL;
 }
+
+int smu_get_voltage_dependency_table_ppt_v1(
+			const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+			struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+	uint8_t i = 0;
+	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
+				"Voltage Lookup Table empty",
+				return -EINVAL);
+
+	dep_table->count = allowed_dep_table->count;
+	for (i=0; i<dep_table->count; i++) {
+		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
+		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
+		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
+		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
+		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
+		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
+		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
+		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
+		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
+		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
+	}
+
+	return 0;
+}
+
+int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+{
+	uint32_t i;
+	struct watermarks *table = wt_table;
+
+	if (!table || !wm_with_clock_ranges)
+		return -EINVAL;
+
+	if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+		return -EINVAL;
+
+	for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+		table->WatermarkRow[1][i].MinClock =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
+			100);
+		table->WatermarkRow[1][i].MaxClock =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+			100);
+		table->WatermarkRow[1][i].MinUclk =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
+			100);
+		table->WatermarkRow[1][i].MaxUclk =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
+			100);
+		table->WatermarkRow[1][i].WmSetting = (uint8_t)
+				wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+	}
+
+	for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+		table->WatermarkRow[0][i].MinClock =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
+			100);
+		table->WatermarkRow[0][i].MaxClock =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
+			100);
+		table->WatermarkRow[0][i].MinUclk =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
+			100);
+		table->WatermarkRow[0][i].MaxUclk =
+			cpu_to_le16((uint16_t)
+			(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
+			100);
+		table->WatermarkRow[0][i].WmSetting = (uint8_t)
+				wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index 14ee162ac92a..916cc01e7652 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -26,10 +26,27 @@
 struct pp_atomctrl_voltage_table;
 struct pp_hwmgr;
 struct phm_ppt_v1_voltage_lookup_table;
+struct Watermarks_t;
+struct pp_wm_sets_with_clock_ranges_soc15;
 
 uint8_t convert_to_vid(uint16_t vddc);
 uint16_t convert_to_vddc(uint8_t vid);
 
+struct watermark_row_generic_t {
+	uint16_t MinClock;
+	uint16_t MaxClock;
+	uint16_t MinUclk;
+	uint16_t MaxUclk;
+
+	uint8_t  WmSetting;
+	uint8_t  Padding[3];
+};
+
+struct watermarks {
+	struct watermark_row_generic_t WatermarkRow[2][4];
+	uint32_t     padding[7];
+};
+
 extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
 					uint32_t index,
 					uint32_t value, uint32_t mask);
@@ -85,6 +102,13 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);
 void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
 						uint8_t *frev, uint8_t *crev);
 
+int smu_get_voltage_dependency_table_ppt_v1(
+	const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+
+int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 0bbc5647d77d..384aa07206c0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4367,50 +4367,9 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 	struct vega10_hwmgr *data = hwmgr->backend;
 	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
 	int result = 0;
-	uint32_t i;
 
 	if (!data->registry_data.disable_water_mark) {
-		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
-			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
-					wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
-		}
-
-		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
-			table->WatermarkRow[WM_SOCCLK][i].MinClock =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
-				cpu_to_le16((uint16_t)
-				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
-				100);
-			table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
-					wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
-		}
+		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
 		data->water_marks_bitmap = WaterMarksExist;
 	}
 

From 1afd30efeddbb1b32cf35d3bf6477b35690eeca6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Tue, 10 Apr 2018 13:42:29 +0200
Subject: [PATCH 0769/1286] drm/amdgpu: revert "add new bo flag that indicates
 BOs don't need fallback (v2)"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 6f51d28bfe8e1a676de5cd877639245bed3cc818.

Makes fallback handling too complicated. This is just a feature for the
GEM interface and shouldn't leak into the core BO create function.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c     | 3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 +----
 include/uapi/drm/amdgpu_drm.h              | 2 --
 3 files changed, 2 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 68af2f878bc9..e1756b68a17b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -385,8 +385,7 @@ retry:
 	    amdgpu_bo_in_cpu_visible_vram(bo))
 		p->bytes_moved_vis += ctx.bytes_moved;
 
-	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains &&
-	    !(bo->flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
+	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 		domain = bo->allowed_domains;
 		goto retry;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index a160ef0332d6..1de6864da717 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -388,8 +388,6 @@ retry:
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
-	bo->preferred_domains = preferred_domains;
-	bo->allowed_domains = allowed_domains;
 
 	bo->flags = flags;
 
@@ -426,8 +424,7 @@ retry:
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, &ctx, acc_size,
 				 NULL, resv, &amdgpu_ttm_bo_destroy);
-	if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device &&
-	    !(flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) {
+	if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) {
 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 			goto retry;
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4f5a27d64c54..c363b67f2d0a 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -95,8 +95,6 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
 /* Flag that BO sharing will be explicitly synchronized */
 #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
-/* Flag that BO doesn't need fallback */
-#define AMDGPU_GEM_CREATE_NO_FALLBACK		(1 << 8)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */

From 0808210478c76606c12bb475b3272b7780240812 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Tue, 10 Apr 2018 13:42:38 +0200
Subject: [PATCH 0770/1286] drm/amdgpu: revert "Don't change preferred domian
 when fallback GTT v6"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 7d1ca1325260a9e9329b10a21e3692e6f188936f.

Makes fallback handling too complicated. This is just a feature for the
GEM interface and shouldn't leak into the core BO create function.

The intended change to preserve the preferred domains is implemented in
a follow-up patch.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 16 ++++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 37 ++++++++--------------
 2 files changed, 27 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 28c2706e48d7..46b9ea4e6103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -56,11 +56,23 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 		alignment = PAGE_SIZE;
 	}
 
+retry:
 	r = amdgpu_bo_create(adev, size, alignment, initial_domain,
 			     flags, type, resv, &bo);
 	if (r) {
-		DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
-			  size, initial_domain, alignment, r);
+		if (r != -ERESTARTSYS) {
+			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+				goto retry;
+			}
+
+			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+				goto retry;
+			}
+			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+				  size, initial_domain, alignment, r);
+		}
 		return r;
 	}
 	*obj = &bo->gem_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1de6864da717..24f582c696cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -356,7 +356,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	struct amdgpu_bo *bo;
 	unsigned long page_align;
 	size_t acc_size;
-	u32 domains, preferred_domains, allowed_domains;
 	int r;
 
 	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
@@ -370,24 +369,22 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
 				       sizeof(struct amdgpu_bo));
 
-	preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
-				      AMDGPU_GEM_DOMAIN_GTT |
-				      AMDGPU_GEM_DOMAIN_CPU |
-				      AMDGPU_GEM_DOMAIN_GDS |
-				      AMDGPU_GEM_DOMAIN_GWS |
-				      AMDGPU_GEM_DOMAIN_OA);
-	allowed_domains = preferred_domains;
-	if (type != ttm_bo_type_kernel &&
-	    allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
-		allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
-	domains = preferred_domains;
-retry:
 	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
+	bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+					 AMDGPU_GEM_DOMAIN_GTT |
+					 AMDGPU_GEM_DOMAIN_CPU |
+					 AMDGPU_GEM_DOMAIN_GDS |
+					 AMDGPU_GEM_DOMAIN_GWS |
+					 AMDGPU_GEM_DOMAIN_OA);
+	bo->allowed_domains = bo->preferred_domains;
+	if (type != ttm_bo_type_kernel &&
+	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
 	bo->flags = flags;
 
@@ -420,20 +417,12 @@ retry:
 #endif
 
 	bo->tbo.bdev = &adev->mman.bdev;
-	amdgpu_ttm_placement_from_domain(bo, domains);
+	amdgpu_ttm_placement_from_domain(bo, domain);
+
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, &ctx, acc_size,
 				 NULL, resv, &amdgpu_ttm_bo_destroy);
-	if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) {
-		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-			goto retry;
-		} else if (domains != allowed_domains) {
-			domains = allowed_domains;
-			goto retry;
-		}
-	}
-	if (unlikely(r))
+	if (unlikely(r != 0))
 		return r;
 
 	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&

From 361883649221f975d915e4bc79907da71017f38f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Mon, 19 Mar 2018 11:49:14 +0100
Subject: [PATCH 0771/1286] drm/amdgpu: re-validate per VM BOs if required v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If a per-VM BO ends up in an allowed domain it never moves back into
the preferred domain.

v2: move the extra handling into amdgpu_vm_bo_update when we exit the
    state machine. Make memory type handling generic.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index da55a78d7380..f0fbc331aa30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1556,7 +1556,20 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	}
 
 	spin_lock(&vm->status_lock);
-	list_del_init(&bo_va->base.vm_status);
+	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+		unsigned mem_type = bo->tbo.mem.mem_type;
+
+		/* If the BO is not in its preferred location add it back to
+		 * the evicted list so that it gets validated again on the
+		 * next command submission.
+		 */
+		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+		else
+			list_del_init(&bo_va->base.vm_status);
+	} else {
+		list_del_init(&bo_va->base.vm_status);
+	}
 	spin_unlock(&vm->status_lock);
 
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
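
To make the new check concrete: amdgpu_mem_type_to_domain() translates the
BO's current TTM memory type back into a GEM domain bit, so a buffer that
prefers VRAM but is currently sitting in GTT fails the preferred_domains
test and is re-queued on vm->evicted. A rough sketch of that mapping is
below; the name is illustrative only, the authoritative version is the
driver's amdgpu_mem_type_to_domain().

	/* illustrative only -- see amdgpu_mem_type_to_domain() */
	static u32 mem_type_to_domain_sketch(u32 mem_type)
	{
		switch (mem_type) {
		case TTM_PL_VRAM:   return AMDGPU_GEM_DOMAIN_VRAM;
		case TTM_PL_TT:     return AMDGPU_GEM_DOMAIN_GTT;
		case TTM_PL_SYSTEM: return AMDGPU_GEM_DOMAIN_CPU;
		default:            return 0;
		}
	}

	/* Example: preferred_domains == AMDGPU_GEM_DOMAIN_VRAM and the BO was
	 * evicted to GTT -> (VRAM & GTT) == 0, so the BO goes back onto
	 * vm->evicted and is validated again on the next command submission.
	 */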

From 03a27de648d8a2b2bf59a7f467855fac2d850350 Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Tue, 10 Apr 2018 13:45:00 -0400
Subject: [PATCH 0772/1286] drm/amd/pp: Adding set_watermarks_for_clocks_ranges
 for SMU10

The function was never implemented for Raven on Linux.
It follows a similar implementation to the one on Windows.

The SMU still needs to notify the SMC and copy the WM table, which is
added here. On other ASICs such as Vega this step is not implemented.

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 13 +++++++++++++
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h |  1 +
 2 files changed, 14 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 6ba3b1fa57aa..b712d16a9e6f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -992,6 +992,18 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 	return ret;
 }
 
+static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+{
+	struct smu10_hwmgr *data = hwmgr->backend;
+	Watermarks_t *table = &(data->water_marks_table);
+	int result = 0;
+
+	smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
+	smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
+	data->water_marks_exist = true;
+	return result;
+}
 static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
 {
 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
@@ -1021,6 +1033,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
 	.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
 	.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
+	.set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
 	.get_max_high_clocks = smu10_get_max_high_clocks,
 	.read_sensor = smu10_read_sensor,
 	.set_active_display_count = smu10_set_active_display_count,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index 175c3a592b6c..f68b218b9bce 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -290,6 +290,7 @@ struct smu10_hwmgr {
 	bool                           vcn_dpg_mode;
 
 	bool                           gfx_off_controled_by_driver;
+	bool                           water_marks_exist;
 	Watermarks_t                      water_marks_table;
 	struct smu10_clock_voltage_information   clock_vol_info;
 	DpmClocks_t                       clock_table;
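
As a caller-side sketch (not part of this series): the display code is
expected to reach this callback through the powerplay dispatch table,
roughly as below. The .set_watermarks_for_clocks_ranges hook and its
signature come from the patch above; the surrounding caller structure
(hwmgr->hwmgr_func and the NULL check) is an assumption here.

	struct pp_wm_sets_with_clock_ranges_soc15 wm_ranges = {0}; /* filled in by DC */

	if (hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
		hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, &wm_ranges);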

From 5c3517d0c2ead443f378173c698f3bd09cb89d72 Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Wed, 11 Apr 2018 16:25:26 -0400
Subject: [PATCH 0773/1286] drm/amd/pp: Adding a function to store cc6 data in
 SMU10

Fill in smu10_store_cc6_data based on the Windows PowerPlay
implementation.

One of the parameters passed to the function, pstate_switch_disable,
is not part of the smu10 private data structure, so the function
simply ignores it.

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index b712d16a9e6f..0f252265a753 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -699,6 +699,16 @@ static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
 static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
 			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
 {
+	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+	if (separation_time != data->separation_time ||
+			cc6_disable != data->cc6_disable ||
+			pstate_disable != data->pstate_disable) {
+		data->separation_time = separation_time;
+		data->cc6_disable = cc6_disable;
+		data->pstate_disable = pstate_disable;
+		data->cc6_setting_changed = true;
+	}
 	return 0;
 }
 

From 2c773de2ecb8c327f2448bd1eecad224e9227087 Mon Sep 17 00:00:00 2001
From: Shirish S <shirish.s@amd.com>
Date: Mon, 16 Apr 2018 12:17:57 +0530
Subject: [PATCH 0774/1286] drm/amdgpu: defer test IBs on the rings at boot
 (V3)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

amdgpu_ib_ring_tests() runs test IBs on the rings at boot and
contributes ~500 ms to the amdgpu driver's boot time.

This patch defers the IB tests and ensures that they are executed
from amdgpu_info_ioctl() if they have not run by then.

V2: Use queue_delayed_work() & flush_delayed_work().
V3: Removed usage of a separate wq; ensure the IB tests are
    run before enabling clockgating.

Signed-off-by: Shirish S <shirish.s@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 ++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 +++
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 62d6505ade84..d7f2bbdfd348 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1656,6 +1656,10 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 	if (amdgpu_emu_mode == 1)
 		return 0;
 
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -1706,8 +1710,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 		}
 	}
 
-	mod_delayed_work(system_wq, &adev->late_init_work,
-			msecs_to_jiffies(AMDGPU_RESUME_MS));
+	queue_delayed_work(system_wq, &adev->late_init_work,
+			   msecs_to_jiffies(AMDGPU_RESUME_MS));
 
 	amdgpu_device_fill_reset_magic(adev);
 
@@ -2374,10 +2378,6 @@ fence_driver_init:
 		goto failed;
 	}
 
-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
-
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_init_data_exchange(adev);
 
@@ -2639,11 +2639,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 	amdgpu_fence_driver_resume(adev);
 
-	if (resume) {
-		r = amdgpu_ib_ring_tests(adev);
-		if (r)
-			DRM_ERROR("ib ring test failed (%d).\n", r);
-	}
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index efff211d7d90..4e15b6fe2839 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -279,6 +279,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	if (!info->return_size || !info->return_pointer)
 		return -EINVAL;
 
+	/* Ensure IB tests are run on ring */
+	flush_delayed_work(&adev->late_init_work);
+
 	switch (info->query) {
 	case AMDGPU_INFO_ACCEL_WORKING:
 		ui32 = adev->accel_working;
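
The mechanism relied on here is the standard deferred-work idiom: the
late-init work (which now also runs the IB tests) is queued on the system
workqueue at init time, and the first ioctl that depends on working rings
flushes it, guaranteeing the tests have completed. A minimal, generic
sketch of the idiom, outside the amdgpu context and with an illustrative
2000 ms delay, looks like this:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static void late_init_func(struct work_struct *work)
	{
		/* expensive one-time setup, e.g. ring IB tests */
	}

	static DECLARE_DELAYED_WORK(late_init_work, late_init_func);

	static void driver_init(void)
	{
		/* defer the expensive work so boot is not blocked on it */
		queue_delayed_work(system_wq, &late_init_work,
				   msecs_to_jiffies(2000));
	}

	static long driver_ioctl(void)
	{
		/* make sure the deferred work has finished before relying on it */
		flush_delayed_work(&late_init_work);
		return 0;
	}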

From c2f84e03a01ad09f18f9f132f8b1e78f699a5494 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Thu, 12 Apr 2018 16:37:09 -0400
Subject: [PATCH 0775/1286] drm/amd/display: Don't program bypass on linear
 regamma LUT

Even though this is required for degamma, since DCE HW only supports a
couple of predefined LUTs, we can just program the LUT directly for
regamma.

This fixes dark screens which occur when we program regamma to bypass
while degamma is using the sRGB LUT.

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Leo Li <sunpeng.li@amd.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index ef5fad8c5aac..e3d90e918d1b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -139,13 +139,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
 	lut = (struct drm_color_lut *)blob->data;
 	lut_size = blob->length / sizeof(struct drm_color_lut);
 
-	if (__is_lut_linear(lut, lut_size)) {
-		/* Set to bypass if lut is set to linear */
-		stream->out_transfer_func->type = TF_TYPE_BYPASS;
-		stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
-		return 0;
-	}
-
 	gamma = dc_create_gamma();
 	if (!gamma)
 		return -ENOMEM;

From c74db7e42d9b538d2fa582cf0efe5640b25e950d Mon Sep 17 00:00:00 2001
From: Eric Yang <Eric.Yang2@amd.com>
Date: Tue, 3 Apr 2018 11:36:14 -0400
Subject: [PATCH 0776/1286] drm/amd/display: dal 3.1.42

Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 0f566a1ba35b..7ac8a1bee5ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.41"
+#define DC_VER "3.1.42"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6

From ab892598d033d1943e1dcb0326f2622d6026f524 Mon Sep 17 00:00:00 2001
From: Roman Li <Roman.Li@amd.com>
Date: Thu, 29 Mar 2018 10:56:17 -0400
Subject: [PATCH 0777/1286] drm/amd/display: fix brightness level after resume
 from suspend

Add the missing call to cache the current backlight values.
Otherwise the brightness resets to the default value on resume.

Signed-off-by: Roman Li <Roman.Li@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c       | 13 +++++++++++++
 drivers/gpu/drm/amd/display/dc/dc_link.h            |  2 ++
 .../drm/amd/display/dc/dce110/dce110_hw_sequencer.c |  4 +++-
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 0cd286f8eaa0..b44cf52090a5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2018,6 +2018,19 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
 	return true;
 }
 
+bool dc_link_set_abm_disable(const struct dc_link *link)
+{
+	struct dc  *core_dc = link->ctx->dc;
+	struct abm *abm = core_dc->res_pool->abm;
+
+	if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+		return false;
+
+	abm->funcs->set_abm_immediate_disable(abm);
+
+	return true;
+}
+
 bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
 {
 	struct dc  *core_dc = link->ctx->dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index eeff98741293..8a716baa1203 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -141,6 +141,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
 bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
 		uint32_t frame_ramp, const struct dc_stream_state *stream);
 
+bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+
 bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
 
 bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 68a182ce53c7..15897f0a9616 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1046,8 +1046,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	struct dc_link *link = stream->sink->link;
 
-	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
+	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
 		link->dc->hwss.edp_backlight_control(link, false);
+		dc_link_set_abm_disable(link);
+	}
 
 	if (dc_is_dp_signal(pipe_ctx->stream->signal))
 		pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);

From fcb2008a70c8dffc9179ce41838496ba816e14a1 Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Tue, 3 Apr 2018 11:23:11 -0400
Subject: [PATCH 0778/1286] drm/amd/display: Move dp_pixel_encoding_type to
 stream_encoder include

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 17 -----------------
 .../amd/display/dc/inc/hw/stream_encoder.h    | 19 +++++++++++++++++++
 2 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 9fe73028d588..cf7433ebf91a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -186,23 +186,6 @@ enum controller_dp_test_pattern {
 	CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
 };
 
-enum dp_pixel_encoding_type {
-	DP_PIXEL_ENCODING_TYPE_RGB444		= 0x00000000,
-	DP_PIXEL_ENCODING_TYPE_YCBCR422		= 0x00000001,
-	DP_PIXEL_ENCODING_TYPE_YCBCR444		= 0x00000002,
-	DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT	= 0x00000003,
-	DP_PIXEL_ENCODING_TYPE_Y_ONLY		= 0x00000004,
-	DP_PIXEL_ENCODING_TYPE_YCBCR420		= 0x00000005
-};
-
-enum dp_component_depth {
-	DP_COMPONENT_PIXEL_DEPTH_6BPC		= 0x00000000,
-	DP_COMPONENT_PIXEL_DEPTH_8BPC		= 0x00000001,
-	DP_COMPONENT_PIXEL_DEPTH_10BPC		= 0x00000002,
-	DP_COMPONENT_PIXEL_DEPTH_12BPC		= 0x00000003,
-	DP_COMPONENT_PIXEL_DEPTH_16BPC		= 0x00000004
-};
-
 enum dc_lut_mode {
 	LUT_BYPASS,
 	LUT_RAM_A,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 5c21336cae4c..cfa7ec9517ae 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -29,11 +29,29 @@
 #define STREAM_ENCODER_H_
 
 #include "audio_types.h"
+#include "hw_shared.h"
 
 struct dc_bios;
 struct dc_context;
 struct dc_crtc_timing;
 
+enum dp_pixel_encoding_type {
+	DP_PIXEL_ENCODING_TYPE_RGB444		= 0x00000000,
+	DP_PIXEL_ENCODING_TYPE_YCBCR422		= 0x00000001,
+	DP_PIXEL_ENCODING_TYPE_YCBCR444		= 0x00000002,
+	DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT	= 0x00000003,
+	DP_PIXEL_ENCODING_TYPE_Y_ONLY		= 0x00000004,
+	DP_PIXEL_ENCODING_TYPE_YCBCR420		= 0x00000005
+};
+
+enum dp_component_depth {
+	DP_COMPONENT_PIXEL_DEPTH_6BPC		= 0x00000000,
+	DP_COMPONENT_PIXEL_DEPTH_8BPC		= 0x00000001,
+	DP_COMPONENT_PIXEL_DEPTH_10BPC		= 0x00000002,
+	DP_COMPONENT_PIXEL_DEPTH_12BPC		= 0x00000003,
+	DP_COMPONENT_PIXEL_DEPTH_16BPC		= 0x00000004
+};
+
 struct encoder_info_frame {
 	/* auxiliary video information */
 	struct dc_info_packet avi;
@@ -138,6 +156,7 @@ struct stream_encoder_funcs {
 
 	void (*set_avmute)(
 		struct stream_encoder *enc, bool enable);
+
 };
 
 #endif /* STREAM_ENCODER_H_ */

From fc6de1c565e03f492a3d9725b93092dac0cc1845 Mon Sep 17 00:00:00 2001
From: "Leo (Sunpeng) Li" <sunpeng.li@amd.com>
Date: Tue, 3 Apr 2018 16:07:16 -0400
Subject: [PATCH 0779/1286] drm/amd/display: Fix regamma not affecting
 full-intensity color values

Hardware understands the regamma LUT as a piecewise linear function,
with points spaced exponentially along the range. We previously
programmed the LUT for range [2^-10, 2^0). This causes (normalized)
color values of 1 (=2^0) to miss the programmed LUT, and fall onto the
end region.

For DCE, the end region is extrapolated using a single (base, slope)
pair, using the max y-value from the last point in the curve as base.
This presents a problem, since this value affects all three color
channels. Scaling down the intensity of, say, the blue regamma curve
will not affect its end region. This is especially noticeable when
using RedShift. It scales down the blue and green channels, but leaves
full-intensity colors unshifted.

Therefore, extend the range to cover [2^-10, 2^1) by programming another
hardware segment, containing only one point. That way, we won't be
hitting the end region.

Note that things are a bit different for DCN, since the end region can
be set per-channel.

Signed-off-by: Leo (Sunpeng) Li <sunpeng.li@amd.com>
Reviewed-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c  | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 15897f0a9616..1b5c11c8fa1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -456,10 +456,13 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 
 	} else {
 		/* 10 segments
-		 * segment is from 2^-10 to 2^0
+		 * segment is from 2^-10 to 2^1
+		 * We include an extra segment for range [2^0, 2^1). This is to
+		 * ensure that colors with normalized values of 1 don't miss the
+		 * LUT.
 		 */
 		region_start = -10;
-		region_end = 0;
+		region_end = 1;
 
 		seg_distr[0] = 4;
 		seg_distr[1] = 4;
@@ -471,7 +474,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 		seg_distr[7] = 4;
 		seg_distr[8] = 4;
 		seg_distr[9] = 4;
-		seg_distr[10] = -1;
+		seg_distr[10] = 0;
 		seg_distr[11] = -1;
 		seg_distr[12] = -1;
 		seg_distr[13] = -1;
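
For clarity on the bookkeeping above (assuming, as the surrounding
translate helper uses them, that seg_distr[i] is the log2 of the point
count in hardware segment i and that segment i spans
[2^(region_start + i), 2^(region_start + i + 1))):

	points(i) = 2^seg_distr[i]
	old range: region_start = -10, region_end = 0 -> 10 segments x 16 points,
	           x = 1.0 falls in the extrapolated end region
	new range: region_start = -10, region_end = 1 -> 10 segments x 16 points
	           plus 1 segment with 2^0 = 1 point, so x = 1.0 now hits a
	           programmed LUT point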

From c5b38aec266deade4067ddc606634ace68d2da8c Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Thu, 29 Mar 2018 16:39:10 -0400
Subject: [PATCH 0780/1286] drm/amd/display: fix segfault on insufficient TG
 during validation

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8d7bc1fa9ffe..d7a92eca8a27 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1700,7 +1700,7 @@ enum dc_status resource_map_pool_resources(
 		pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
 #endif
 
-	if (pipe_idx < 0)
+	if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL)
 		return DC_NO_CONTROLLER_RESOURCE;
 
 	pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];

From d0f6f1c0319d39b792a7969bf511d5b1870f1f0e Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Fri, 23 Mar 2018 15:25:43 -0400
Subject: [PATCH 0781/1286] drm/amd/display: change dml init to use default
 structs

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Eric Bernstein <Eric.Bernstein@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/dml/display_mode_lib.c | 138 ++++++++++--------
 1 file changed, 76 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index c109b2c34c8f..fd9d97aab071 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,75 +26,89 @@
 #include "display_mode_lib.h"
 #include "dc_features.h"
 
+static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+	.rob_buffer_size_kbytes = 64,
+	.det_buffer_size_kbytes = 164,
+	.dpte_buffer_size_in_pte_reqs = 42,
+	.dpp_output_buffer_pixels = 2560,
+	.opp_output_buffer_lines = 1,
+	.pixel_chunk_size_kbytes = 8,
+	.pte_enable = 1,
+	.pte_chunk_size_kbytes = 2,
+	.meta_chunk_size_kbytes = 2,
+	.writeback_chunk_size_kbytes = 2,
+	.line_buffer_size_bits = 589824,
+	.max_line_buffer_lines = 12,
+	.IsLineBufferBppFixed = 0,
+	.LineBufferFixedBpp = -1,
+	.writeback_luma_buffer_size_kbytes = 12,
+	.writeback_chroma_buffer_size_kbytes = 8,
+	.max_num_dpp = 4,
+	.max_num_wb = 2,
+	.max_dchub_pscl_bw_pix_per_clk = 4,
+	.max_pscl_lb_bw_pix_per_clk = 2,
+	.max_lb_vscl_bw_pix_per_clk = 4,
+	.max_vscl_hscl_bw_pix_per_clk = 4,
+	.max_hscl_ratio = 4,
+	.max_vscl_ratio = 4,
+	.hscl_mults = 4,
+	.vscl_mults = 4,
+	.max_hscl_taps = 8,
+	.max_vscl_taps = 8,
+	.dispclk_ramp_margin_percent = 1,
+	.underscan_factor = 1.10,
+	.min_vblank_lines = 14,
+	.dppclk_delay_subtotal = 90,
+	.dispclk_delay_subtotal = 42,
+	.dcfclk_cstate_latency = 10,
+	.max_inter_dcn_tile_repeaters = 8,
+	.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
+	.bug_forcing_LC_req_same_size_fixed = 0,
+};
+
+static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
+	.sr_exit_time_us = 9.0,
+	.sr_enter_plus_exit_time_us = 11.0,
+	.urgent_latency_us = 4.0,
+	.writeback_latency_us = 12.0,
+	.ideal_dram_bw_after_urgent_percent = 80.0,
+	.max_request_size_bytes = 256,
+	.downspread_percent = 0.5,
+	.dram_page_open_time_ns = 50.0,
+	.dram_rw_turnaround_time_ns = 17.5,
+	.dram_return_buffer_per_channel_bytes = 8192,
+	.round_trip_ping_latency_dcfclk_cycles = 128,
+	.urgent_out_of_order_return_per_channel_bytes = 256,
+	.channel_interleave_bytes = 256,
+	.num_banks = 8,
+	.num_chans = 2,
+	.vmm_page_size_bytes = 4096,
+	.dram_clock_change_latency_us = 17.0,
+	.writeback_dram_clock_change_latency_us = 23.0,
+	.return_bus_width_bytes = 64,
+};
+
 static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
 {
-	if (project == DML_PROJECT_RAVEN1) {
-		soc->sr_exit_time_us = 9.0;
-		soc->sr_enter_plus_exit_time_us = 11.0;
-		soc->urgent_latency_us = 4.0;
-		soc->writeback_latency_us = 12.0;
-		soc->ideal_dram_bw_after_urgent_percent = 80.0;
-		soc->max_request_size_bytes = 256;
-		soc->downspread_percent = 0.5;
-		soc->dram_page_open_time_ns = 50.0;
-		soc->dram_rw_turnaround_time_ns = 17.5;
-		soc->dram_return_buffer_per_channel_bytes = 8192;
-		soc->round_trip_ping_latency_dcfclk_cycles = 128;
-		soc->urgent_out_of_order_return_per_channel_bytes = 256;
-		soc->channel_interleave_bytes = 256;
-		soc->num_banks = 8;
-		soc->num_chans = 2;
-		soc->vmm_page_size_bytes = 4096;
-		soc->dram_clock_change_latency_us = 17.0;
-		soc->writeback_dram_clock_change_latency_us = 23.0;
-		soc->return_bus_width_bytes = 64;
-	} else {
-		BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+	switch (project) {
+	case DML_PROJECT_RAVEN1:
+		*soc = dcn1_0_soc;
+		break;
+	default:
+		ASSERT(0);
+		break;
 	}
 }
 
 static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
 {
-	if (project == DML_PROJECT_RAVEN1) {
-		ip->rob_buffer_size_kbytes = 64;
-		ip->det_buffer_size_kbytes = 164;
-		ip->dpte_buffer_size_in_pte_reqs = 42;
-		ip->dpp_output_buffer_pixels = 2560;
-		ip->opp_output_buffer_lines = 1;
-		ip->pixel_chunk_size_kbytes = 8;
-		ip->pte_enable = 1;
-		ip->pte_chunk_size_kbytes = 2;
-		ip->meta_chunk_size_kbytes = 2;
-		ip->writeback_chunk_size_kbytes = 2;
-		ip->line_buffer_size_bits = 589824;
-		ip->max_line_buffer_lines = 12;
-		ip->IsLineBufferBppFixed = 0;
-		ip->LineBufferFixedBpp = -1;
-		ip->writeback_luma_buffer_size_kbytes = 12;
-		ip->writeback_chroma_buffer_size_kbytes = 8;
-		ip->max_num_dpp = 4;
-		ip->max_num_wb = 2;
-		ip->max_dchub_pscl_bw_pix_per_clk = 4;
-		ip->max_pscl_lb_bw_pix_per_clk = 2;
-		ip->max_lb_vscl_bw_pix_per_clk = 4;
-		ip->max_vscl_hscl_bw_pix_per_clk = 4;
-		ip->max_hscl_ratio = 4;
-		ip->max_vscl_ratio = 4;
-		ip->hscl_mults = 4;
-		ip->vscl_mults = 4;
-		ip->max_hscl_taps = 8;
-		ip->max_vscl_taps = 8;
-		ip->dispclk_ramp_margin_percent = 1;
-		ip->underscan_factor = 1.10;
-		ip->min_vblank_lines = 14;
-		ip->dppclk_delay_subtotal = 90;
-		ip->dispclk_delay_subtotal = 42;
-		ip->dcfclk_cstate_latency = 10;
-		ip->max_inter_dcn_tile_repeaters = 8;
-		ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0;
-		ip->bug_forcing_LC_req_same_size_fixed = 0;
-	} else {
-		BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+	switch (project) {
+	case DML_PROJECT_RAVEN1:
+		*ip = dcn1_0_ip;
+		break;
+	default:
+		ASSERT(0);
+		break;
 	}
 }
 

From 339cc82ae67700cb25a5bb10842cca5b09a79afe Mon Sep 17 00:00:00 2001
From: Yongqiang Sun <yongqiang.sun@amd.com>
Date: Wed, 4 Apr 2018 17:27:18 -0400
Subject: [PATCH 0782/1286] drm/amd/display: Check lid state to determine fast
 boot optimization.

For legacy boot with the lid closed, the eDP information cannot be
read correctly via SBIOS_SCRATCH_3, which results in eDP not being lit
up properly when the lid is opened.
Checking the lid state instead resolves the issue.

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Eric Yang <eric.yang2@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc_stream.h    |  1 +
 .../display/dc/dce110/dce110_hw_sequencer.c   | 24 ++++++++++++-------
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index d7e6d53bb383..11b3433d6432 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -92,6 +92,7 @@ struct dc_stream_state {
 	int phy_pix_clk;
 	enum signal_type signal;
 	bool dpms_off;
+	bool lid_state_closed;
 
 	struct dc_stream_status status;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 1b5c11c8fa1f..4a4b3bcd4230 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1471,6 +1471,17 @@ static void disable_vga_and_power_gate_all_controllers(
 	}
 }
 
+static bool is_eDP_lid_closed(struct dc_state *context)
+{
+	int i;
+
+	for (i = 0; i < context->stream_count; i++) {
+		if (context->streams[i]->signal == SIGNAL_TYPE_EDP)
+			return context->streams[i]->lid_state_closed;
+	}
+	return false;
+}
+
 static struct dc_link *get_link_for_edp_not_in_use(
 		struct dc *dc,
 		struct dc_state *context)
@@ -1505,20 +1516,17 @@ static struct dc_link *get_link_for_edp_not_in_use(
  */
 void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 {
-	struct dc_bios *dcb = dc->ctx->dc_bios;
-
-	/* vbios already light up eDP, so we can leverage vbios and skip eDP
+	/* check eDP lid state:
+	 * If lid is open, vbios already light up eDP, so we can leverage vbios and skip eDP
 	 * programming
 	 */
-	bool can_eDP_fast_boot_optimize =
-			(dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
-
-	/* if OS doesn't light up eDP and eDP link is available, we want to disable */
+	bool lid_state_closed = is_eDP_lid_closed(context);
 	struct dc_link *edp_link_to_turnoff = NULL;
 
-	if (can_eDP_fast_boot_optimize) {
+	if (!lid_state_closed) {
 		edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
 
+		/* if OS doesn't light up eDP and eDP link is available, we want to disable */
 		if (!edp_link_to_turnoff)
 			dc->apply_edp_fast_boot_optimization = true;
 	}

From c4b0faae71f33377a11fe19dadcce6deb86f5037 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Fri, 6 Apr 2018 12:07:19 -0400
Subject: [PATCH 0783/1286] drm/amd/display: Do not create memory allocation if
 stats not enabled

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/modules/stats/stats.c | 26 +++++++++++--------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index ed5f6809a64e..48e02197919f 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -115,18 +115,22 @@ struct mod_stats *mod_stats_create(struct dc *dc)
 			&reg_data, sizeof(unsigned int), &flag))
 		core_stats->enabled = reg_data;
 
-	core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
-	if (dm_read_persistent_data(dc->ctx, NULL, NULL,
-			DAL_STATS_ENTRIES_REGKEY,
-			&reg_data, sizeof(unsigned int), &flag)) {
-		if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
-			core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
-		else
-			core_stats->entries = reg_data;
-	}
+	if (core_stats->enabled) {
+		core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
+		if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+				DAL_STATS_ENTRIES_REGKEY,
+				&reg_data, sizeof(unsigned int), &flag)) {
+			if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
+				core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
+			else
+				core_stats->entries = reg_data;
+		}
 
-	core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
-					GFP_KERNEL);
+		core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
+						GFP_KERNEL);
+	} else {
+		core_stats->entries = 0;
+	}
 
 	if (core_stats->time == NULL)
 		goto fail_construct;

From 5ebfb7a5996ea1dceeb2a392d7e46357042e4506 Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Thu, 5 Apr 2018 17:09:20 -0400
Subject: [PATCH 0784/1286] drm/amd/display: Move DCC support functions into
 dchubbub

Added the dchubbub.h header file for common enum/struct definitions.
Added new interface functions get_dcc_compression_cap,
dcc_support_swizzle, and dcc_support_pixel_format.

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/dcn10/dcn10_hubbub.c   | 221 ++++++++++++++++-
 .../drm/amd/display/dc/dcn10/dcn10_hubbub.h   |   7 +-
 .../drm/amd/display/dc/dcn10/dcn10_resource.c | 231 +-----------------
 .../gpu/drm/amd/display/dc/inc/hw/dchubbub.h  |  64 +++++
 4 files changed, 291 insertions(+), 232 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 738f67ffd1b4..b9fb14a3224b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -476,8 +476,227 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
 }
 
+static bool hubbub1_dcc_support_swizzle(
+		enum swizzle_mode_values swizzle,
+		unsigned int bytes_per_element,
+		enum segment_order *segment_order_horz,
+		enum segment_order *segment_order_vert)
+{
+	bool standard_swizzle = false;
+	bool display_swizzle = false;
+
+	switch (swizzle) {
+	case DC_SW_4KB_S:
+	case DC_SW_64KB_S:
+	case DC_SW_VAR_S:
+	case DC_SW_4KB_S_X:
+	case DC_SW_64KB_S_X:
+	case DC_SW_VAR_S_X:
+		standard_swizzle = true;
+		break;
+	case DC_SW_4KB_D:
+	case DC_SW_64KB_D:
+	case DC_SW_VAR_D:
+	case DC_SW_4KB_D_X:
+	case DC_SW_64KB_D_X:
+	case DC_SW_VAR_D_X:
+		display_swizzle = true;
+		break;
+	default:
+		break;
+	}
+
+	if (bytes_per_element == 1 && standard_swizzle) {
+		*segment_order_horz = segment_order__contiguous;
+		*segment_order_vert = segment_order__na;
+		return true;
+	}
+	if (bytes_per_element == 2 && standard_swizzle) {
+		*segment_order_horz = segment_order__non_contiguous;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 4 && standard_swizzle) {
+		*segment_order_horz = segment_order__non_contiguous;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 8 && standard_swizzle) {
+		*segment_order_horz = segment_order__na;
+		*segment_order_vert = segment_order__contiguous;
+		return true;
+	}
+	if (bytes_per_element == 8 && display_swizzle) {
+		*segment_order_horz = segment_order__contiguous;
+		*segment_order_vert = segment_order__non_contiguous;
+		return true;
+	}
+
+	return false;
+}
+
+static bool hubbub1_dcc_support_pixel_format(
+		enum surface_pixel_format format,
+		unsigned int *bytes_per_element)
+{
+	/* DML: get_bytes_per_element */
+	switch (format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		*bytes_per_element = 2;
+		return true;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		*bytes_per_element = 4;
+		return true;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+		*bytes_per_element = 8;
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+		unsigned int bytes_per_element)
+{
+	/* copied from DML.  might want to refactor DML to leverage from DML */
+	/* DML : get_blk256_size */
+	if (bytes_per_element == 1) {
+		*blk256_width = 16;
+		*blk256_height = 16;
+	} else if (bytes_per_element == 2) {
+		*blk256_width = 16;
+		*blk256_height = 8;
+	} else if (bytes_per_element == 4) {
+		*blk256_width = 8;
+		*blk256_height = 8;
+	} else if (bytes_per_element == 8) {
+		*blk256_width = 8;
+		*blk256_height = 4;
+	}
+}
+
+static void hubbub1_det_request_size(
+		unsigned int height,
+		unsigned int width,
+		unsigned int bpe,
+		bool *req128_horz_wc,
+		bool *req128_vert_wc)
+{
+	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */
+
+	unsigned int blk256_height = 0;
+	unsigned int blk256_width = 0;
+	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+	swath_bytes_horz_wc = height * blk256_height * bpe;
+	swath_bytes_vert_wc = width * blk256_width * bpe;
+
+	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+			false : /* full 256B request */
+			true; /* half 128b request */
+
+	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+			false : /* full 256B request */
+			true; /* half 128b request */
+}
+
+static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
+		const struct dc_dcc_surface_param *input,
+		struct dc_surface_dcc_cap *output)
+{
+	struct dc *dc = hubbub->ctx->dc;
+	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+	enum dcc_control dcc_control;
+	unsigned int bpe;
+	enum segment_order segment_order_horz, segment_order_vert;
+	bool req128_horz_wc, req128_vert_wc;
+
+	memset(output, 0, sizeof(*output));
+
+	if (dc->debug.disable_dcc == DCC_DISABLE)
+		return false;
+
+	if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe))
+		return false;
+
+	if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+			&segment_order_horz, &segment_order_vert))
+		return false;
+
+	hubbub1_det_request_size(input->surface_size.height,  input->surface_size.width,
+			bpe, &req128_horz_wc, &req128_vert_wc);
+
+	if (!req128_horz_wc && !req128_vert_wc) {
+		dcc_control = dcc_control__256_256_xxx;
+	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+		if (!req128_horz_wc)
+			dcc_control = dcc_control__256_256_xxx;
+		else if (segment_order_horz == segment_order__contiguous)
+			dcc_control = dcc_control__128_128_xxx;
+		else
+			dcc_control = dcc_control__256_64_64;
+	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+		if (!req128_vert_wc)
+			dcc_control = dcc_control__256_256_xxx;
+		else if (segment_order_vert == segment_order__contiguous)
+			dcc_control = dcc_control__128_128_xxx;
+		else
+			dcc_control = dcc_control__256_64_64;
+	} else {
+		if ((req128_horz_wc &&
+			segment_order_horz == segment_order__non_contiguous) ||
+			(req128_vert_wc &&
+			segment_order_vert == segment_order__non_contiguous))
+			/* access_dir not known, must use most constraining */
+			dcc_control = dcc_control__256_64_64;
+		else
+			/* reg128 is true for either horz and vert
+			 * but segment_order is contiguous
+			 */
+			dcc_control = dcc_control__128_128_xxx;
+	}
+
+	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+		dcc_control != dcc_control__256_256_xxx)
+		return false;
+
+	switch (dcc_control) {
+	case dcc_control__256_256_xxx:
+		output->grph.rgb.max_uncompressed_blk_size = 256;
+		output->grph.rgb.max_compressed_blk_size = 256;
+		output->grph.rgb.independent_64b_blks = false;
+		break;
+	case dcc_control__128_128_xxx:
+		output->grph.rgb.max_uncompressed_blk_size = 128;
+		output->grph.rgb.max_compressed_blk_size = 128;
+		output->grph.rgb.independent_64b_blks = false;
+		break;
+	case dcc_control__256_64_64:
+		output->grph.rgb.max_uncompressed_blk_size = 256;
+		output->grph.rgb.max_compressed_blk_size = 64;
+		output->grph.rgb.independent_64b_blks = true;
+		break;
+	}
+
+	output->capable = true;
+	output->const_color_support = false;
+
+	return true;
+}
+
 static const struct hubbub_funcs hubbub1_funcs = {
-	.update_dchub = hubbub1_update_dchub
+	.update_dchub = hubbub1_update_dchub,
+	.dcc_support_swizzle = hubbub1_dcc_support_swizzle,
+	.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
+	.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
 };
 
 void hubbub1_construct(struct hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index a16e908821a0..f479f54e5bb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -27,6 +27,7 @@
 #define __DC_HUBBUB_DCN10_H__
 
 #include "core_types.h"
+#include "dchubbub.h"
 
 #define HUBHUB_REG_LIST_DCN()\
 	SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
@@ -173,12 +174,6 @@ struct dcn_hubbub_wm {
 	struct dcn_hubbub_wm_set sets[4];
 };
 
-struct hubbub_funcs {
-	void (*update_dchub)(
-			struct hubbub *hubbub,
-			struct dchub_init_data *dh_data);
-};
-
 struct hubbub {
 	const struct hubbub_funcs *funcs;
 	struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index f305f65675d8..2c0a3150bf2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -937,235 +937,16 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
 	return idle_pipe;
 }
 
-enum dcc_control {
-	dcc_control__256_256_xxx,
-	dcc_control__128_128_xxx,
-	dcc_control__256_64_64,
-};
-
-enum segment_order {
-	segment_order__na,
-	segment_order__contiguous,
-	segment_order__non_contiguous,
-};
-
-static bool dcc_support_pixel_format(
-		enum surface_pixel_format format,
-		unsigned int *bytes_per_element)
-{
-	/* DML: get_bytes_per_element */
-	switch (format) {
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
-	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
-		*bytes_per_element = 2;
-		return true;
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
-		*bytes_per_element = 4;
-		return true;
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
-		*bytes_per_element = 8;
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool dcc_support_swizzle(
-		enum swizzle_mode_values swizzle,
-		unsigned int bytes_per_element,
-		enum segment_order *segment_order_horz,
-		enum segment_order *segment_order_vert)
-{
-	bool standard_swizzle = false;
-	bool display_swizzle = false;
-
-	switch (swizzle) {
-	case DC_SW_4KB_S:
-	case DC_SW_64KB_S:
-	case DC_SW_VAR_S:
-	case DC_SW_4KB_S_X:
-	case DC_SW_64KB_S_X:
-	case DC_SW_VAR_S_X:
-		standard_swizzle = true;
-		break;
-	case DC_SW_4KB_D:
-	case DC_SW_64KB_D:
-	case DC_SW_VAR_D:
-	case DC_SW_4KB_D_X:
-	case DC_SW_64KB_D_X:
-	case DC_SW_VAR_D_X:
-		display_swizzle = true;
-		break;
-	default:
-		break;
-	}
-
-	if (bytes_per_element == 1 && standard_swizzle) {
-		*segment_order_horz = segment_order__contiguous;
-		*segment_order_vert = segment_order__na;
-		return true;
-	}
-	if (bytes_per_element == 2 && standard_swizzle) {
-		*segment_order_horz = segment_order__non_contiguous;
-		*segment_order_vert = segment_order__contiguous;
-		return true;
-	}
-	if (bytes_per_element == 4 && standard_swizzle) {
-		*segment_order_horz = segment_order__non_contiguous;
-		*segment_order_vert = segment_order__contiguous;
-		return true;
-	}
-	if (bytes_per_element == 8 && standard_swizzle) {
-		*segment_order_horz = segment_order__na;
-		*segment_order_vert = segment_order__contiguous;
-		return true;
-	}
-	if (bytes_per_element == 8 && display_swizzle) {
-		*segment_order_horz = segment_order__contiguous;
-		*segment_order_vert = segment_order__non_contiguous;
-		return true;
-	}
-
-	return false;
-}
-
-static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
-		unsigned int bytes_per_element)
-{
-	/* copied from DML.  might want to refactor DML to leverage from DML */
-	/* DML : get_blk256_size */
-	if (bytes_per_element == 1) {
-		*blk256_width = 16;
-		*blk256_height = 16;
-	} else if (bytes_per_element == 2) {
-		*blk256_width = 16;
-		*blk256_height = 8;
-	} else if (bytes_per_element == 4) {
-		*blk256_width = 8;
-		*blk256_height = 8;
-	} else if (bytes_per_element == 8) {
-		*blk256_width = 8;
-		*blk256_height = 4;
-	}
-}
-
-static void det_request_size(
-		unsigned int height,
-		unsigned int width,
-		unsigned int bpe,
-		bool *req128_horz_wc,
-		bool *req128_vert_wc)
-{
-	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */
-
-	unsigned int blk256_height = 0;
-	unsigned int blk256_width = 0;
-	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
-
-	get_blk256_size(&blk256_width, &blk256_height, bpe);
-
-	swath_bytes_horz_wc = height * blk256_height * bpe;
-	swath_bytes_vert_wc = width * blk256_width * bpe;
-
-	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
-			false : /* full 256B request */
-			true; /* half 128b request */
-
-	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
-			false : /* full 256B request */
-			true; /* half 128b request */
-}
-
-static bool get_dcc_compression_cap(const struct dc *dc,
+static bool dcn10_get_dcc_compression_cap(const struct dc *dc,
 		const struct dc_dcc_surface_param *input,
 		struct dc_surface_dcc_cap *output)
 {
-	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
-	enum dcc_control dcc_control;
-	unsigned int bpe;
-	enum segment_order segment_order_horz, segment_order_vert;
-	bool req128_horz_wc, req128_vert_wc;
-
-	memset(output, 0, sizeof(*output));
-
-	if (dc->debug.disable_dcc == DCC_DISABLE)
-		return false;
-
-	if (!dcc_support_pixel_format(input->format,
-			&bpe))
-		return false;
-
-	if (!dcc_support_swizzle(input->swizzle_mode, bpe,
-			&segment_order_horz, &segment_order_vert))
-		return false;
-
-	det_request_size(input->surface_size.height,  input->surface_size.width,
-			bpe, &req128_horz_wc, &req128_vert_wc);
-
-	if (!req128_horz_wc && !req128_vert_wc) {
-		dcc_control = dcc_control__256_256_xxx;
-	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
-		if (!req128_horz_wc)
-			dcc_control = dcc_control__256_256_xxx;
-		else if (segment_order_horz == segment_order__contiguous)
-			dcc_control = dcc_control__128_128_xxx;
-		else
-			dcc_control = dcc_control__256_64_64;
-	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
-		if (!req128_vert_wc)
-			dcc_control = dcc_control__256_256_xxx;
-		else if (segment_order_vert == segment_order__contiguous)
-			dcc_control = dcc_control__128_128_xxx;
-		else
-			dcc_control = dcc_control__256_64_64;
-	} else {
-		if ((req128_horz_wc &&
-			segment_order_horz == segment_order__non_contiguous) ||
-			(req128_vert_wc &&
-			segment_order_vert == segment_order__non_contiguous))
-			/* access_dir not known, must use most constraining */
-			dcc_control = dcc_control__256_64_64;
-		else
-			/* reg128 is true for either horz and vert
-			 * but segment_order is contiguous
-			 */
-			dcc_control = dcc_control__128_128_xxx;
-	}
-
-	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
-		dcc_control != dcc_control__256_256_xxx)
-		return false;
-
-	switch (dcc_control) {
-	case dcc_control__256_256_xxx:
-		output->grph.rgb.max_uncompressed_blk_size = 256;
-		output->grph.rgb.max_compressed_blk_size = 256;
-		output->grph.rgb.independent_64b_blks = false;
-		break;
-	case dcc_control__128_128_xxx:
-		output->grph.rgb.max_uncompressed_blk_size = 128;
-		output->grph.rgb.max_compressed_blk_size = 128;
-		output->grph.rgb.independent_64b_blks = false;
-		break;
-	case dcc_control__256_64_64:
-		output->grph.rgb.max_uncompressed_blk_size = 256;
-		output->grph.rgb.max_compressed_blk_size = 64;
-		output->grph.rgb.independent_64b_blks = true;
-		break;
-	}
-
-	output->capable = true;
-	output->const_color_support = false;
-
-	return true;
+	return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
+			dc->res_pool->hubbub,
+			input,
+			output);
 }
 
-
 static void dcn10_destroy_resource_pool(struct resource_pool **pool)
 {
 	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
@@ -1186,7 +967,7 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
 }
 
 static struct dc_cap_funcs cap_funcs = {
-	.get_dcc_compression_cap = get_dcc_compression_cap
+	.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
 };
 
 static struct resource_funcs dcn10_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
new file mode 100644
index 000000000000..02f757dd70d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCHUBBUB_H__
+#define __DAL_DCHUBBUB_H__
+
+
+enum dcc_control {
+	dcc_control__256_256_xxx,
+	dcc_control__128_128_xxx,
+	dcc_control__256_64_64,
+};
+
+enum segment_order {
+	segment_order__na,
+	segment_order__contiguous,
+	segment_order__non_contiguous,
+};
+
+
+struct hubbub_funcs {
+	void (*update_dchub)(
+			struct hubbub *hubbub,
+			struct dchub_init_data *dh_data);
+
+	bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
+			const struct dc_dcc_surface_param *input,
+			struct dc_surface_dcc_cap *output);
+
+	bool (*dcc_support_swizzle)(
+			enum swizzle_mode_values swizzle,
+			unsigned int bytes_per_element,
+			enum segment_order *segment_order_horz,
+			enum segment_order *segment_order_vert);
+
+	bool (*dcc_support_pixel_format)(
+			enum surface_pixel_format format,
+			unsigned int *bytes_per_element);
+};
+
+
+#endif
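
From the callers' point of view nothing changes: DCC capability is still
queried through the pool's cap_funcs table (exposed as dc->cap_funcs
elsewhere in DC, an assumption here), which now just forwards to the
hubbub implementation. A hedged usage sketch follows; the structure and
enum names are taken from the code above, while the particular surface
parameters are made-up example values.

	struct dc_dcc_surface_param input = {0};
	struct dc_surface_dcc_cap cap = {0};

	input.format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
	input.swizzle_mode = DC_SW_64KB_S;
	input.surface_size.width = 1920;
	input.surface_size.height = 1080;
	input.scan = SCAN_DIRECTION_HORIZONTAL;

	if (dc->cap_funcs.get_dcc_compression_cap(dc, &input, &cap) &&
	    cap.capable) {
		/* cap.grph.rgb.max_compressed_blk_size now reports the largest
		 * compressed block size DCC supports for this surface */
	}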

From 7ac897b5afb98369a4edd71950921026c3029d5f Mon Sep 17 00:00:00 2001
From: Charlene Liu <charlene.liu@amd.com>
Date: Fri, 6 Apr 2018 23:03:12 -0400
Subject: [PATCH 0785/1286] drm/amd/display: HDMI has no sound after Panel
 power off/on

Signed-off-by: Charlene Liu <charlene.liu@amd.com>
Reviewed-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 07c32421c226..84e26c894046 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -718,6 +718,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
 		if (info_frame->avi.valid) {
 			const uint32_t *content =
 				(const uint32_t *) &info_frame->avi.sb[0];
+			/*we need turn on clock before programming AFMT block*/
+			REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
 
 			REG_WRITE(AFMT_AVI_INFO0, content[0]);
 

From 2c37e49a6bcd5e0c66963301e9feab63b5f928f3 Mon Sep 17 00:00:00 2001
From: Yongqiang Sun <yongqiang.sun@amd.com>
Date: Fri, 6 Apr 2018 21:38:10 -0400
Subject: [PATCH 0786/1286] drm/amd/display: Check SCRATCH reg to determine S3
 resume.

Using the lid state alone to determine the fast boot optimization is
not enough. On S3 resume the BIOS is not involved in boot, so eDP is
not lit up even though the lid state is open; if we apply the fast boot
optimization anyway, the eDP panel skips link enablement and the result
is a black screen after boot.
Because the BIOS is not involved, the BIOS_SCRATCH_3 value will be 0
regardless of UEFI or legacy boot, so use that to detect this case.

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../display/dc/dce110/dce110_hw_sequencer.c   | 33 ++++++++++++++++---
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4a4b3bcd4230..bd34193ad779 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1516,18 +1516,41 @@ static struct dc_link *get_link_for_edp_not_in_use(
  */
 void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 {
-	/* check eDP lid state:
-	 * If lid is open, vbios already light up eDP, so we can leverage vbios and skip eDP
-	 * programming
+	/* check eDP lid state and BIOS_SCRATCH_3 to determine fast boot optimization
+	 * UEFI boot
+	 *				edp_active_status_from_scratch		fast boot optimization
+	 * S4/S5 resume:
+	 * Lid Open		true								true
+	 * Lid Close	false								false
+	 *
+	 * S3/ resume:
+	 * Lid Open		false								false
+	 * Lid Close	false								false
+	 *
+	 * Legacy boot:
+	 *				edp_active_status_from_scratch		fast boot optimization
+	 * S4/S resume:
+	 * Lid Open		true								true
+	 * Lid Close	true								false
+	 *
+	 * S3/ resume:
+	 * Lid Open		false								false
+	 * Lid Close	false								false
 	 */
+	struct dc_bios *dcb = dc->ctx->dc_bios;
 	bool lid_state_closed = is_eDP_lid_closed(context);
 	struct dc_link *edp_link_to_turnoff = NULL;
+	bool edp_active_status_from_scratch =
+			(dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
 
+	/*Lid open*/
 	if (!lid_state_closed) {
 		edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
 
-		/* if OS doesn't light up eDP and eDP link is available, we want to disable */
-		if (!edp_link_to_turnoff)
+		/* if OS doesn't light up eDP and eDP link is available, we want to disable
+		 * If resume from S4/S5, should optimization.
+		 */
+		if (!edp_link_to_turnoff && edp_active_status_from_scratch)
 			dc->apply_edp_fast_boot_optimization = true;
 	}
 

From 0a93dc7f595f43b621277ecfc05a44ed0c719a5f Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Thu, 29 Mar 2018 08:43:02 -0400
Subject: [PATCH 0787/1286] drm/amd/display: add rq/dlg/ttu to dtn log

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc_helper.c    |  59 +++++++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 153 +++++++++++++++++-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h |  19 +--
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 114 ++++++++++++-
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  |  20 +++
 .../gpu/drm/amd/display/dc/inc/reg_helper.h   |  56 +++++++
 6 files changed, 401 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 48e1fcf53d43..bd0fda0ceb91 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -117,6 +117,65 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
 	return reg_val;
 }
 
+uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+	return reg_val;
+}
+
+uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
+	return reg_val;
+}
+
+uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
+		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
+	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
+	return reg_val;
+}
 /* note:  va version of this is pretty bad idea, since there is a output parameter pass by pointer
  * compiler won't be able to check for size match and is prone to stack corruption type of bugs
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 4ca9b6e9a824..58062172cf3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -756,9 +756,159 @@ void min_set_viewport(
 		  PRI_VIEWPORT_Y_START_C, viewport_c->y);
 }
 
-void hubp1_read_state(struct dcn10_hubp *hubp1,
+void hubp1_read_state(struct hubp *hubp,
 		struct dcn_hubp_state *s)
 {
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+	struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
+	struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
+	struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+	/* Requester */
+	REG_GET(HUBPRET_CONTROL,
+			DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
+	REG_GET_4(DCN_EXPANSION_MODE,
+			DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
+			PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
+			MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
+			CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
+	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+		CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+		MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+		META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+		MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+		DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+		MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
+		SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+		PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+		CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+		MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+		META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+		MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+		DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+		MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
+		SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+		PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+
+	/* DLG - Per hubp */
+	REG_GET_2(BLANK_OFFSET_0,
+		REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
+		DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
+
+	REG_GET(BLANK_OFFSET_1,
+		MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
+
+	REG_GET(DST_DIMENSIONS,
+		REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
+
+	REG_GET_2(DST_AFTER_SCALER,
+		REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
+		DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
+
+	if (REG(PREFETCH_SETTINS))
+		REG_GET_2(PREFETCH_SETTINS,
+			DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+			VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+	else
+		REG_GET_2(PREFETCH_SETTINGS,
+			DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+			VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+
+	REG_GET_2(VBLANK_PARAMETERS_0,
+		DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
+		DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
+
+	REG_GET(REF_FREQ_TO_PIX_FREQ,
+		REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
+
+	/* DLG - Per luma/chroma */
+	REG_GET(VBLANK_PARAMETERS_1,
+		REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
+
+	REG_GET(VBLANK_PARAMETERS_3,
+		REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+	if (REG(NOM_PARAMETERS_0))
+		REG_GET(NOM_PARAMETERS_0,
+			DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
+
+	if (REG(NOM_PARAMETERS_1))
+		REG_GET(NOM_PARAMETERS_1,
+			REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
+
+	REG_GET(NOM_PARAMETERS_4,
+		DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
+
+	REG_GET(NOM_PARAMETERS_5,
+		REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+	REG_GET_2(PER_LINE_DELIVERY_PRE,
+		REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
+		REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
+
+	REG_GET_2(PER_LINE_DELIVERY,
+		REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
+		REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
+
+	if (REG(PREFETCH_SETTINS_C))
+		REG_GET(PREFETCH_SETTINS_C,
+			VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+	else
+		REG_GET(PREFETCH_SETTINGS_C,
+			VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+
+	REG_GET(VBLANK_PARAMETERS_2,
+		REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
+
+	REG_GET(VBLANK_PARAMETERS_4,
+		REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+	if (REG(NOM_PARAMETERS_2))
+		REG_GET(NOM_PARAMETERS_2,
+			DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
+
+	if (REG(NOM_PARAMETERS_3))
+		REG_GET(NOM_PARAMETERS_3,
+			REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
+
+	REG_GET(NOM_PARAMETERS_6,
+		DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
+
+	REG_GET(NOM_PARAMETERS_7,
+		REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+	/* TTU - per hubp */
+	REG_GET_2(DCN_TTU_QOS_WM,
+		QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
+		QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
+
+	REG_GET_2(DCN_GLOBAL_TTU_CNTL,
+		MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
+		QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
+
+	/* TTU - per luma/chroma */
+	/* Assumed surf0 is luma and 1 is chroma */
+
+	REG_GET_3(DCN_SURF0_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
+		QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
+		QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
+
+	REG_GET(DCN_SURF0_TTU_CNTL1,
+		REFCYC_PER_REQ_DELIVERY_PRE,
+		&ttu_attr->refcyc_per_req_delivery_pre_l);
+
+	REG_GET_3(DCN_SURF1_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
+		QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
+		QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
+
+	REG_GET(DCN_SURF1_TTU_CNTL1,
+		REFCYC_PER_REQ_DELIVERY_PRE,
+		&ttu_attr->refcyc_per_req_delivery_pre_c);
+
+	/* Rest of hubp */
 	REG_GET(DCSURF_SURFACE_CONFIG,
 			SURFACE_PIXEL_FORMAT, &s->pixel_format);
 
@@ -956,6 +1106,7 @@ static struct hubp_funcs dcn10_hubp_funcs = {
 	.hubp_disconnect = hubp1_disconnect,
 	.hubp_clk_cntl = hubp1_clk_cntl,
 	.hubp_vtg_sel = hubp1_vtg_sel,
+	.hubp_read_state = hubp1_read_state,
 };
 
 /*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index e0d6d32357c0..920ae3a1b412 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -690,24 +690,7 @@ void dcn10_hubp_construct(
 	const struct dcn_mi_shift *hubp_shift,
 	const struct dcn_mi_mask *hubp_mask);
 
-
-struct dcn_hubp_state {
-	uint32_t pixel_format;
-	uint32_t inuse_addr_hi;
-	uint32_t viewport_width;
-	uint32_t viewport_height;
-	uint32_t rotation_angle;
-	uint32_t h_mirror_en;
-	uint32_t sw_mode;
-	uint32_t dcc_en;
-	uint32_t blank_en;
-	uint32_t underflow_status;
-	uint32_t ttu_disable;
-	uint32_t min_ttu_vblank;
-	uint32_t qos_level_low_wm;
-	uint32_t qos_level_high_wm;
-};
-void hubp1_read_state(struct dcn10_hubp *hubp1,
+void hubp1_read_state(struct hubp *hubp,
 		struct dcn_hubp_state *s);
 
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index a6cf9ade9131..7dd130d15a67 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -112,6 +112,104 @@ void dcn10_log_hubbub_state(struct dc *dc)
 	DTN_INFO("\n");
 }
 
+static void print_rq_dlg_ttu_regs(struct dc_context *dc_ctx, struct dcn_hubp_state *s)
+{
+	struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
+	struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
+	struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+	DTN_INFO("========Requester========\n");
+	DTN_INFO("drq_expansion_mode      = 0x%0x\n", rq_regs->drq_expansion_mode);
+	DTN_INFO("prq_expansion_mode      = 0x%0x\n", rq_regs->prq_expansion_mode);
+	DTN_INFO("mrq_expansion_mode      = 0x%0x\n", rq_regs->mrq_expansion_mode);
+	DTN_INFO("crq_expansion_mode      = 0x%0x\n", rq_regs->crq_expansion_mode);
+	DTN_INFO("plane1_base_address     = 0x%0x\n", rq_regs->plane1_base_address);
+	DTN_INFO("==<LUMA>==\n");
+	DTN_INFO("chunk_size              = 0x%0x\n", rq_regs->rq_regs_l.chunk_size);
+	DTN_INFO("min_chunk_size          = 0x%0x\n", rq_regs->rq_regs_l.min_chunk_size);
+	DTN_INFO("meta_chunk_size         = 0x%0x\n", rq_regs->rq_regs_l.meta_chunk_size);
+	DTN_INFO("min_meta_chunk_size     = 0x%0x\n", rq_regs->rq_regs_l.min_meta_chunk_size);
+	DTN_INFO("dpte_group_size         = 0x%0x\n", rq_regs->rq_regs_l.dpte_group_size);
+	DTN_INFO("mpte_group_size         = 0x%0x\n", rq_regs->rq_regs_l.mpte_group_size);
+	DTN_INFO("swath_height            = 0x%0x\n", rq_regs->rq_regs_l.swath_height);
+	DTN_INFO("pte_row_height_linear   = 0x%0x\n", rq_regs->rq_regs_l.pte_row_height_linear);
+	DTN_INFO("==<CHROMA>==\n");
+	DTN_INFO("chunk_size              = 0x%0x\n", rq_regs->rq_regs_c.chunk_size);
+	DTN_INFO("min_chunk_size          = 0x%0x\n", rq_regs->rq_regs_c.min_chunk_size);
+	DTN_INFO("meta_chunk_size         = 0x%0x\n", rq_regs->rq_regs_c.meta_chunk_size);
+	DTN_INFO("min_meta_chunk_size     = 0x%0x\n", rq_regs->rq_regs_c.min_meta_chunk_size);
+	DTN_INFO("dpte_group_size         = 0x%0x\n", rq_regs->rq_regs_c.dpte_group_size);
+	DTN_INFO("mpte_group_size         = 0x%0x\n", rq_regs->rq_regs_c.mpte_group_size);
+	DTN_INFO("swath_height            = 0x%0x\n", rq_regs->rq_regs_c.swath_height);
+	DTN_INFO("pte_row_height_linear   = 0x%0x\n", rq_regs->rq_regs_c.pte_row_height_linear);
+
+	DTN_INFO("========DLG========\n");
+	DTN_INFO("refcyc_h_blank_end                  = 0x%0x\n", dlg_regs->refcyc_h_blank_end);
+	DTN_INFO("dlg_vblank_end                      = 0x%0x\n", dlg_regs->dlg_vblank_end);
+	DTN_INFO("min_dst_y_next_start                = 0x%0x\n", dlg_regs->min_dst_y_next_start);
+	DTN_INFO("refcyc_per_htotal                   = 0x%0x\n", dlg_regs->refcyc_per_htotal);
+	DTN_INFO("refcyc_x_after_scaler               = 0x%0x\n", dlg_regs->refcyc_x_after_scaler);
+	DTN_INFO("dst_y_after_scaler                  = 0x%0x\n", dlg_regs->dst_y_after_scaler);
+	DTN_INFO("dst_y_prefetch                      = 0x%0x\n", dlg_regs->dst_y_prefetch);
+	DTN_INFO("dst_y_per_vm_vblank                 = 0x%0x\n", dlg_regs->dst_y_per_vm_vblank);
+	DTN_INFO("dst_y_per_row_vblank                = 0x%0x\n", dlg_regs->dst_y_per_row_vblank);
+	DTN_INFO("dst_y_per_vm_flip                   = 0x%0x\n", dlg_regs->dst_y_per_vm_flip);
+	DTN_INFO("dst_y_per_row_flip                  = 0x%0x\n", dlg_regs->dst_y_per_row_flip);
+	DTN_INFO("ref_freq_to_pix_freq                = 0x%0x\n", dlg_regs->ref_freq_to_pix_freq);
+	DTN_INFO("vratio_prefetch                     = 0x%0x\n", dlg_regs->vratio_prefetch);
+	DTN_INFO("vratio_prefetch_c                   = 0x%0x\n", dlg_regs->vratio_prefetch_c);
+	DTN_INFO("refcyc_per_pte_group_vblank_l       = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_l);
+	DTN_INFO("refcyc_per_pte_group_vblank_c       = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_c);
+	DTN_INFO("refcyc_per_meta_chunk_vblank_l      = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_l);
+	DTN_INFO("refcyc_per_meta_chunk_vblank_c      = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_c);
+	DTN_INFO("refcyc_per_pte_group_flip_l         = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_l);
+	DTN_INFO("refcyc_per_pte_group_flip_c         = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_c);
+	DTN_INFO("refcyc_per_meta_chunk_flip_l        = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_l);
+	DTN_INFO("refcyc_per_meta_chunk_flip_c        = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_c);
+	DTN_INFO("dst_y_per_pte_row_nom_l             = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_l);
+	DTN_INFO("dst_y_per_pte_row_nom_c             = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_c);
+	DTN_INFO("refcyc_per_pte_group_nom_l          = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_l);
+	DTN_INFO("refcyc_per_pte_group_nom_c          = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_c);
+	DTN_INFO("dst_y_per_meta_row_nom_l            = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_l);
+	DTN_INFO("dst_y_per_meta_row_nom_c            = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_c);
+	DTN_INFO("refcyc_per_meta_chunk_nom_l         = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_l);
+	DTN_INFO("refcyc_per_meta_chunk_nom_c         = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_c);
+	DTN_INFO("refcyc_per_line_delivery_pre_l      = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_l);
+	DTN_INFO("refcyc_per_line_delivery_pre_c      = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_c);
+	DTN_INFO("refcyc_per_line_delivery_l          = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_l);
+	DTN_INFO("refcyc_per_line_delivery_c          = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_c);
+	DTN_INFO("chunk_hdl_adjust_cur0               = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur0);
+	DTN_INFO("dst_y_offset_cur1                   = 0x%0x\n", dlg_regs->dst_y_offset_cur1);
+	DTN_INFO("chunk_hdl_adjust_cur1               = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur1);
+	DTN_INFO("vready_after_vcount0                = 0x%0x\n", dlg_regs->vready_after_vcount0);
+	DTN_INFO("dst_y_delta_drq_limit               = 0x%0x\n", dlg_regs->dst_y_delta_drq_limit);
+	DTN_INFO("xfc_reg_transfer_delay              = 0x%0x\n", dlg_regs->xfc_reg_transfer_delay);
+	DTN_INFO("xfc_reg_precharge_delay             = 0x%0x\n", dlg_regs->xfc_reg_precharge_delay);
+	DTN_INFO("xfc_reg_remote_surface_flip_latency = 0x%0x\n", dlg_regs->xfc_reg_remote_surface_flip_latency);
+
+	DTN_INFO("========TTU========\n");
+	DTN_INFO("qos_level_low_wm                  = 0x%0x\n", ttu_regs->qos_level_low_wm);
+	DTN_INFO("qos_level_high_wm                 = 0x%0x\n", ttu_regs->qos_level_high_wm);
+	DTN_INFO("min_ttu_vblank                    = 0x%0x\n", ttu_regs->min_ttu_vblank);
+	DTN_INFO("qos_level_flip                    = 0x%0x\n", ttu_regs->qos_level_flip);
+	DTN_INFO("refcyc_per_req_delivery_pre_l     = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_l);
+	DTN_INFO("refcyc_per_req_delivery_l         = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_l);
+	DTN_INFO("refcyc_per_req_delivery_pre_c     = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_c);
+	DTN_INFO("refcyc_per_req_delivery_c         = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_c);
+	DTN_INFO("refcyc_per_req_delivery_cur0      = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur0);
+	DTN_INFO("refcyc_per_req_delivery_pre_cur0  = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur0);
+	DTN_INFO("refcyc_per_req_delivery_cur1      = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur1);
+	DTN_INFO("refcyc_per_req_delivery_pre_cur1  = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur1);
+	DTN_INFO("qos_level_fixed_l                 = 0x%0x\n", ttu_regs->qos_level_fixed_l);
+	DTN_INFO("qos_ramp_disable_l                = 0x%0x\n", ttu_regs->qos_ramp_disable_l);
+	DTN_INFO("qos_level_fixed_c                 = 0x%0x\n", ttu_regs->qos_level_fixed_c);
+	DTN_INFO("qos_ramp_disable_c                = 0x%0x\n", ttu_regs->qos_ramp_disable_c);
+	DTN_INFO("qos_level_fixed_cur0              = 0x%0x\n", ttu_regs->qos_level_fixed_cur0);
+	DTN_INFO("qos_ramp_disable_cur0             = 0x%0x\n", ttu_regs->qos_ramp_disable_cur0);
+	DTN_INFO("qos_level_fixed_cur1              = 0x%0x\n", ttu_regs->qos_level_fixed_cur1);
+	DTN_INFO("qos_ramp_disable_cur1             = 0x%0x\n", ttu_regs->qos_ramp_disable_cur1);
+}
+
 void dcn10_log_hw_state(struct dc *dc)
 {
 	struct dc_context *dc_ctx = dc->ctx;
@@ -129,7 +227,7 @@ void dcn10_log_hw_state(struct dc *dc)
 		struct hubp *hubp = pool->hubps[i];
 		struct dcn_hubp_state s;
 
-		hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
+		hubp->funcs->hubp_read_state(hubp, &s);
 
 		DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh"
 				"  %6d  %8d  %7d  %8xh",
@@ -201,6 +299,20 @@ void dcn10_log_hw_state(struct dc *dc)
 	}
 	DTN_INFO("\n");
 
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct hubp *hubp = pool->hubps[i];
+		struct dcn_hubp_state s = {0};
+
+		if (!dc->current_state->res_ctx.pipe_ctx[i].stream)
+			continue;
+
+		hubp->funcs->hubp_read_state(hubp, &s);
+		DTN_INFO("RQ-DLG-TTU registers for HUBP%d:\n", i);
+		print_rq_dlg_ttu_regs(dc_ctx, &s);
+		DTN_INFO("\n");
+	}
+	DTN_INFO("\n");
+
 	log_mpc_crc(dc);
 
 	DTN_INFO_END();
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 9ced254e652c..3866147fb02a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -56,6 +56,25 @@ struct hubp {
 	bool power_gated;
 };
 
+struct dcn_hubp_state {
+	struct _vcs_dpi_display_dlg_regs_st dlg_attr;
+	struct _vcs_dpi_display_ttu_regs_st ttu_attr;
+	struct _vcs_dpi_display_rq_regs_st rq_regs;
+	uint32_t pixel_format;
+	uint32_t inuse_addr_hi;
+	uint32_t viewport_width;
+	uint32_t viewport_height;
+	uint32_t rotation_angle;
+	uint32_t h_mirror_en;
+	uint32_t sw_mode;
+	uint32_t dcc_en;
+	uint32_t blank_en;
+	uint32_t underflow_status;
+	uint32_t ttu_disable;
+	uint32_t min_ttu_vblank;
+	uint32_t qos_level_low_wm;
+	uint32_t qos_level_high_wm;
+};
 
 struct hubp_funcs {
 	void (*hubp_setup)(
@@ -121,6 +140,7 @@ struct hubp_funcs {
 
 	void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
 	void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
+	void (*hubp_read_state)(struct hubp *hubp, struct dcn_hubp_state *s);
 
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 77eb72874e90..3306e7b0b3e3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -183,6 +183,36 @@
 				FN(reg_name, f4), v4, \
 				FN(reg_name, f5), v5)
 
+#define REG_GET_6(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6)	\
+		generic_reg_get6(CTX, REG(reg_name), \
+				FN(reg_name, f1), v1, \
+				FN(reg_name, f2), v2, \
+				FN(reg_name, f3), v3, \
+				FN(reg_name, f4), v4, \
+				FN(reg_name, f5), v5, \
+				FN(reg_name, f6), v6)
+
+#define REG_GET_7(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7)	\
+		generic_reg_get7(CTX, REG(reg_name), \
+				FN(reg_name, f1), v1, \
+				FN(reg_name, f2), v2, \
+				FN(reg_name, f3), v3, \
+				FN(reg_name, f4), v4, \
+				FN(reg_name, f5), v5, \
+				FN(reg_name, f6), v6, \
+				FN(reg_name, f7), v7)
+
+#define REG_GET_8(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8)	\
+		generic_reg_get8(CTX, REG(reg_name), \
+				FN(reg_name, f1), v1, \
+				FN(reg_name, f2), v2, \
+				FN(reg_name, f3), v3, \
+				FN(reg_name, f4), v4, \
+				FN(reg_name, f5), v5, \
+				FN(reg_name, f6), v6, \
+				FN(reg_name, f7), v7, \
+				FN(reg_name, f8), v8)
+
 /* macro to poll and wait for a register field to read back given value */
 
 #define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try)	\
@@ -389,4 +419,30 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
 		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
 		uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
 
+uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6);
+
+uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+		uint8_t shift7, uint32_t mask7, uint32_t *field_value7);
+
+uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
+		uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */

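The new generic_reg_get6/7/8 helpers follow the same pattern as the existing
getters: one MMIO read, after which each field is recovered with
get_reg_field_value_ex(), i.e. (reg_val & mask) >> shift, with the REG_GET_N
macros supplying the per-field shift/mask pairs via FN(). A small standalone
sketch of that extraction (the register value and field layout below are
invented purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Mirrors get_reg_field_value_ex(): mask the field out, shift it down. */
static uint32_t get_field(uint32_t reg_val, uint32_t mask, uint8_t shift)
{
	return (reg_val & mask) >> shift;
}

int main(void)
{
	/* Hypothetical register layout: CHUNK_SIZE in bits 0-7,
	 * SWATH_HEIGHT in bits 8-11. */
	uint32_t reg_val = 0x0000053f;
	uint32_t chunk_size   = get_field(reg_val, 0x000000ff, 0);
	uint32_t swath_height = get_field(reg_val, 0x00000f00, 8);

	printf("chunk_size=0x%x swath_height=0x%x\n",
	       (unsigned)chunk_size, (unsigned)swath_height);
	return 0;
}
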
From a47654633596a63f14a9035b9c762f8aaf1e00a3 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Wed, 4 Apr 2018 16:03:38 -0400
Subject: [PATCH 0788/1286] drm/amd/display: add calculated clock logging to
 DTN

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 7dd130d15a67..e547f46d3516 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -311,7 +311,16 @@ void dcn10_log_hw_state(struct dc *dc)
 		print_rq_dlg_ttu_regs(dc_ctx, &s);
 		DTN_INFO("\n");
 	}
-	DTN_INFO("\n");
+
+	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
+		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
+			dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
+			dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+			dc->current_state->bw.dcn.calc_clk.dispclk_khz,
+			dc->current_state->bw.dcn.calc_clk.dppclk_khz,
+			dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
+			dc->current_state->bw.dcn.calc_clk.fclk_khz,
+			dc->current_state->bw.dcn.calc_clk.socclk_khz);
 
 	log_mpc_crc(dc);
 

From ad019f7b6db893271d13148d6d80001d0c23cdf9 Mon Sep 17 00:00:00 2001
From: Yue Hin Lau <Yuehin.Lau@amd.com>
Date: Mon, 9 Apr 2018 14:46:32 -0400
Subject: [PATCH 0789/1286] drm/amd/display: add missing colorspace for set
 black color

Signed-off-by: Yue Hin Lau <Yuehin.Lau@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/core/dc_hw_sequencer.c | 21 ++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 481f6928a9c0..83d121510ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -208,6 +208,7 @@ void color_space_to_black_color(
 	case COLOR_SPACE_YCBCR709:
 	case COLOR_SPACE_YCBCR601_LIMITED:
 	case COLOR_SPACE_YCBCR709_LIMITED:
+	case COLOR_SPACE_2020_YCBCR:
 		*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
 		break;
 
@@ -216,7 +217,25 @@ void color_space_to_black_color(
 			black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
 		break;
 
-	default:
+	/*
+	 * Remove the default label and add a case for every color space,
+	 * so that the compiler warns us whenever a new color space is
+	 * added but not handled here.
+	 */
+	case COLOR_SPACE_UNKNOWN:
+	case COLOR_SPACE_SRGB:
+	case COLOR_SPACE_XR_RGB:
+	case COLOR_SPACE_MSREF_SCRGB:
+	case COLOR_SPACE_XV_YCC_709:
+	case COLOR_SPACE_XV_YCC_601:
+	case COLOR_SPACE_2020_RGB_FULLRANGE:
+	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+	case COLOR_SPACE_ADOBERGB:
+	case COLOR_SPACE_DCIP3:
+	case COLOR_SPACE_DISPLAYNATIVE:
+	case COLOR_SPACE_DOLBYVISION:
+	case COLOR_SPACE_APPCTRL:
+	case COLOR_SPACE_CUSTOMPOINTS:
 		/* default is sRGB black (full range). */
 		*black_color =
 			black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];

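Dropping the default label is a deliberate use of the compiler's switch
exhaustiveness check: with -Wswitch (enabled by -Wall on gcc/clang), any
enumerator of the enum that is not listed in the switch produces a warning,
so adding a new color space forces this function to be revisited. A tiny
standalone example of the effect (enum and handler are hypothetical):

#include <stdio.h>

enum color_space { CS_SRGB, CS_YCBCR709, CS_2020_YCBCR };

/* No default label: if a new enumerator is added to enum color_space,
 * gcc/clang -Wswitch will warn that it is unhandled here. */
static const char *black_level(enum color_space cs)
{
	switch (cs) {
	case CS_SRGB:
		return "full-range RGB black";
	case CS_YCBCR709:
	case CS_2020_YCBCR:
		return "YUV black";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", black_level(CS_2020_YCBCR));
	return 0;
}
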
From f0c0761b38ac30b04d4fed436ff10e894ec0e525 Mon Sep 17 00:00:00 2001
From: Yongqiang Sun <yongqiang.sun@amd.com>
Date: Mon, 9 Apr 2018 16:15:20 -0400
Subject: [PATCH 0790/1286] drm/amd/display: Use dig enable to determine fast
 boot optimization.

Linux doesn't know the lid state, so it is better to check the
DIG enable value from the register.

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc_stream.h    |  1 -
 .../drm/amd/display/dc/dce/dce_link_encoder.c |  6 ++-
 .../drm/amd/display/dc/dce/dce_link_encoder.h |  2 +
 .../display/dc/dce110/dce110_hw_sequencer.c   | 47 ++++++-------------
 .../drm/amd/display/dc/inc/hw/link_encoder.h  |  1 +
 5 files changed, 21 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 11b3433d6432..d7e6d53bb383 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -92,7 +92,6 @@ struct dc_stream_state {
 	int phy_pix_clk;
 	enum signal_type signal;
 	bool dpms_off;
-	bool lid_state_closed;
 
 	struct dc_stream_status status;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 8167cad7bcf7..dbe3b26b6d9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -113,6 +113,7 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
 	.connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
 	.enable_hpd = dce110_link_encoder_enable_hpd,
 	.disable_hpd = dce110_link_encoder_disable_hpd,
+	.is_dig_enabled = dce110_is_dig_enabled,
 	.destroy = dce110_link_encoder_destroy
 };
 
@@ -535,8 +536,9 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
 		DP_SEC_GSP0_PRIORITY, 1);
 }
 
-static bool is_dig_enabled(const struct dce110_link_encoder *enc110)
+bool dce110_is_dig_enabled(struct link_encoder *enc)
 {
+	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
 	uint32_t value;
 
 	REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
@@ -1031,7 +1033,7 @@ void dce110_link_encoder_disable_output(
 	struct bp_transmitter_control cntl = { 0 };
 	enum bp_result result;
 
-	if (!is_dig_enabled(enc110)) {
+	if (!dce110_is_dig_enabled(enc)) {
 		/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
 		return;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 0ec3433d34b6..347069461a22 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -263,4 +263,6 @@ void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
 void dce110_psr_program_secondary_packet(struct link_encoder *enc,
 			unsigned int sdp_transmit_line_num_deadline);
 
+bool dce110_is_dig_enabled(struct link_encoder *enc);
+
 #endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index bd34193ad779..e70ccb9b6afe 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1471,15 +1471,15 @@ static void disable_vga_and_power_gate_all_controllers(
 	}
 }
 
-static bool is_eDP_lid_closed(struct dc_state *context)
+static struct dc_link *get_link_for_edp(struct dc *dc)
 {
 	int i;
 
-	for (i = 0; i < context->stream_count; i++) {
-		if (context->streams[i]->signal == SIGNAL_TYPE_EDP)
-			return context->streams[i]->lid_state_closed;
+	for (i = 0; i < dc->link_count; i++) {
+		if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
+			return dc->links[i];
 	}
-	return false;
+	return NULL;
 }
 
 static struct dc_link *get_link_for_edp_not_in_use(
@@ -1516,41 +1516,22 @@ static struct dc_link *get_link_for_edp_not_in_use(
  */
 void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 {
-	/* check eDP lid state and BIOS_SCRATCH_3 to determine fast boot optimization
-	 * UEFI boot
-	 *				edp_active_status_from_scratch		fast boot optimization
-	 * S4/S5 resume:
-	 * Lid Open		true								true
-	 * Lid Close	false								false
-	 *
-	 * S3 resume:
-	 * Lid Open		false								false
-	 * Lid Close	false								false
-	 *
-	 * Legacy boot:
-	 *				edp_active_status_from_scratch		fast boot optimization
-	 * S4/S5 resume:
-	 * Lid Open		true								true
-	 * Lid Close	true								false
-	 *
-	 * S3 resume:
-	 * Lid Open		false								false
-	 * Lid Close	false								false
-	 */
-	struct dc_bios *dcb = dc->ctx->dc_bios;
-	bool lid_state_closed = is_eDP_lid_closed(context);
 	struct dc_link *edp_link_to_turnoff = NULL;
-	bool edp_active_status_from_scratch =
-			(dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
+	struct dc_link *edp_link = get_link_for_edp(dc);
+	bool can_eDP_fast_boot_optimize = false;
 
-	/*Lid open*/
-	if (!lid_state_closed) {
+	if (edp_link) {
+		can_eDP_fast_boot_optimize =
+				edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
+	}
+
+	if (can_eDP_fast_boot_optimize) {
 		edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
 
 		/* if the OS doesn't light up eDP and an eDP link is available, we want to
 		 * disable it. If resuming from S4/S5, we should apply the optimization.
 		 */
-		if (!edp_link_to_turnoff && edp_active_status_from_scratch)
+		if (!edp_link_to_turnoff)
 			dc->apply_edp_fast_boot_optimization = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 54d8a1386142..cf6df2e7beb2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -149,6 +149,7 @@ struct link_encoder_funcs {
 		bool connect);
 	void (*enable_hpd)(struct link_encoder *enc);
 	void (*disable_hpd)(struct link_encoder *enc);
+	bool (*is_dig_enabled)(struct link_encoder *enc);
 	void (*destroy)(struct link_encoder **enc);
 };
 

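Instead of trusting an OS-supplied lid state, the driver now asks the
hardware whether VBIOS already enabled the DIG backend, and exposes the
query through the link_encoder_funcs table so each encoder generation can
provide its own implementation. A condensed standalone sketch of that
ops-table pattern (names shortened and the register read stubbed out):

#include <stdbool.h>
#include <stdio.h>

struct link_encoder;

/* Per-generation function table, analogous to link_encoder_funcs. */
struct link_encoder_funcs {
	bool (*is_dig_enabled)(struct link_encoder *enc);
};

struct link_encoder {
	const struct link_encoder_funcs *funcs;
	unsigned int dig_be_en_cntl;	/* stand-in for the real register */
};

static bool dce110_is_dig_enabled(struct link_encoder *enc)
{
	/* The real code does REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value). */
	return (enc->dig_be_en_cntl & 0x1) != 0;
}

static const struct link_encoder_funcs dce110_funcs = {
	.is_dig_enabled = dce110_is_dig_enabled,
};

int main(void)
{
	struct link_encoder enc = {
		.funcs = &dce110_funcs,
		.dig_be_en_cntl = 1,	/* pretend VBIOS lit the panel */
	};

	printf("fast boot optimization: %s\n",
	       enc.funcs->is_dig_enabled(&enc) ? "yes" : "no");
	return 0;
}
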
From a906dbb1e20f5791d728c7d9e2366b8acb4f1bb2 Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Mon, 16 Apr 2018 17:57:19 +0800
Subject: [PATCH 0791/1286] drm/amdgpu: add amdgpu_bo_param
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

amdgpu_bo_create has too many parameters and is used in
too many places. Collect them into one structure.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 75 ++++++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  9 +++
 2 files changed, 51 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 24f582c696cc..b33a7fdea7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -341,27 +341,25 @@ fail:
 	return false;
 }
 
-static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
-			       int byte_align, u32 domain,
-			       u64 flags, enum ttm_bo_type type,
-			       struct reservation_object *resv,
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+			       struct amdgpu_bo_param *bp,
 			       struct amdgpu_bo **bo_ptr)
 {
 	struct ttm_operation_ctx ctx = {
-		.interruptible = (type != ttm_bo_type_kernel),
+		.interruptible = (bp->type != ttm_bo_type_kernel),
 		.no_wait_gpu = false,
-		.resv = resv,
+		.resv = bp->resv,
 		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
 	};
 	struct amdgpu_bo *bo;
-	unsigned long page_align;
+	unsigned long page_align, size = bp->size;
 	size_t acc_size;
 	int r;
 
-	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	size = ALIGN(size, PAGE_SIZE);
 
-	if (!amdgpu_bo_validate_size(adev, size, domain))
+	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
 		return -ENOMEM;
 
 	*bo_ptr = NULL;
@@ -375,18 +373,18 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
-	bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
-					 AMDGPU_GEM_DOMAIN_GTT |
-					 AMDGPU_GEM_DOMAIN_CPU |
-					 AMDGPU_GEM_DOMAIN_GDS |
-					 AMDGPU_GEM_DOMAIN_GWS |
-					 AMDGPU_GEM_DOMAIN_OA);
+	bo->preferred_domains = bp->domain & (AMDGPU_GEM_DOMAIN_VRAM |
+					      AMDGPU_GEM_DOMAIN_GTT |
+					      AMDGPU_GEM_DOMAIN_CPU |
+					      AMDGPU_GEM_DOMAIN_GDS |
+					      AMDGPU_GEM_DOMAIN_GWS |
+					      AMDGPU_GEM_DOMAIN_OA);
 	bo->allowed_domains = bo->preferred_domains;
-	if (type != ttm_bo_type_kernel &&
+	if (bp->type != ttm_bo_type_kernel &&
 	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
-	bo->flags = flags;
+	bo->flags = bp->flags;
 
 #ifdef CONFIG_X86_32
 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
@@ -417,11 +415,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 #endif
 
 	bo->tbo.bdev = &adev->mman.bdev;
-	amdgpu_ttm_placement_from_domain(bo, domain);
+	amdgpu_ttm_placement_from_domain(bo, bp->domain);
 
-	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
+	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
 				 &bo->placement, page_align, &ctx, acc_size,
-				 NULL, resv, &amdgpu_ttm_bo_destroy);
+				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
 	if (unlikely(r != 0))
 		return r;
 
@@ -433,10 +431,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 	else
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
-	if (type == ttm_bo_type_kernel)
+	if (bp->type == ttm_bo_type_kernel)
 		bo->tbo.priority = 1;
 
-	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;
 
@@ -449,20 +447,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
 		bo->tbo.moving = dma_fence_get(fence);
 		dma_fence_put(fence);
 	}
-	if (!resv)
+	if (!bp->resv)
 		amdgpu_bo_unreserve(bo);
 	*bo_ptr = bo;
 
 	trace_amdgpu_bo_create(bo);
 
 	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
-	if (type == ttm_bo_type_device)
+	if (bp->type == ttm_bo_type_device)
 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
 	return 0;
 
 fail_unreserve:
-	if (!resv)
+	if (!bp->resv)
 		ww_mutex_unlock(&bo->tbo.resv->lock);
 	amdgpu_bo_unref(&bo);
 	return r;
@@ -472,16 +470,21 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 				   unsigned long size, int byte_align,
 				   struct amdgpu_bo *bo)
 {
+	struct amdgpu_bo_param bp = {
+		.size = size,
+		.byte_align = byte_align,
+		.domain = AMDGPU_GEM_DOMAIN_GTT,
+		.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+			AMDGPU_GEM_CREATE_SHADOW,
+		.type = ttm_bo_type_kernel,
+		.resv = bo->tbo.resv
+	};
 	int r;
 
 	if (bo->shadow)
 		return 0;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
-				AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-				AMDGPU_GEM_CREATE_SHADOW,
-				ttm_bo_type_kernel,
-				bo->tbo.resv, &bo->shadow);
+	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
 	if (!r) {
 		bo->shadow->parent = amdgpu_bo_ref(bo);
 		mutex_lock(&adev->shadow_list_lock);
@@ -498,11 +501,17 @@ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
 		     struct reservation_object *resv,
 		     struct amdgpu_bo **bo_ptr)
 {
-	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
+	struct amdgpu_bo_param bp = {
+		.size = size,
+		.byte_align = byte_align,
+		.domain = domain,
+		.flags = flags & ~AMDGPU_GEM_CREATE_SHADOW,
+		.type = type,
+		.resv = resv
+	};
 	int r;
 
-	r = amdgpu_bo_do_create(adev, size, byte_align, domain,
-				parent_flags, type, resv, bo_ptr);
+	r = amdgpu_bo_do_create(adev, &bp, bo_ptr);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 1e9fe85abcbb..4bb6f0a8d799 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -33,6 +33,15 @@
 
 #define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
 
+struct amdgpu_bo_param {
+	unsigned long			size;
+	int				byte_align;
+	u32				domain;
+	u64				flags;
+	enum ttm_bo_type		type;
+	struct reservation_object	*resv;
+};
+
 /* bo virtual addresses in a vm */
 struct amdgpu_bo_va_mapping {
 	struct amdgpu_bo_va		*bo_va;

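Folding the six arguments into struct amdgpu_bo_param is the classic
parameter-object refactor: call sites name only the fields they care about,
and a new field can later be added without touching every caller. A
standalone sketch of the idea (types and fields simplified; the real
structure lives in amdgpu_object.h):

#include <stdio.h>

/* Simplified stand-in for struct amdgpu_bo_param. */
struct bo_param {
	unsigned long size;
	int byte_align;
	unsigned int domain;
	unsigned long long flags;
};

static int bo_create(const struct bo_param *bp)
{
	printf("create: size=%lu align=%d domain=%u flags=%llu\n",
	       bp->size, bp->byte_align, bp->domain, bp->flags);
	return 0;
}

int main(void)
{
	/* Designated initializer: members not named are zeroed. */
	struct bo_param bp = {
		.size = 4096,
		.byte_align = 4096,
		.domain = 0x4,	/* e.g. a VRAM domain bit */
	};

	return bo_create(&bp);
}
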
From 3216c6b71d1e6a7dce2fd29c531e8c99c1b88c95 Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Mon, 16 Apr 2018 18:27:50 +0800
Subject: [PATCH 0792/1286] drm/amdgpu: use amdgpu_bo_param for
 amdgpu_bo_create v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After this, we can easily add a new parameter when needed.

v2:
a) rebase.
b) Initialize struct amdgpu_bo_param; a future new member may only
be used in one case, but every member should have its own initial
value.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Cc: christian.koenig@amd.com
Cc: Felix.Kuehling@amd.com
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c    | 12 +++-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 11 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 15 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c      | 17 ++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       | 11 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    | 58 +++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h    |  6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c     | 12 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c      | 16 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       | 15 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        | 26 ++++++---
 11 files changed, 129 insertions(+), 70 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4d36203ffb11..887702c59488 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -217,13 +217,19 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_bo_param bp;
 	int r;
 	uint64_t gpu_addr_tmp = 0;
 	void *cpu_ptr_tmp = NULL;
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
-			     NULL, &bo);
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
+	r = amdgpu_bo_create(adev, &bp, &bo);
 	if (r) {
 		dev_err(adev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d6e1479da38..c1b0cdb401dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1004,6 +1004,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
 	struct amdgpu_bo *bo;
+	struct amdgpu_bo_param bp;
 	int byte_align;
 	u32 alloc_domain;
 	u64 alloc_flags;
@@ -1069,8 +1070,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
 			va, size, domain_string(alloc_domain));
 
-	ret = amdgpu_bo_create(adev, size, byte_align,
-				alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = byte_align;
+	bp.domain = alloc_domain;
+	bp.flags = alloc_flags;
+	bp.type = ttm_bo_type_device;
+	bp.resv = NULL;
+	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret) {
 		pr_debug("Failed to create BO on domain %s. ret %d\n",
 				domain_string(alloc_domain), ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 02b849be083b..19cfff31f2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -75,13 +75,20 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 {
 	struct amdgpu_bo *dobj = NULL;
 	struct amdgpu_bo *sobj = NULL;
+	struct amdgpu_bo_param bp;
 	uint64_t saddr, daddr;
 	int r, n;
 	int time;
 
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = sdomain;
+	bp.flags = 0;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
 	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
-			     ttm_bo_type_kernel, NULL, &sobj);
+	r = amdgpu_bo_create(adev, &bp, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -93,8 +100,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
-			     ttm_bo_type_kernel, NULL, &dobj);
+	bp.domain = ddomain;
+	r = amdgpu_bo_create(adev, &bp, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cf0f186c6092..17d6b9fb6d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -113,12 +113,17 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->gart.robj == NULL) {
-		r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
-				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     ttm_bo_type_kernel, NULL,
-				     &adev->gart.robj);
+		struct amdgpu_bo_param bp;
+
+		memset(&bp, 0, sizeof(bp));
+		bp.size = adev->gart.table_size;
+		bp.byte_align = PAGE_SIZE;
+		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+		bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+		bp.type = ttm_bo_type_kernel;
+		bp.resv = NULL;
+		r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
 		if (r) {
 			return r;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 46b9ea4e6103..1200c5ba37da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -48,17 +48,24 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     struct drm_gem_object **obj)
 {
 	struct amdgpu_bo *bo;
+	struct amdgpu_bo_param bp;
 	int r;
 
+	memset(&bp, 0, sizeof(bp));
 	*obj = NULL;
 	/* At least align on page size */
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
 
+	bp.size = size;
+	bp.byte_align = alignment;
+	bp.type = type;
+	bp.resv = resv;
 retry:
-	r = amdgpu_bo_create(adev, size, alignment, initial_domain,
-			     flags, type, resv, &bo);
+	bp.flags = flags;
+	bp.domain = initial_domain;
+	r = amdgpu_bo_create(adev, &bp, &bo);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b33a7fdea7f2..cac65e32a0b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -191,14 +191,21 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 			      u32 domain, struct amdgpu_bo **bo_ptr,
 			      u64 *gpu_addr, void **cpu_addr)
 {
+	struct amdgpu_bo_param bp;
 	bool free = false;
 	int r;
 
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = align;
+	bp.domain = domain;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
+
 	if (!*bo_ptr) {
-		r = amdgpu_bo_create(adev, size, align, domain,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     ttm_bo_type_kernel, NULL, bo_ptr);
+		r = amdgpu_bo_create(adev, &bp, bo_ptr);
 		if (r) {
 			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
 				r);
@@ -470,20 +477,21 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 				   unsigned long size, int byte_align,
 				   struct amdgpu_bo *bo)
 {
-	struct amdgpu_bo_param bp = {
-		.size = size,
-		.byte_align = byte_align,
-		.domain = AMDGPU_GEM_DOMAIN_GTT,
-		.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-			AMDGPU_GEM_CREATE_SHADOW,
-		.type = ttm_bo_type_kernel,
-		.resv = bo->tbo.resv
-	};
+	struct amdgpu_bo_param bp;
 	int r;
 
 	if (bo->shadow)
 		return 0;
 
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = byte_align;
+	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+		AMDGPU_GEM_CREATE_SHADOW;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = bo->tbo.resv;
+
 	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
 	if (!r) {
 		bo->shadow->parent = amdgpu_bo_ref(bo);
@@ -495,34 +503,26 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 	return r;
 }
 
-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
-		     int byte_align, u32 domain,
-		     u64 flags, enum ttm_bo_type type,
-		     struct reservation_object *resv,
+int amdgpu_bo_create(struct amdgpu_device *adev,
+		     struct amdgpu_bo_param *bp,
 		     struct amdgpu_bo **bo_ptr)
 {
-	struct amdgpu_bo_param bp = {
-		.size = size,
-		.byte_align = byte_align,
-		.domain = domain,
-		.flags = flags & ~AMDGPU_GEM_CREATE_SHADOW,
-		.type = type,
-		.resv = resv
-	};
+	u64 flags = bp->flags;
 	int r;
 
-	r = amdgpu_bo_do_create(adev, &bp, bo_ptr);
+	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
 	if (r)
 		return r;
 
 	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
-		if (!resv)
+		if (!bp->resv)
 			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
 							NULL));
 
-		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+		r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));
 
-		if (!resv)
+		if (!bp->resv)
 			reservation_object_unlock((*bo_ptr)->tbo.resv);
 
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 4bb6f0a8d799..e9a21d991e77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -233,10 +233,8 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
 	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
-		     int byte_align, u32 domain,
-		     u64 flags, enum ttm_bo_type type,
-		     struct reservation_object *resv,
+int amdgpu_bo_create(struct amdgpu_device *adev,
+		     struct amdgpu_bo_param *bp,
 		     struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 			      unsigned long size, int align,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 4b584cb75bf4..713417b6d15d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -102,12 +102,18 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	struct reservation_object *resv = attach->dmabuf->resv;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_bo *bo;
+	struct amdgpu_bo_param bp;
 	int ret;
 
+	memset(&bp, 0, sizeof(bp));
+	bp.size = attach->dmabuf->size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
+	bp.flags = 0;
+	bp.type = ttm_bo_type_sg;
+	bp.resv = resv;
 	ww_mutex_lock(&resv->lock, NULL);
-	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
-			       AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
-			       resv, &bo);
+	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret)
 		goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 2dbe87591f81..d167e8ab76d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -33,6 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 	struct amdgpu_bo *vram_obj = NULL;
 	struct amdgpu_bo **gtt_obj = NULL;
+	struct amdgpu_bo_param bp;
 	uint64_t gart_addr, vram_addr;
 	unsigned n, size;
 	int i, r;
@@ -58,9 +59,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		r = 1;
 		goto out_cleanup;
 	}
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+	bp.flags = 0;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
-			     ttm_bo_type_kernel, NULL, &vram_obj);
+	r = amdgpu_bo_create(adev, &bp, &vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
 		goto out_cleanup;
@@ -79,9 +86,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		void **vram_start, **vram_end;
 		struct dma_fence *fence = NULL;
 
-		r = amdgpu_bo_create(adev, size, PAGE_SIZE,
-				     AMDGPU_GEM_DOMAIN_GTT, 0,
-				     ttm_bo_type_kernel, NULL, gtt_obj + i);
+		bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+		r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 29efaac6e3ed..dfd22db13fb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1316,6 +1316,7 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 {
 	struct ttm_operation_ctx ctx = { false, false };
+	struct amdgpu_bo_param bp;
 	int r = 0;
 	int i;
 	u64 vram_size = adev->gmc.visible_vram_size;
@@ -1323,17 +1324,21 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 	u64 size = adev->fw_vram_usage.size;
 	struct amdgpu_bo *bo;
 
+	memset(&bp, 0, sizeof(bp));
+	bp.size = adev->fw_vram_usage.size;
+	bp.byte_align = PAGE_SIZE;
+	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
 	adev->fw_vram_usage.va = NULL;
 	adev->fw_vram_usage.reserved_bo = NULL;
 
 	if (adev->fw_vram_usage.size > 0 &&
 		adev->fw_vram_usage.size <= vram_size) {
 
-		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
-				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     ttm_bo_type_kernel, NULL,
+		r = amdgpu_bo_create(adev, &bp,
 				     &adev->fw_vram_usage.reserved_bo);
 		if (r)
 			goto error_create;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f0fbc331aa30..9ec7c1041df2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -412,11 +412,16 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 		struct amdgpu_bo *pt;
 
 		if (!entry->base.bo) {
-			r = amdgpu_bo_create(adev,
-					     amdgpu_vm_bo_size(adev, level),
-					     AMDGPU_GPU_PAGE_SIZE,
-					     AMDGPU_GEM_DOMAIN_VRAM, flags,
-					     ttm_bo_type_kernel, resv, &pt);
+			struct amdgpu_bo_param bp;
+
+			memset(&bp, 0, sizeof(bp));
+			bp.size = amdgpu_vm_bo_size(adev, level);
+			bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+			bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+			bp.flags = flags;
+			bp.type = ttm_bo_type_kernel;
+			bp.resv = resv;
+			r = amdgpu_bo_create(adev, &bp, &pt);
 			if (r)
 				return r;
 
@@ -2368,6 +2373,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		   int vm_context, unsigned int pasid)
 {
+	struct amdgpu_bo_param bp;
 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
 		AMDGPU_VM_PTE_COUNT(adev) * 8);
 	unsigned ring_instance;
@@ -2422,8 +2428,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		flags |= AMDGPU_GEM_CREATE_SHADOW;
 
 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
-	r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
-			     ttm_bo_type_kernel, NULL, &vm->root.base.bo);
+	memset(&bp, 0, sizeof(bp));
+	bp.size = size;
+	bp.byte_align = align;
+	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+	bp.flags = flags;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = NULL;
+	r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo);
 	if (r)
 		goto error_free_sched_entity;
 

From 7951e376704773134cefcf0751e9042368226f15 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 13 Apr 2018 16:13:41 +0800
Subject: [PATCH 0793/1286] drm/amdgpu: Reserved vram for smu to save debug
 info.

v2: check the reserved vram size before allocating.

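For illustration, a minimal sketch of the size mapping used by the new
check (the helper name smu_pool_bytes is hypothetical; the driver does the
equivalent shift inline in amdgpu_device_check_smu_prv_buffer_size(), and
additionally requires roughly 3 GB of system memory for the 256/512 MB
pools and roughly 7 GB for the 1/2 GB pools):

	/* the module parameter encodes the pool size in 256 MB units */
	static inline uint64_t smu_pool_bytes(uint pool_size)
	{
		/* 1 -> 256 MB, 2 -> 512 MB, 4 -> 1 GB, 8 -> 2 GB */
		return (uint64_t)pool_size << 28;
	}
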
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    | 44 +++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h       |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c       |  6 +++
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 35 +++++++++++++++
 5 files changed, 88 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ea1b28536bfc..d64ef30fed47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -129,6 +129,7 @@ extern int amdgpu_lbpw;
 extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
+extern uint amdgpu_smu_memory_pool_size;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d7f2bbdfd348..5958e8112489 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -690,6 +690,8 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
 {
 	u64 size_af, size_bf;
 
+	mc->gart_size += adev->pm.smu_prv_buffer_size;
+
 	size_af = adev->gmc.mc_mask - mc->vram_end;
 	size_bf = mc->vram_start;
 	if (size_bf > size_af) {
@@ -907,6 +909,46 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 	}
 }
 
+static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
+{
+	struct sysinfo si;
+	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+	uint64_t total_memory;
+	uint64_t dram_size_seven_GB = 0x1B8000000;
+	uint64_t dram_size_three_GB = 0xB8000000;
+
+	if (amdgpu_smu_memory_pool_size == 0)
+		return;
+
+	if (!is_os_64) {
+		DRM_WARN("Not 64-bit OS, feature not supported\n");
+		goto def_value;
+	}
+	si_meminfo(&si);
+	total_memory = (uint64_t)si.totalram * si.mem_unit;
+
+	if ((amdgpu_smu_memory_pool_size == 1) ||
+		(amdgpu_smu_memory_pool_size == 2)) {
+		if (total_memory < dram_size_three_GB)
+			goto def_value1;
+	} else if ((amdgpu_smu_memory_pool_size == 4) ||
+		(amdgpu_smu_memory_pool_size == 8)) {
+		if (total_memory < dram_size_seven_GB)
+			goto def_value1;
+	} else {
+		DRM_WARN("Smu memory pool size not supported\n");
+		goto def_value;
+	}
+	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
+
+	return;
+
+def_value1:
+	DRM_WARN("No enough system memory\n");
+def_value:
+	adev->pm.smu_prv_buffer_size = 0;
+}
+
 /**
  * amdgpu_device_check_arguments - validate module params
  *
@@ -948,6 +990,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
 		amdgpu_vm_fragment_size = -1;
 	}
 
+	amdgpu_device_check_smu_prv_buffer_size(adev);
+
 	amdgpu_device_check_vm_size(adev);
 
 	amdgpu_device_check_block_size(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index b8c5177fa809..19d8bf590da3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -445,6 +445,8 @@ struct amdgpu_pm {
 	uint32_t                pcie_gen_mask;
 	uint32_t                pcie_mlw_mask;
 	struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
+	uint32_t                smu_prv_buffer_size;
+	struct amdgpu_bo        *smu_prv_buffer;
 };
 
 #define R600_SSTU_DFLT                               0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0b19482b36b8..5c0567ad1ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -132,6 +132,7 @@ int amdgpu_lbpw = -1;
 int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
+uint amdgpu_smu_memory_pool_size = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -316,6 +317,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
 module_param_named(cik_support, amdgpu_cik_support, int, 0444);
 #endif
 
+MODULE_PARM_DESC(smu_memory_pool_size,
+	"reserve gtt for smu debug usage, 0 = disable,"
+		"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef  CONFIG_DRM_AMDGPU_SI
 	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 66c49b89cdb4..6c8191444646 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -145,6 +145,37 @@ static int pp_hw_fini(void *handle)
 	return 0;
 }
 
+static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
+{
+	int r = -EINVAL;
+	void *cpu_ptr = NULL;
+	uint64_t gpu_addr;
+	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
+						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+						&adev->pm.smu_prv_buffer,
+						&gpu_addr,
+						&cpu_ptr)) {
+		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
+		return;
+	}
+
+	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
+		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
+					lower_32_bits((unsigned long)cpu_ptr),
+					upper_32_bits((unsigned long)cpu_ptr),
+					lower_32_bits(gpu_addr),
+					upper_32_bits(gpu_addr),
+					adev->pm.smu_prv_buffer_size);
+
+	if (r) {
+		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
+		adev->pm.smu_prv_buffer = NULL;
+		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
+	}
+}
+
 static int pp_late_init(void *handle)
 {
 	struct amdgpu_device *adev = handle;
@@ -156,6 +187,8 @@ static int pp_late_init(void *handle)
 					AMD_PP_TASK_COMPLETE_INIT, NULL);
 		mutex_unlock(&hwmgr->smu_lock);
 	}
+	if (adev->pm.smu_prv_buffer_size != 0)
+		pp_reserve_vram_for_smu(adev);
 	return 0;
 }
 
@@ -163,6 +196,8 @@ static void pp_late_fini(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 
+	if (adev->pm.smu_prv_buffer)
+		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
 	amd_powerplay_destroy(adev);
 }
 

From a0d454a67737162b0e4b1cc91612d7b25d5681b0 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 13 Apr 2018 16:16:49 +0800
Subject: [PATCH 0794/1286] drm/amd/pp: Remove dead interface

Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h       |  6 ----
 .../gpu/drm/amd/include/kgd_pp_interface.h    |  5 ----
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 30 -------------------
 3 files changed, 41 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 19d8bf590da3..354c6dc99481 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -349,12 +349,6 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
 			(adev)->powerplay.pp_handle, msg_id))
 
-#define amdgpu_dpm_notify_smu_memory_info(adev, virtual_addr_low, \
-			virtual_addr_hi, mc_addr_low, mc_addr_hi, size) \
-		((adev)->powerplay.pp_funcs->notify_smu_memory_info)( \
-			(adev)->powerplay.pp_handle, virtual_addr_low, \
-			virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
-
 #define amdgpu_dpm_get_power_profile_mode(adev, buf) \
 		((adev)->powerplay.pp_funcs->get_power_profile_mode(\
 			(adev)->powerplay.pp_handle, buf))
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 1bec9072e36f..01969b135ab4 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -239,11 +239,6 @@ struct amd_pm_funcs {
 	int (*load_firmware)(void *handle);
 	int (*wait_for_fw_loading_complete)(void *handle);
 	int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
-	int (*notify_smu_memory_info)(void *handle, uint32_t virtual_addr_low,
-					uint32_t virtual_addr_hi,
-					uint32_t mc_addr_low,
-					uint32_t mc_addr_hi,
-					uint32_t size);
 	int (*set_power_limit)(void *handle, uint32_t n);
 	int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
 /* export to DC */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 6c8191444646..bd0d387584ac 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -934,35 +934,6 @@ static int pp_dpm_switch_power_profile(void *handle,
 	return 0;
 }
 
-static int pp_dpm_notify_smu_memory_info(void *handle,
-					uint32_t virtual_addr_low,
-					uint32_t virtual_addr_hi,
-					uint32_t mc_addr_low,
-					uint32_t mc_addr_hi,
-					uint32_t size)
-{
-	struct pp_hwmgr *hwmgr = handle;
-	int ret = 0;
-
-	if (!hwmgr || !hwmgr->pm_en)
-		return -EINVAL;
-
-	if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
-		pr_info("%s was not implemented.\n", __func__);
-		return -EINVAL;
-	}
-
-	mutex_lock(&hwmgr->smu_lock);
-
-	ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
-					virtual_addr_hi, mc_addr_low, mc_addr_hi,
-					size);
-
-	mutex_unlock(&hwmgr->smu_lock);
-
-	return ret;
-}
-
 static int pp_set_power_limit(void *handle, uint32_t limit)
 {
 	struct pp_hwmgr *hwmgr = handle;
@@ -1229,7 +1200,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
 	.switch_power_profile = pp_dpm_switch_power_profile,
 	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
-	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
 	.get_power_profile_mode = pp_get_power_profile_mode,
 	.set_power_profile_mode = pp_set_power_profile_mode,
 	.odn_edit_dpm_table = pp_odn_edit_dpm_table,

From 8d80fada066bec682f1b7e9015b8412e3460c1b3 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Tue, 17 Apr 2018 17:26:26 +0800
Subject: [PATCH 0795/1286] drm/amd/pp: Fix bug voltage can't be OD separately
 on VI

Make sure to update the MCLK and SCLK flags when setting the VDDC
flags due to dependencies.

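For illustration, a rough sketch of the handling added below: a changed
VDDC entry in a clock dependency table forces that clock table to be
re-uploaded together with the voltage, since the per-level voltages are
carried in the clock tables.

	/* sketch, mirroring the MCLK branch of the hunk below */
	if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
		data->need_update_smu7_dpm_table |=
			DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
		return;
	}
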
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 68aae09a886a..720ac47d3365 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4679,23 +4679,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 
 	for (i=0; i < dep_table->count; i++) {
 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
-			break;
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+			return;
 		}
 	}
-	if (i == dep_table->count)
+	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+	}
 
 	dep_table = table_info->vdd_dep_on_sclk;
 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
 	for (i=0; i < dep_table->count; i++) {
 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
-			break;
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+			return;
 		}
 	}
-	if (i == dep_table->count)
+	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	}
 }
 
 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,

From 32d8c6620d49779600714f197611856ed503a7a5 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 17 Apr 2018 08:55:44 -0500
Subject: [PATCH 0796/1286] drm/amdgpu: print the vbios version in the debugfs
 firmware info

Useful for gathering info about which firmware versions are in use by
the driver.

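For example, the firmware info dump gains one extra line; the values below
are hypothetical and the debugfs path may vary per card:

	$ cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
	...
	VCN feature version: 0, firmware version: 0x0110901c
	VBIOS version: 113-D0500100-102
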
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 4e15b6fe2839..d602f8b14c58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -31,6 +31,7 @@
 #include "amdgpu_sched.h"
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
+#include "atom.h"
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
@@ -1089,6 +1090,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_amdgpu_info_firmware fw_info;
 	struct drm_amdgpu_query_fw query_fw;
+	struct atom_context *ctx = adev->mode_info.atom_context;
 	int ret, i;
 
 	/* VCE */
@@ -1211,6 +1213,9 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
 	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);
 
+
+	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
+
 	return 0;
 }
 

From a4b3996aeebbaafd2682f4db06bb5659e1653da7 Mon Sep 17 00:00:00 2001
From: Pixel Ding <Pixel.Ding@amd.com>
Date: Wed, 18 Apr 2018 04:33:26 -0400
Subject: [PATCH 0797/1286] drm/scheduler: always put last_sched fence in
 entity_fini
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix a potential memory leak: the scheduler main thread always holds
one reference on the last_sched fence.

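For context, a minimal sketch of the reference-counting convention this
relies on (not the scheduler code itself): whoever stores a fence pointer
holds one reference and must drop it when the pointer is overwritten or
torn down.

	/* both helpers are NULL-safe, so an unconditional put during
	 * entity cleanup is fine even if no job was ever scheduled */
	dma_fence_put(entity->last_scheduled);		/* drop the stored reference */
	entity->last_scheduled = dma_fence_get(fence);	/* take a reference on the new fence */
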
Signed-off-by: Pixel Ding <Pixel.Ding@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 44d21981bf3b..4968867da7a6 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -276,10 +276,10 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n", r);
 		}
-
-		dma_fence_put(entity->last_scheduled);
-		entity->last_scheduled = NULL;
 	}
+
+	dma_fence_put(entity->last_scheduled);
+	entity->last_scheduled = NULL;
 }
 EXPORT_SYMBOL(drm_sched_entity_cleanup);
 

From b5b4ea4d98b42f94442e5d46d5942f392ed8af56 Mon Sep 17 00:00:00 2001
From: Pixel Ding <Pixel.Ding@amd.com>
Date: Wed, 18 Apr 2018 04:37:40 -0400
Subject: [PATCH 0798/1286] drm/scheduler: move last_sched fence updating prior
 to job popping (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Make sure the main thread won't update the last_sched fence while the
entity is being cleaned up.

Fix a race caused by putting the last_sched fence twice. Running
vulkaninfo in a tight loop can reproduce the issue, showing up as a
wild fence pointer.

v2: squash in build fix (Christian)

Signed-off-by: Pixel Ding <Pixel.Ding@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 4968867da7a6..088ff2b4e8fb 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -402,6 +402,9 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	if (entity->guilty && atomic_read(entity->guilty))
 		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
 
+	dma_fence_put(entity->last_scheduled);
+	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
+
 	spsc_queue_pop(&entity->job_queue);
 	return sched_job;
 }
@@ -715,9 +718,6 @@ static int drm_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		drm_sched_fence_scheduled(s_fence);
 
-		dma_fence_put(entity->last_scheduled);
-		entity->last_scheduled = dma_fence_get(&s_fence->finished);
-
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &s_fence->cb,

From 58cd8fbc64b03d0e9961d627526bd07edbea00b9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Tue, 17 Apr 2018 14:47:42 +0200
Subject: [PATCH 0799/1286] drm/amdgpu: limit reg_write_reg_wait workaround to
 SRIOV v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It turned out that this locks up some bare-metal Vega10 boards.

v2: fix stupid typo

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 583f6f616dd3..6a19e0311a9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4144,7 +4144,12 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
 {
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
-	gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, ref, mask, 0x20);
+	if (amdgpu_sriov_vf(ring->adev))
+		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+				      ref, mask, 0x20);
+	else
+		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+							   ref, mask);
 }
 
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,

From aa2b2e2822831d78a283edb12cf8b7da21bdd0ed Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Tue, 17 Apr 2018 11:52:53 +0800
Subject: [PATCH 0800/1286] drm/amdgpu: set preferred_domain independent of
 fallback handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When GEM needs to fall back to GTT for VRAM BOs, we still want the
preferred domain to be left untouched so that the BO has a chance to
move back to VRAM in the future.

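A rough sketch of the intended behaviour (simplified, not the exact driver
code; the -ENOMEM handling is an assumption): bp.domain may be widened on
retry, while bp.preferred_domain keeps the caller's original request.

	bp.preferred_domain = initial_domain;	/* remembered for later migration */
retry:
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r == -ENOMEM && initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
		/* fall back to GTT for now; preferred_domains still says VRAM */
		initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
		goto retry;
	}
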
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 15 +++++++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  1 +
 3 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 1200c5ba37da..ff606ce88837 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -62,6 +62,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 	bp.byte_align = alignment;
 	bp.type = type;
 	bp.resv = resv;
+	bp.preferred_domain = initial_domain;
 retry:
 	bp.flags = flags;
 	bp.domain = initial_domain;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cac65e32a0b9..9258f0694922 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -360,6 +360,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	};
 	struct amdgpu_bo *bo;
 	unsigned long page_align, size = bp->size;
+	u32 preferred_domains;
 	size_t acc_size;
 	int r;
 
@@ -380,12 +381,14 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
-	bo->preferred_domains = bp->domain & (AMDGPU_GEM_DOMAIN_VRAM |
-					      AMDGPU_GEM_DOMAIN_GTT |
-					      AMDGPU_GEM_DOMAIN_CPU |
-					      AMDGPU_GEM_DOMAIN_GDS |
-					      AMDGPU_GEM_DOMAIN_GWS |
-					      AMDGPU_GEM_DOMAIN_OA);
+	preferred_domains = bp->preferred_domain ? bp->preferred_domain :
+		bp->domain;
+	bo->preferred_domains = preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+						     AMDGPU_GEM_DOMAIN_GTT |
+						     AMDGPU_GEM_DOMAIN_CPU |
+						     AMDGPU_GEM_DOMAIN_GDS |
+						     AMDGPU_GEM_DOMAIN_GWS |
+						     AMDGPU_GEM_DOMAIN_OA);
 	bo->allowed_domains = bo->preferred_domains;
 	if (bp->type != ttm_bo_type_kernel &&
 	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index e9a21d991e77..540e03fa159f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -37,6 +37,7 @@ struct amdgpu_bo_param {
 	unsigned long			size;
 	int				byte_align;
 	u32				domain;
+	u32				preferred_domain;
 	u64				flags;
 	enum ttm_bo_type		type;
 	struct reservation_object	*resv;

From 3f188453faf7ba5b59e8064df4afffbc946e25ec Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Tue, 17 Apr 2018 18:34:40 +0800
Subject: [PATCH 0801/1286] drm/amdgpu: handle domain mask checking v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If the domain is illegal, we should return an error.
v2:
  remove duplicated domain checking.

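From the userspace side, the check can be illustrated with this
hypothetical libdrm snippet (the invalid domain bit is made up):

	union drm_amdgpu_gem_create args = {};

	args.in.bo_size = 4096;
	args.in.alignment = 4096;
	args.in.domains = 0x100;	/* bit outside AMDGPU_GEM_DOMAIN_MASK */

	/* a domain bit outside the mask is rejected with -EINVAL */
	r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &args, sizeof(args));
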
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 7 +------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 9 +--------
 include/uapi/drm/amdgpu_drm.h              | 6 ++++++
 3 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ff606ce88837..c62c3dd4dcc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -229,12 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	/* reject invalid gem domains */
-	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
-				 AMDGPU_GEM_DOMAIN_GTT |
-				 AMDGPU_GEM_DOMAIN_VRAM |
-				 AMDGPU_GEM_DOMAIN_GDS |
-				 AMDGPU_GEM_DOMAIN_GWS |
-				 AMDGPU_GEM_DOMAIN_OA))
+	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
 		return -EINVAL;
 
 	/* create a gem object to contain this object in */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9258f0694922..feece0a491a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -360,7 +360,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	};
 	struct amdgpu_bo *bo;
 	unsigned long page_align, size = bp->size;
-	u32 preferred_domains;
 	size_t acc_size;
 	int r;
 
@@ -381,14 +380,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
-	preferred_domains = bp->preferred_domain ? bp->preferred_domain :
+	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
 		bp->domain;
-	bo->preferred_domains = preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
-						     AMDGPU_GEM_DOMAIN_GTT |
-						     AMDGPU_GEM_DOMAIN_CPU |
-						     AMDGPU_GEM_DOMAIN_GDS |
-						     AMDGPU_GEM_DOMAIN_GWS |
-						     AMDGPU_GEM_DOMAIN_OA);
 	bo->allowed_domains = bo->preferred_domains;
 	if (bp->type != ttm_bo_type_kernel &&
 	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index c363b67f2d0a..b193e95f1f24 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -78,6 +78,12 @@ extern "C" {
 #define AMDGPU_GEM_DOMAIN_GDS		0x8
 #define AMDGPU_GEM_DOMAIN_GWS		0x10
 #define AMDGPU_GEM_DOMAIN_OA		0x20
+#define AMDGPU_GEM_DOMAIN_MASK		(AMDGPU_GEM_DOMAIN_CPU | \
+					 AMDGPU_GEM_DOMAIN_GTT | \
+					 AMDGPU_GEM_DOMAIN_VRAM | \
+					 AMDGPU_GEM_DOMAIN_GDS | \
+					 AMDGPU_GEM_DOMAIN_GWS | \
+					 AMDGPU_GEM_DOMAIN_OA)
 
 /* Flag that CPU access will be required for the case of VRAM domain */
 #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)

From d240cd9eddd943dbe0267d081697195ff1e90b65 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= <marek.olsak@amd.com>
Date: Tue, 3 Apr 2018 13:05:03 -0400
Subject: [PATCH 0802/1286] drm/amdgpu: optionally do a writeback but don't
 invalidate TC for IB fences
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A new IB flag enables this behavior. Full invalidation is unnecessary
for RELEASE_MEM and doesn't make sense when draw calls from two
adjacent gfx IBs run in parallel. This will be the new default for
Mesa.

v2: bump the version

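A hypothetical userspace sketch of how a UMD could request the new
behaviour per IB (the va/size values are placeholders, and the UMD should
first check that the DRM minor version is at least 26):

	struct drm_amdgpu_cs_chunk_ib ib_chunk = {
		.flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE,	/* writeback only, no TC invalidate */
		.ip_type = AMDGPU_HW_IP_GFX,
		.va_start = ib_va,	/* placeholder: GPU VA of the IB */
		.ib_bytes = ib_size,	/* placeholder: IB size in bytes */
	};
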
Signed-off-by: Marek Olšák <marek.olsak@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c |  5 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c    |  8 ++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  |  4 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c     | 11 +++++++----
 drivers/gpu/drm/amd/amdgpu/soc15d.h       |  1 +
 include/uapi/drm/amdgpu_drm.h             |  4 ++++
 8 files changed, 27 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 5c0567ad1ba7..7c17a0bc2cd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -75,9 +75,10 @@
  * - 3.23.0 - Add query for VRAM lost counter
  * - 3.24.0 - Add high priority compute support for gfx9
  * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
+ * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	25
+#define KMS_DRIVER_MINOR	26
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 97449e06a242..d09fcab2398f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -131,7 +131,8 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+		      unsigned flags)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
@@ -149,7 +150,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
 		       adev->fence_context + ring->idx,
 		       seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
-			       seq, AMDGPU_FENCE_FLAG_INT);
+			       seq, flags | AMDGPU_FENCE_FLAG_INT);
 
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	/* This function can't be called concurrently anyway, otherwise
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 311589e02d17..f70eeed9ed76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -127,6 +127,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	struct amdgpu_vm *vm;
 	uint64_t fence_ctx;
 	uint32_t status = 0, alloc_size;
+	unsigned fence_flags = 0;
 
 	unsigned i;
 	int r = 0;
@@ -227,7 +228,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 #endif
 		amdgpu_asic_invalidate_hdp(adev, ring);
 
-	r = amdgpu_fence_emit(ring, f);
+	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
+		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+
+	r = amdgpu_fence_emit(ring, f, fence_flags);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vmid)
@@ -242,7 +246,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	/* wrap the last IB with fence */
 	if (job && job->uf_addr) {
 		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
-				       AMDGPU_FENCE_FLAG_64BIT);
+				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
 	}
 
 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 08fcdf6f7b53..4f8dac2d36a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -42,6 +42,7 @@
 
 #define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
+#define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
 enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_GFX,
@@ -90,7 +91,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   unsigned irq_type);
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
+		      unsigned flags);
 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9ec7c1041df2..9c2195a2896d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -633,7 +633,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 
 	if (vm_flush_needed || pasid_mapping_needed) {
-		r = amdgpu_fence_emit(ring, &fence);
+		r = amdgpu_fence_emit(ring, &fence, 0);
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6a19e0311a9c..05b2d34110b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3775,13 +3775,16 @@ static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 {
 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
 
 	/* RELEASE_MEM - flush caches, send int */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
-	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
-				 EOP_TC_ACTION_EN |
-				 EOP_TC_WB_ACTION_EN |
-				 EOP_TC_MD_ACTION_EN |
+	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
+					       EOP_TC_NC_ACTION_EN) :
+					      (EOP_TCL1_ACTION_EN |
+					       EOP_TC_ACTION_EN |
+					       EOP_TC_WB_ACTION_EN |
+					       EOP_TC_MD_ACTION_EN)) |
 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
 				 EVENT_INDEX(5)));
 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index 7f408f85fdb6..839a144c1645 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -159,6 +159,7 @@
 #define		EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
 #define		EOP_TCL1_ACTION_EN                      (1 << 16)
 #define		EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
+#define		EOP_TC_NC_ACTION_EN			(1 << 19)
 #define		EOP_TC_MD_ACTION_EN			(1 << 21) /* L2 metadata */
 
 #define		DATA_SEL(x)                             ((x) << 29)
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index b193e95f1f24..78fe828f2f79 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -526,6 +526,10 @@ union drm_amdgpu_cs {
 /* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
 #define AMDGPU_IB_FLAG_PREEMPT (1<<2)
 
+/* The IB fence should do the L2 writeback but not invalidate any shader
+ * caches (L2/vL1/sL1/I$). */
+#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
+
 struct drm_amdgpu_cs_chunk_ib {
 	__u32 _pad;
 	/** AMDGPU_IB_FLAG_* */

From 7fd645f258711a4ea4d777188949494f9e68b787 Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Wed, 18 Apr 2018 18:35:09 +0800
Subject: [PATCH 0803/1286] drm/amdgpu: fix list not initialized
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Otherwise, the CPU gets stuck for 22 seconds and the kernel panics.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9c2195a2896d..8c34060e130f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1568,10 +1568,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		 * the evicted list so that it gets validated again on the
 		 * next command submission.
 		 */
+		list_del_init(&bo_va->base.vm_status);
 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
 			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
-		else
-			list_del_init(&bo_va->base.vm_status);
 	} else {
 		list_del_init(&bo_va->base.vm_status);
 	}

From 6197ae28911841369ff61ebbdf9d732ff6069138 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 19 Apr 2018 12:40:15 +0800
Subject: [PATCH 0804/1286] drm/amd/pp: Fix NULL pointer check error in
 smu_set_watermarks_for_clocks_ranges

It is caused by
'commit d6c9a7dc86cd ("drm/amd/pp: Move common code to smu_helper.c")'

Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 7c23741619b6..93a3d022ba47 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -657,7 +657,7 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
 	uint32_t i;
 	struct watermarks *table = wt_table;
 
-	if (!table || wm_with_clock_ranges)
+	if (!table || !wm_with_clock_ranges)
 		return -EINVAL;
 
 	if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)

From bfa8eea29b12e403b391820b7ef5cf5c77ab0afe Mon Sep 17 00:00:00 2001
From: Flora Cui <Flora.Cui@amd.com>
Date: Wed, 18 Apr 2018 17:12:19 +0800
Subject: [PATCH 0805/1286] drm/amdgpu: init gfx9 aperture settings

Fix the gfx9 aperture settings: program both the private and shared
aperture bases in SH_MEM_BASES.

Signed-off-by: Flora Cui <Flora.Cui@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 ++++-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 05b2d34110b7..587a8731fa31 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1617,7 +1617,10 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
 			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
-			tmp = adev->gmc.shared_aperture_start >> 48;
+			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
+				(adev->gmc.private_aperture_start >> 48));
+			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
+				(adev->gmc.shared_aperture_start >> 48));
 			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
 		}
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e6b00b507d4d..6c9f7f999532 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -562,8 +562,7 @@ static int gmc_v9_0_early_init(void *handle)
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 	adev->gmc.shared_aperture_end =
 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
-	adev->gmc.private_aperture_start =
-		adev->gmc.shared_aperture_end + 1;
+	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
 	adev->gmc.private_aperture_end =
 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 

From bb475839eca7e3990f59a3b4e9e810635ef0ac4a Mon Sep 17 00:00:00 2001
From: Junwei Zhang <Jerry.Zhang@amd.com>
Date: Thu, 19 Apr 2018 13:17:26 +0800
Subject: [PATCH 0806/1286] drm/amdgpu: simplify bo_va list when vm bo update
 (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

v2: fix compile warning

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8c34060e130f..6a372ca11ee3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1509,6 +1509,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct drm_mm_node *nodes;
 	struct dma_fence *exclusive, **last_update;
 	uint64_t flags;
+	uint32_t mem_type;
 	int r;
 
 	if (clear || !bo_va->base.bo) {
@@ -1561,19 +1562,16 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	}
 
 	spin_lock(&vm->status_lock);
-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-		unsigned mem_type = bo->tbo.mem.mem_type;
+	list_del_init(&bo_va->base.vm_status);
 
-		/* If the BO is not in its preferred location add it back to
-		 * the evicted list so that it gets validated again on the
-		 * next command submission.
-		 */
-		list_del_init(&bo_va->base.vm_status);
-		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
-			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
-	} else {
-		list_del_init(&bo_va->base.vm_status);
-	}
+	/* If the BO is not in its preferred location add it back to
+	 * the evicted list so that it gets validated again on the
+	 * next command submission.
+	 */
+	mem_type = bo->tbo.mem.mem_type;
+	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+	    !(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+		list_add_tail(&bo_va->base.vm_status, &vm->evicted);
 	spin_unlock(&vm->status_lock);
 
 	list_splice_init(&bo_va->invalids, &bo_va->valids);

From 1a3132a1cc03abcf153d08f4eb471cd7d396f2a3 Mon Sep 17 00:00:00 2001
From: Kenneth Feng <kenneth.feng@amd.com>
Date: Tue, 17 Apr 2018 21:49:51 +0800
Subject: [PATCH 0807/1286] drm/amd/powerplay: header file interface to SMU
 update

update vega12 smu interface.

Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index fb696e3d06cf..2f8a3b983cce 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,8 +412,10 @@ typedef struct {
   QuadraticInt_t    ReservedEquation2;
   QuadraticInt_t    ReservedEquation3;
 
+	uint16_t     MinVoltageUlvGfx;
+	uint16_t     MinVoltageUlvSoc;
 
-  uint32_t     Reserved[15];
+	uint32_t     Reserved[14];
 
 
 

From b1f223c02a3a2b41847f48f75797eba5979ea25d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Sun, 25 Mar 2018 10:10:25 +0200
Subject: [PATCH 0808/1286] drm/amdgpu: print DMA-buf status in debugfs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Just note if a BO was imported/exported.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index c62c3dd4dcc6..7d3dc229fa47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -780,6 +780,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 	struct seq_file *m = data;
 
+	struct dma_buf_attachment *attachment;
+	struct dma_buf *dma_buf;
 	unsigned domain;
 	const char *placement;
 	unsigned pin_count;
@@ -808,6 +810,15 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	pin_count = READ_ONCE(bo->pin_count);
 	if (pin_count)
 		seq_printf(m, " pin count %d", pin_count);
+
+	dma_buf = READ_ONCE(bo->gem_base.dma_buf);
+	attachment = READ_ONCE(bo->gem_base.import_attach);
+
+	if (attachment)
+		seq_printf(m, " imported from %p", dma_buf);
+	else if (dma_buf)
+		seq_printf(m, " exported as %p", dma_buf);
+
 	seq_printf(m, "\n");
 
 	return 0;

From e0e93d03efa1c53012cc609fd48112df3e06da69 Mon Sep 17 00:00:00 2001
From: Kenneth Feng <kenneth.feng@amd.com>
Date: Fri, 20 Apr 2018 13:55:39 +0800
Subject: [PATCH 0809/1286] drm/amd/powerplay: add registry key to disable ACG

For dummy ACG fuses, we need to disable ACG; otherwise corruption
will occur.

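For illustration, a minimal sketch of the gating added below; whether the
feature mask bit is ultimately driven by the amdgpu ppfeaturemask module
parameter is an assumption here, not something this patch touches.

	/* with the ACG bit cleared in the feature mask, both threshold
	 * frequencies are forced to 0xFFFF, which disables ACG */
	if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
		ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
		ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
	}
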
Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 5 +++++
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h                    | 1 +
 2 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 7fa1ba89ac54..888ddca902d8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -224,6 +224,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
 	ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
 	ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
 
+	/* 0xFFFF will disable the ACG feature */
+	if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
+		ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
+		ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 9b3dd7dce4e2..2f203ec3d19c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -82,6 +82,7 @@ enum PP_FEATURE_MASK {
 	PP_SOCCLK_DPM_MASK = 0x1000,
 	PP_DCEFCLK_DPM_MASK = 0x2000,
 	PP_OVERDRIVE_MASK = 0x4000,
+	PP_ACG_MASK = 0x10000,
 };
 
 enum PHM_BackEnd_Magic {

From cf671071334ebbf6c960f88383b35b99d5d53212 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Tue, 5 Dec 2017 18:48:48 +0800
Subject: [PATCH 0810/1286] drm/amdgpu: update psp gfx if header
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 67 +++++++++++++++++--------
 1 file changed, 46 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 8da6da90b1c9..0cf48d26c676 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -40,11 +40,20 @@ enum psp_gfx_crtl_cmd_id
     GFX_CTRL_CMD_ID_INIT_GPCOM_RING = 0x00020000,   /* initialize GPCOM ring */
     GFX_CTRL_CMD_ID_DESTROY_RINGS   = 0x00030000,   /* destroy rings */
     GFX_CTRL_CMD_ID_CAN_INIT_RINGS  = 0x00040000,   /* is it allowed to initialized the rings */
+    GFX_CTRL_CMD_ID_ENABLE_INT      = 0x00050000,   /* enable PSP-to-Gfx interrupt */
+    GFX_CTRL_CMD_ID_DISABLE_INT     = 0x00060000,   /* disable PSP-to-Gfx interrupt */
+    GFX_CTRL_CMD_ID_MODE1_RST       = 0x00070000,   /* trigger the Mode 1 reset */
 
     GFX_CTRL_CMD_ID_MAX             = 0x000F0000,   /* max command ID */
 };
 
 
+/*-----------------------------------------------------------------------------
+    NOTE:   All physical addresses used in this interface are actually
+            GPU Virtual Addresses.
+*/
+
+
 /* Control registers of the TEE Gfx interface. These are located in
 *  SRBM-to-PSP mailbox registers (total 8 registers).
 */
@@ -55,8 +64,8 @@ struct psp_gfx_ctrl
     volatile uint32_t   rbi_rptr;         /* +8   Read pointer (index) of RBI ring */
     volatile uint32_t   gpcom_wptr;       /* +12  Write pointer (index) of GPCOM ring */
     volatile uint32_t   gpcom_rptr;       /* +16  Read pointer (index) of GPCOM ring */
-    volatile uint32_t   ring_addr_lo;     /* +20  bits [31:0] of physical address of ring buffer */
-    volatile uint32_t   ring_addr_hi;     /* +24  bits [63:32] of physical address of ring buffer */
+    volatile uint32_t   ring_addr_lo;     /* +20  bits [31:0] of GPU Virtual of ring buffer (VMID=0)*/
+    volatile uint32_t   ring_addr_hi;     /* +24  bits [63:32] of GPU Virtual of ring buffer (VMID=0) */
     volatile uint32_t   ring_buf_size;    /* +28  Ring buffer size (in bytes) */
 
 };
@@ -78,6 +87,8 @@ enum psp_gfx_cmd_id
     GFX_CMD_ID_LOAD_ASD     = 0x00000004,   /* load ASD Driver */
     GFX_CMD_ID_SETUP_TMR    = 0x00000005,   /* setup TMR region */
     GFX_CMD_ID_LOAD_IP_FW   = 0x00000006,   /* load HW IP FW */
+    GFX_CMD_ID_DESTROY_TMR  = 0x00000007,   /* destroy TMR region */
+    GFX_CMD_ID_SAVE_RESTORE = 0x00000008,   /* save/restore HW IP FW */
 
 };
 
@@ -85,11 +96,11 @@ enum psp_gfx_cmd_id
 /* Command to load Trusted Application binary into PSP OS. */
 struct psp_gfx_cmd_load_ta
 {
-    uint32_t        app_phy_addr_lo;        /* bits [31:0] of the physical address of the TA binary (must be 4 KB aligned) */
-    uint32_t        app_phy_addr_hi;        /* bits [63:32] of the physical address of the TA binary */
+    uint32_t        app_phy_addr_lo;        /* bits [31:0] of the GPU Virtual address of the TA binary (must be 4 KB aligned) */
+    uint32_t        app_phy_addr_hi;        /* bits [63:32] of the GPU Virtual address of the TA binary */
     uint32_t        app_len;                /* length of the TA binary in bytes */
-    uint32_t        cmd_buf_phy_addr_lo;    /* bits [31:0] of the physical address of CMD buffer (must be 4 KB aligned) */
-    uint32_t        cmd_buf_phy_addr_hi;    /* bits [63:32] of the physical address of CMD buffer */
+    uint32_t        cmd_buf_phy_addr_lo;    /* bits [31:0] of the GPU Virtual address of CMD buffer (must be 4 KB aligned) */
+    uint32_t        cmd_buf_phy_addr_hi;    /* bits [63:32] of the GPU Virtual address of CMD buffer */
     uint32_t        cmd_buf_len;            /* length of the CMD buffer in bytes; must be multiple of 4 KB */
 
     /* Note: CmdBufLen can be set to 0. In this case no persistent CMD buffer is provided
@@ -111,8 +122,8 @@ struct psp_gfx_cmd_unload_ta
 */
 struct psp_gfx_buf_desc
 {
-    uint32_t        buf_phy_addr_lo;       /* bits [31:0] of physical address of the buffer (must be 4 KB aligned) */
-    uint32_t        buf_phy_addr_hi;       /* bits [63:32] of physical address of the buffer */
+    uint32_t        buf_phy_addr_lo;       /* bits [31:0] of GPU Virtual address of the buffer (must be 4 KB aligned) */
+    uint32_t        buf_phy_addr_hi;       /* bits [63:32] of GPU Virtual address of the buffer */
     uint32_t        buf_size;              /* buffer size in bytes (must be multiple of 4 KB and no bigger than 64 MB) */
 
 };
@@ -145,8 +156,8 @@ struct psp_gfx_cmd_invoke_cmd
 /* Command to setup TMR region. */
 struct psp_gfx_cmd_setup_tmr
 {
-    uint32_t        buf_phy_addr_lo;       /* bits [31:0] of physical address of TMR buffer (must be 4 KB aligned) */
-    uint32_t        buf_phy_addr_hi;       /* bits [63:32] of physical address of TMR buffer */
+    uint32_t        buf_phy_addr_lo;       /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */
+    uint32_t        buf_phy_addr_hi;       /* bits [63:32] of GPU Virtual address of TMR buffer */
     uint32_t        buf_size;              /* buffer size in bytes (must be multiple of 4 KB) */
 
 };
@@ -174,18 +185,32 @@ enum psp_gfx_fw_type
     GFX_FW_TYPE_ISP         = 16,
     GFX_FW_TYPE_ACP         = 17,
     GFX_FW_TYPE_SMU         = 18,
+    GFX_FW_TYPE_MMSCH       = 19,
+    GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM        = 20,
+    GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM        = 21,
+    GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL           = 22,
+    GFX_FW_TYPE_MAX         = 23
 };
 
 /* Command to load HW IP FW. */
 struct psp_gfx_cmd_load_ip_fw
 {
-    uint32_t                fw_phy_addr_lo;    /* bits [31:0] of physical address of FW location (must be 4 KB aligned) */
-    uint32_t                fw_phy_addr_hi;    /* bits [63:32] of physical address of FW location */
+    uint32_t                fw_phy_addr_lo;    /* bits [31:0] of GPU Virtual address of FW location (must be 4 KB aligned) */
+    uint32_t                fw_phy_addr_hi;    /* bits [63:32] of GPU Virtual address of FW location */
     uint32_t                fw_size;           /* FW buffer size in bytes */
     enum psp_gfx_fw_type    fw_type;           /* FW type */
 
 };
 
+/* Command to save/restore HW IP FW. */
+struct psp_gfx_cmd_save_restore_ip_fw
+{
+    uint32_t                save_fw;              /* if set, command is used for saving fw otherwise for resetoring*/
+    uint32_t                save_restore_addr_lo; /* bits [31:0] of FB address of GART memory used as save/restore buffer (must be 4 KB aligned) */
+    uint32_t                save_restore_addr_hi; /* bits [63:32] of FB address of GART memory used as save/restore buffer */
+    uint32_t                buf_size;             /* Size of the save/restore buffer in bytes */
+    enum psp_gfx_fw_type    fw_type;              /* FW type */
+};
 
 /* All GFX ring buffer commands. */
 union psp_gfx_commands
@@ -195,7 +220,7 @@ union psp_gfx_commands
     struct psp_gfx_cmd_invoke_cmd       cmd_invoke_cmd;
     struct psp_gfx_cmd_setup_tmr        cmd_setup_tmr;
     struct psp_gfx_cmd_load_ip_fw       cmd_load_ip_fw;
-
+    struct psp_gfx_cmd_save_restore_ip_fw cmd_save_restore_ip_fw;
 };
 
 
@@ -226,8 +251,8 @@ struct psp_gfx_cmd_resp
 
     /* These fields are used for RBI only. They are all 0 in GPCOM commands
     */
-    uint32_t        resp_buf_addr_lo;   /* +12 bits [31:0] of physical address of response buffer (must be 4 KB aligned) */
-    uint32_t        resp_buf_addr_hi;   /* +16 bits [63:32] of physical address of response buffer */
+    uint32_t        resp_buf_addr_lo;   /* +12 bits [31:0] of GPU Virtual address of response buffer (must be 4 KB aligned) */
+    uint32_t        resp_buf_addr_hi;   /* +16 bits [63:32] of GPU Virtual address of response buffer */
     uint32_t        resp_offset;        /* +20 offset within response buffer */
     uint32_t        resp_buf_size;      /* +24 total size of the response buffer in bytes */
 
@@ -251,19 +276,19 @@ struct psp_gfx_cmd_resp
 /* Structure of the Ring Buffer Frame */
 struct psp_gfx_rb_frame
 {
-    uint32_t    cmd_buf_addr_lo;    /* +0  bits [31:0] of physical address of command buffer (must be 4 KB aligned) */
-    uint32_t    cmd_buf_addr_hi;    /* +4  bits [63:32] of physical address of command buffer */
+    uint32_t    cmd_buf_addr_lo;    /* +0  bits [31:0] of GPU Virtual address of command buffer (must be 4 KB aligned) */
+    uint32_t    cmd_buf_addr_hi;    /* +4  bits [63:32] of GPU Virtual address of command buffer */
     uint32_t    cmd_buf_size;       /* +8  command buffer size in bytes */
-    uint32_t    fence_addr_lo;      /* +12 bits [31:0] of physical address of Fence for this frame */
-    uint32_t    fence_addr_hi;      /* +16 bits [63:32] of physical address of Fence for this frame */
+    uint32_t    fence_addr_lo;      /* +12 bits [31:0] of GPU Virtual address of Fence for this frame */
+    uint32_t    fence_addr_hi;      /* +16 bits [63:32] of GPU Virtual address of Fence for this frame */
     uint32_t    fence_value;        /* +20 Fence value */
     uint32_t    sid_lo;             /* +24 bits [31:0] of SID value (used only for RBI frames) */
     uint32_t    sid_hi;             /* +28 bits [63:32] of SID value (used only for RBI frames) */
     uint8_t     vmid;               /* +32 VMID value used for mapping of all addresses for this frame */
     uint8_t     frame_type;         /* +33 1: destory context frame, 0: all other frames; used only for RBI frames */
     uint8_t     reserved1[2];       /* +34 reserved, must be 0 */
-    uint32_t    reserved2[7];       /* +40 reserved, must be 0 */
-    /* total 64 bytes */
+    uint32_t    reserved2[7];       /* +36 reserved, must be 0 */
+                /* total 64 bytes */
 };
 
 #endif /* _PSP_TEE_GFX_IF_H_ */

From d40e9b13c8bad15e56f2e8c9572f62c1229833a6 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Mon, 22 Jan 2018 17:51:35 +0800
Subject: [PATCH 0811/1286] drm/amdgpu: add new rlc firmware header format v2.1
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 34 +++++++++++++++++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 19 +++++++++++++
 2 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index dd6f98921918..84d652599d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -161,8 +161,38 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
 			  le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
 		DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
 			  le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
-		DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
-			  le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
+		DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
+		if (version_minor == 1) {
+			const struct rlc_firmware_header_v2_1 *v2_1 =
+				container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+			DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
+				  le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
+			DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
+			DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
+			DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
+			DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
+			DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
+			DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
+			DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
+			DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
+			DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
+			DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
+			DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
+			DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
+				  le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
+		}
 	} else {
 		DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 30b5500dc152..0b262f4bb4fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -98,6 +98,24 @@ struct rlc_firmware_header_v2_0 {
 	uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */
 };
 
+/* version_major=2, version_minor=1 */
+struct rlc_firmware_header_v2_1 {
+	struct rlc_firmware_header_v2_0 v2_0;
+	uint32_t reg_list_format_direct_reg_list_length; /* length of direct reg list format array */
+	uint32_t save_restore_list_cntl_ucode_ver;
+	uint32_t save_restore_list_cntl_feature_ver;
+	uint32_t save_restore_list_cntl_size_bytes;
+	uint32_t save_restore_list_cntl_offset_bytes;
+	uint32_t save_restore_list_gpm_ucode_ver;
+	uint32_t save_restore_list_gpm_feature_ver;
+	uint32_t save_restore_list_gpm_size_bytes;
+	uint32_t save_restore_list_gpm_offset_bytes;
+	uint32_t save_restore_list_srm_ucode_ver;
+	uint32_t save_restore_list_srm_feature_ver;
+	uint32_t save_restore_list_srm_size_bytes;
+	uint32_t save_restore_list_srm_offset_bytes;
+};
+
 /* version_major=1, version_minor=0 */
 struct sdma_firmware_header_v1_0 {
 	struct common_firmware_header header;
@@ -148,6 +166,7 @@ union amdgpu_firmware_header {
 	struct gfx_firmware_header_v1_0 gfx;
 	struct rlc_firmware_header_v1_0 rlc;
 	struct rlc_firmware_header_v2_0 rlc_v2_0;
+	struct rlc_firmware_header_v2_1 rlc_v2_1;
 	struct sdma_firmware_header_v1_0 sdma;
 	struct sdma_firmware_header_v1_1 sdma_v1_1;
 	struct gpu_info_firmware_header_v1_0 gpu_info;

From 621a6318adea69b08a3652c64bc7cc0cb4dacfb4 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Mon, 22 Jan 2018 20:48:14 +0800
Subject: [PATCH 0812/1286] drm/amdgpu: add save restore list cntl gpm and srm
 firmware support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The RLC save/restore list cntl/gpm_mem/srm_mem ucodes are used for the CGPG
and gfxoff functionality.
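
As a reviewer aid, here is a minimal, illustrative-only sketch of how one of
the three blobs is located from the v2.1 header offsets; it mirrors
gfx_v9_0_init_rlc_ext_microcode() added below, and "rlc_fw" stands for the
requested RLC struct firmware:

    /* illustrative only: locate the SRLC blob inside the RLC firmware image;
     * the GPM and SRM blobs are found the same way from their own
     * offset/size fields */
    const struct rlc_firmware_header_v2_1 *hdr =
            (const struct rlc_firmware_header_v2_1 *)rlc_fw->data;
    const u8 *srlc = (const u8 *)hdr +
            le32_to_cpu(hdr->save_restore_list_cntl_offset_bytes);
    u32 srlc_size = le32_to_cpu(hdr->save_restore_list_cntl_size_bytes);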

Signed-off-by: Huang Rui <ray.huang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h       | 15 +++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c   | 36 +++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 17 ++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h |  3 ++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c     | 55 ++++++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c    |  9 ++++
 include/uapi/drm/amdgpu_drm.h             |  6 +++
 7 files changed, 138 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d64ef30fed47..5ad893915a85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -774,9 +774,18 @@ struct amdgpu_rlc {
 	u32 starting_offsets_start;
 	u32 reg_list_format_size_bytes;
 	u32 reg_list_size_bytes;
+	u32 reg_list_format_direct_reg_list_length;
+	u32 save_restore_list_cntl_size_bytes;
+	u32 save_restore_list_gpm_size_bytes;
+	u32 save_restore_list_srm_size_bytes;
 
 	u32 *register_list_format;
 	u32 *register_restore;
+	u8 *save_restore_list_cntl;
+	u8 *save_restore_list_gpm;
+	u8 *save_restore_list_srm;
+
+	bool is_rlc_v2_1;
 };
 
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
@@ -943,6 +952,12 @@ struct amdgpu_gfx {
 	uint32_t			ce_feature_version;
 	uint32_t			pfp_feature_version;
 	uint32_t			rlc_feature_version;
+	uint32_t			rlc_srlc_fw_version;
+	uint32_t			rlc_srlc_feature_version;
+	uint32_t			rlc_srlg_fw_version;
+	uint32_t			rlc_srlg_feature_version;
+	uint32_t			rlc_srls_fw_version;
+	uint32_t			rlc_srls_feature_version;
 	uint32_t			mec_feature_version;
 	uint32_t			mec2_feature_version;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d602f8b14c58..eb4785e51573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -215,6 +215,18 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->ver = adev->gfx.rlc_fw_version;
 		fw_info->feature = adev->gfx.rlc_feature_version;
 		break;
+	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
+		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
+		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
+		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
+		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
+		fw_info->ver = adev->gfx.rlc_srls_fw_version;
+		fw_info->feature = adev->gfx.rlc_srls_feature_version;
+		break;
 	case AMDGPU_INFO_FW_GFX_MEC:
 		if (query_fw->index == 0) {
 			fw_info->ver = adev->gfx.mec_fw_version;
@@ -1149,6 +1161,30 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
 	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);
 
+	/* RLC SAVE RESTORE LIST CNTL */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* RLC SAVE RESTORE LIST GPM MEM */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* RLC SAVE RESTORE LIST SRM MEM */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
 	/* MEC */
 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
 	query_fw.index = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 84d652599d5b..0c74c09ef3b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -337,7 +337,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	    (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
 	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
 	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
-	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT)) {
+	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
+	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
+	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
+	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
 		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
 
 		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
@@ -359,6 +362,18 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 					      le32_to_cpu(header->ucode_array_offset_bytes) +
 					      le32_to_cpu(cp_hdr->jt_offset) * 4),
 		       ucode->ucode_size);
+	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
+		ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
+		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
+		       ucode->ucode_size);
+	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
+		ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
+		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
+		       ucode->ucode_size);
+	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+		ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
+		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
+		       ucode->ucode_size);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 0b262f4bb4fc..08e38579af24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -187,6 +187,9 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_CP_MEC2,
 	AMDGPU_UCODE_ID_CP_MEC2_JT,
 	AMDGPU_UCODE_ID_RLC_G,
+	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
+	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
+	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
 	AMDGPU_UCODE_ID_STORAGE,
 	AMDGPU_UCODE_ID_SMC,
 	AMDGPU_UCODE_ID_UVD,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 587a8731fa31..73b76fa29bad 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -41,7 +41,6 @@
 #define GFX9_MEC_HPD_SIZE 2048
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
-#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34
 
 #define mmPWR_MISC_CNTL_STATUS					0x0183
 #define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
@@ -401,6 +400,27 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
 	kfree(adev->gfx.rlc.register_list_format);
 }
 
+static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
+{
+	const struct rlc_firmware_header_v2_1 *rlc_hdr;
+
+	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+}
+
 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
@@ -412,6 +432,8 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
 	unsigned int *tmp = NULL;
 	unsigned int i = 0;
+	uint16_t version_major;
+	uint16_t version_minor;
 
 	DRM_DEBUG("\n");
 
@@ -468,6 +490,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 		goto out;
 	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+	if (version_major == 2 && version_minor == 1)
+		adev->gfx.rlc.is_rlc_v2_1 = true;
+
 	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
 	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
 	adev->gfx.rlc.save_and_restore_offset =
@@ -508,6 +536,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
 		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
 
+	if (adev->gfx.rlc.is_rlc_v2_1)
+		gfx_v9_0_init_rlc_ext_microcode(adev);
+
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
 	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
 	if (err)
@@ -566,6 +597,26 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 		adev->firmware.fw_size +=
 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
+		if (adev->gfx.rlc.is_rlc_v2_1) {
+			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+			info->fw = adev->gfx.rlc_fw;
+			adev->firmware.fw_size +=
+				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
+
+			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+			info->fw = adev->gfx.rlc_fw;
+			adev->firmware.fw_size +=
+				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+
+			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+			info->fw = adev->gfx.rlc_fw;
+			adev->firmware.fw_size +=
+				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+		}
+
 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
 		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
 		info->fw = adev->gfx.mec_fw;
@@ -1781,7 +1832,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 
 	/* setup unique_indirect_regs array and indirect_start_offsets array */
 	gfx_v9_0_parse_ind_reg_list(register_list_format,
-				GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
+				adev->gfx.rlc.reg_list_format_direct_reg_list_length,
 				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
 				unique_indirect_regs,
 				&unique_indirect_reg_count,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 8873d833a7f7..0ff136d02d9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -70,6 +70,15 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
 	case AMDGPU_UCODE_ID_RLC_G:
 		*type = GFX_FW_TYPE_RLC_G;
 		break;
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL;
+		break;
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
+		break;
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
+		break;
 	case AMDGPU_UCODE_ID_SMC:
 		*type = GFX_FW_TYPE_SMU;
 		break;
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 78fe828f2f79..081d25640b64 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -630,6 +630,12 @@ struct drm_amdgpu_cs_chunk_data {
 	#define AMDGPU_INFO_FW_ASD		0x0d
 	/* Subquery id: Query VCN firmware version */
 	#define AMDGPU_INFO_FW_VCN		0x0e
+	/* Subquery id: Query GFX RLC SRLC firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
+	/* Subquery id: Query GFX RLC SRLG firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
+	/* Subquery id: Query GFX RLC SRLS firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
 /* number of bytes moved for TTM migration */
 #define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
 /* the used VRAM size */

From 72408a41d0d78dbbd7fe7e24849c683596c8b79a Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Thu, 21 Dec 2017 15:03:31 +0800
Subject: [PATCH 0813/1286] drm/amdgpu: enter rlc safe mode before set cgpg
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 73b76fa29bad..69370f0df4b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3394,8 +3394,7 @@ static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 						bool enable)
 {
-	/* TODO: double check if we need to perform under safe mdoe */
-	/* gfx_v9_0_enter_rlc_safe_mode(adev); */
+	gfx_v9_0_enter_rlc_safe_mode(adev);
 
 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3406,7 +3405,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
 	}
 
-	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
+	gfx_v9_0_exit_rlc_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3797,7 +3796,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	}
 
 	amdgpu_ring_write(ring, header);
-BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
+	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
 	amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
 		(2 << 0) |

From a5acf930269e71c76a7e7ad6819a86919c752fb1 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Thu, 21 Dec 2017 15:48:27 +0800
Subject: [PATCH 0814/1286] drm/amdgpu: cleanup init power gating function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Remove the gfx_v9_0_enable_sck_slow_down_on_power_up/down and CP power gating
enable calls from init, since they only need to be made when the power gating
behavior is set. They are kept in the set_powergating callback, which
enables/disables PG in late_init.

Signed-off-by: Huang Rui <ray.huang@amd.com>
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 24 ++++++------------------
 1 file changed, 6 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 69370f0df4b4..eff1fd14b01f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2065,6 +2065,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
 
 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 {
+	if (!adev->gfx.rlc.is_rlc_v2_1)
+		return;
+
 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
 			      AMD_PG_SUPPORT_GFX_SMG |
 			      AMD_PG_SUPPORT_GFX_DMG |
@@ -2075,24 +2078,9 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 		gfx_v9_0_init_rlc_save_restore_list(adev);
 		gfx_v9_0_enable_save_restore_machine(adev);
 
-		if (adev->asic_type == CHIP_RAVEN) {
-			WREG32(mmRLC_JUMP_TABLE_RESTORE,
-				adev->gfx.rlc.cp_table_gpu_addr >> 8);
-			gfx_v9_0_init_gfx_power_gating(adev);
-
-			if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
-				gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
-				gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
-			} else {
-				gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
-				gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
-			}
-
-			if (adev->pg_flags & AMD_PG_SUPPORT_CP)
-				gfx_v9_0_enable_cp_power_gating(adev, true);
-			else
-				gfx_v9_0_enable_cp_power_gating(adev, false);
-		}
+		WREG32(mmRLC_JUMP_TABLE_RESTORE,
+		       adev->gfx.rlc.cp_table_gpu_addr >> 8);
+		gfx_v9_0_init_gfx_power_gating(adev);
 	}
 }
 

From 727b888f6643b69db2cad1a9f0ae5f8804fa12cd Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Thu, 21 Dec 2017 16:13:02 +0800
Subject: [PATCH 0815/1286] drm/amdgpu: revise init_rlc_save_restore_list
 behavior to support latest register_list_format/register_restore table
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The RLC save/restore list is used by the CGPG and GFXOFF functionality; it
loads the two binary tables (register_list_format/register_restore) carried in
the RLC firmware.
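
For reviewers, a condensed, illustrative-only sketch of the list layout the
reworked parser below assumes; the walk mirrors gfx_v9_1_parse_ind_reg_list()
and the names are shortened for brevity:

    /*
     * register_list_format layout:
     *   [ direct portion: reg_list_format_direct_reg_list_length dwords ]
     *   [ indirect block: { data0, data1, indirect_reg } x N, 0xFFFFFFFF ]
     *   [ further indirect blocks ... ]
     */
    static void walk_indirect_blocks(const u32 *fmt, int start, int len)
    {
            int i;

            for (i = start; i < len; i++) {
                    /* each block ends with an 0xFFFFFFFF marker; the next
                     * block starts on the following dword */
                    while (fmt[i] != 0xFFFFFFFF) {
                            i += 2; /* skip the two data dwords */
                            /* fmt[i] is the indirect register offset */
                            i++;
                    }
            }
    }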

Signed-off-by: Huang Rui <ray.huang@amd.com>
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 149 ++++++++++++++++----------
 1 file changed, 91 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index eff1fd14b01f..3abd91f27e31 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -184,6 +184,30 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
 };
 
+static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
+{
+	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+};
+
+static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
+{
+	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+};
+
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -1763,55 +1787,42 @@ static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
 			adev->gfx.rlc.clear_state_size);
 }
 
-static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
+static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
 				int indirect_offset,
 				int list_size,
 				int *unique_indirect_regs,
 				int *unique_indirect_reg_count,
-				int max_indirect_reg_count,
 				int *indirect_start_offsets,
-				int *indirect_start_offsets_count,
-				int max_indirect_start_offsets_count)
+				int *indirect_start_offsets_count)
 {
 	int idx;
-	bool new_entry = true;
 
 	for (; indirect_offset < list_size; indirect_offset++) {
+		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
+		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
 
-		if (new_entry) {
-			new_entry = false;
-			indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
-			*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
-			BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
+		while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
+			indirect_offset += 2;
+
+			/* look for the matching indice */
+			for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
+				if (unique_indirect_regs[idx] ==
+					register_list_format[indirect_offset] ||
+					!unique_indirect_regs[idx])
+					break;
+			}
+
+			BUG_ON(idx >= *unique_indirect_reg_count);
+
+			if (!unique_indirect_regs[idx])
+				unique_indirect_regs[idx] = register_list_format[indirect_offset];
+
+			indirect_offset++;
 		}
-
-		if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
-			new_entry = true;
-			continue;
-		}
-
-		indirect_offset += 2;
-
-		/* look for the matching indice */
-		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
-			if (unique_indirect_regs[idx] ==
-				register_list_format[indirect_offset])
-				break;
-		}
-
-		if (idx >= *unique_indirect_reg_count) {
-			unique_indirect_regs[*unique_indirect_reg_count] =
-				register_list_format[indirect_offset];
-			idx = *unique_indirect_reg_count;
-			*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
-			BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
-		}
-
-		register_list_format[indirect_offset] = idx;
 	}
 }
 
-static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
+static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
 {
 	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
 	int unique_indirect_reg_count = 0;
@@ -1820,7 +1831,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 	int indirect_start_offsets_count = 0;
 
 	int list_size = 0;
-	int i = 0;
+	int i = 0, j = 0;
 	u32 tmp = 0;
 
 	u32 *register_list_format =
@@ -1831,15 +1842,14 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 		adev->gfx.rlc.reg_list_format_size_bytes);
 
 	/* setup unique_indirect_regs array and indirect_start_offsets array */
-	gfx_v9_0_parse_ind_reg_list(register_list_format,
-				adev->gfx.rlc.reg_list_format_direct_reg_list_length,
-				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
-				unique_indirect_regs,
-				&unique_indirect_reg_count,
-				ARRAY_SIZE(unique_indirect_regs),
-				indirect_start_offsets,
-				&indirect_start_offsets_count,
-				ARRAY_SIZE(indirect_start_offsets));
+	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
+	gfx_v9_1_parse_ind_reg_list(register_list_format,
+				    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
+				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
+				    unique_indirect_regs,
+				    &unique_indirect_reg_count,
+				    indirect_start_offsets,
+				    &indirect_start_offsets_count);
 
 	/* enable auto inc in case it is disabled */
 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1853,19 +1863,37 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
 			adev->gfx.rlc.register_restore[i]);
 
-	/* load direct register */
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
-	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
-			adev->gfx.rlc.register_restore[i]);
-
 	/* load indirect register */
 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
 		adev->gfx.rlc.reg_list_format_start);
-	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
+
+	/* direct register portion */
+	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
 			register_list_format[i]);
 
+	/* indirect register portion */
+	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
+		if (register_list_format[i] == 0xFFFFFFFF) {
+			WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+			continue;
+		}
+
+		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+
+		for (j = 0; j < unique_indirect_reg_count; j++) {
+			if (register_list_format[i] == unique_indirect_regs[j]) {
+				WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
+				break;
+			}
+		}
+
+		BUG_ON(j >= unique_indirect_reg_count);
+
+		i++;
+	}
+
 	/* set save/restore list size */
 	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
 	list_size = list_size >> 1;
@@ -1878,14 +1906,19 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 		adev->gfx.rlc.starting_offsets_start);
 	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
-			indirect_start_offsets[i]);
+		       indirect_start_offsets[i]);
 
 	/* load unique indirect regs*/
 	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
-			unique_indirect_regs[i] & 0x3FFFF);
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
-			unique_indirect_regs[i] >> 20);
+		if (unique_indirect_regs[i] != 0) {
+			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
+			       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
+			       unique_indirect_regs[i] & 0x3FFFF);
+
+			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
+			       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
+			       unique_indirect_regs[i] >> 20);
+		}
 	}
 
 	kfree(register_list_format);
@@ -2075,7 +2108,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 			      AMD_PG_SUPPORT_GDS |
 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
 		gfx_v9_0_init_csb(adev);
-		gfx_v9_0_init_rlc_save_restore_list(adev);
+		gfx_v9_1_init_rlc_save_restore_list(adev);
 		gfx_v9_0_enable_save_restore_machine(adev);
 
 		WREG32(mmRLC_JUMP_TABLE_RESTORE,

From 0df3e67d343b6af7eb71f6353f93e4d0a5e952a7 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Wed, 6 Dec 2017 09:23:50 +0800
Subject: [PATCH 0816/1286] drm/amdgpu: add setting powergating method for gfx9
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3abd91f27e31..8d54207471d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3355,6 +3355,11 @@ static int gfx_v9_0_late_init(void *handle)
 	if (r)
 		return r;
 
+	r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+						   AMD_PG_STATE_GATE);
+	if (r)
+		return r;
+
 	return 0;
 }
 

From af15890df97d09e2faba2199b36f5e69bf129342 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Thu, 14 Dec 2017 13:38:13 +0800
Subject: [PATCH 0817/1286] drm/amd/powerplay: send CGPG smc message if PG is
 enabled for raven
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 8 +++++++-
 drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h      | 1 +
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 0f252265a753..f0727b4f1ebf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -206,12 +206,18 @@ static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input
 static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
 {
 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	smu10_data->vcn_power_gated = true;
 	smu10_data->isp_tileA_power_gated = true;
 	smu10_data->isp_tileB_power_gated = true;
 
-	return 0;
+	if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+		return smum_send_msg_to_smc_with_parameter(hwmgr,
+							   PPSMC_MSG_SetGfxCGPG,
+							   true);
+	else
+		return 0;
 }
 
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 426bff2aad2b..5d07b6ea0a55 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -75,6 +75,7 @@
 #define PPSMC_MSG_GetMinGfxclkFrequency         0x2C
 #define PPSMC_MSG_GetMaxGfxclkFrequency         0x2D
 #define PPSMC_MSG_SoftReset                     0x2E
+#define PPSMC_MSG_SetGfxCGPG			0x2F
 #define PPSMC_MSG_SetSoftMaxGfxClk              0x30
 #define PPSMC_MSG_SetHardMinGfxClk              0x31
 #define PPSMC_MSG_SetSoftMaxSocclkByFreq        0x32

From fa7bd27d7352bfd57aed60a7e1b678bc1f475fc4 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Tue, 13 Mar 2018 15:13:46 +0800
Subject: [PATCH 0818/1286] drm/amdgpu: move PP_FEATURE_MASK to amd_shared
 header
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It will be used not only by powerplay but also by the amdgpu side in future
patches, so move it into the amd_shared header file.
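
For context, an illustrative-only example of how the mask bits are meant to be
combined once the enum is shared (values taken from the enum itself):

    /* illustrative only: each PP_*_MASK value is a single bit, so a
     * feature mask with everything enabled except overdrive is: */
    uint32_t mask = 0xffffffff & ~PP_OVERDRIVE_MASK;    /* == 0xffffbfff */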

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h   |  2 --
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c       |  2 +-
 drivers/gpu/drm/amd/include/amd_shared.h  | 19 +++++++++++++++++++
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 19 -------------------
 5 files changed, 21 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 354c6dc99481..dd6203a0a6b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -52,8 +52,6 @@ enum amdgpu_dpm_event_src {
 	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
 };
 
-#define SCLK_DEEP_SLEEP_MASK 0x8
-
 struct amdgpu_ps {
 	u32 caps; /* vbios flags */
 	u32 class; /* vbios flags */
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index be6b19951e6a..f48168fbdfe6 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5903,7 +5903,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
 	pi->pcie_dpm_key_disabled = 0;
 	pi->thermal_sclk_dpm_enabled = 0;
 
-	if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
+	if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
 		pi->caps_sclk_ds = true;
 	else
 		pi->caps_sclk_ds = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index bc1720ea4959..ef668a321ef1 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2817,7 +2817,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
 		pi->caps_tcp_ramping = true;
 	}
 
-	if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
+	if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
 		pi->caps_sclk_ds = true;
 	else
 		pi->caps_sclk_ds = false;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 9fa3aaef3f33..efeea9a9f27e 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -109,6 +109,25 @@ enum amd_powergating_state {
 #define AMD_PG_SUPPORT_GFX_PIPELINE		(1 << 12)
 #define AMD_PG_SUPPORT_MMHUB			(1 << 13)
 
+enum PP_FEATURE_MASK {
+	PP_SCLK_DPM_MASK = 0x1,
+	PP_MCLK_DPM_MASK = 0x2,
+	PP_PCIE_DPM_MASK = 0x4,
+	PP_SCLK_DEEP_SLEEP_MASK = 0x8,
+	PP_POWER_CONTAINMENT_MASK = 0x10,
+	PP_UVD_HANDSHAKE_MASK = 0x20,
+	PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
+	PP_VBI_TIME_SUPPORT_MASK = 0x80,
+	PP_ULV_MASK = 0x100,
+	PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
+	PP_CLOCK_STRETCH_MASK = 0x400,
+	PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800,
+	PP_SOCCLK_DPM_MASK = 0x1000,
+	PP_DCEFCLK_DPM_MASK = 0x2000,
+	PP_OVERDRIVE_MASK = 0x4000,
+	PP_ACG_MASK = 0x10000,
+};
+
 struct amd_ip_funcs {
 	/* Name of IP block */
 	char *name;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 2f203ec3d19c..0d2b3cebd9cf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -66,25 +66,6 @@ struct vi_dpm_table {
 #define PCIE_PERF_REQ_GEN2         3
 #define PCIE_PERF_REQ_GEN3         4
 
-enum PP_FEATURE_MASK {
-	PP_SCLK_DPM_MASK = 0x1,
-	PP_MCLK_DPM_MASK = 0x2,
-	PP_PCIE_DPM_MASK = 0x4,
-	PP_SCLK_DEEP_SLEEP_MASK = 0x8,
-	PP_POWER_CONTAINMENT_MASK = 0x10,
-	PP_UVD_HANDSHAKE_MASK = 0x20,
-	PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
-	PP_VBI_TIME_SUPPORT_MASK = 0x80,
-	PP_ULV_MASK = 0x100,
-	PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
-	PP_CLOCK_STRETCH_MASK = 0x400,
-	PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800,
-	PP_SOCCLK_DPM_MASK = 0x1000,
-	PP_DCEFCLK_DPM_MASK = 0x2000,
-	PP_OVERDRIVE_MASK = 0x4000,
-	PP_ACG_MASK = 0x10000,
-};
-
 enum PHM_BackEnd_Magic {
 	PHM_Dummy_Magic       = 0xAA5555AA,
 	PHM_RV770_Magic       = 0xDCBAABCD,

From 6f92ad2a1772ebaa5eb3d27c9c8dd8caf2e3cbdb Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Fri, 2 Mar 2018 14:16:06 +0800
Subject: [PATCH 0819/1286] drm/amdgpu: add gfxoff feature mask
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/include/amd_shared.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index efeea9a9f27e..33de33016bda 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -125,6 +125,7 @@ enum PP_FEATURE_MASK {
 	PP_SOCCLK_DPM_MASK = 0x1000,
 	PP_DCEFCLK_DPM_MASK = 0x2000,
 	PP_OVERDRIVE_MASK = 0x4000,
+	PP_GFXOFF_MASK = 0x8000,
 	PP_ACG_MASK = 0x10000,
 };
 

From 917d8614c4cdddfb257229e0fb3077b8842dd9e0 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Fri, 2 Mar 2018 14:40:53 +0800
Subject: [PATCH 0820/1286] drm/amdgpu: set gfxoff disabled by default
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7c17a0bc2cd2..998ba8e710de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -122,7 +122,7 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
 int amdgpu_ngg = 0;
 int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;

From 9c82214160ee5e2a1e4137612822a35dc0cc064b Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Fri, 2 Mar 2018 15:10:52 +0800
Subject: [PATCH 0821/1286] drm/amd/powerplay: add gfx off control function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

gfx_off_control is the hook called to send the SMC message that enables or
disables gfxoff.
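
For reviewers, an illustrative-only sketch of how a caller toggles gfxoff
through the new hook; the callers added in the follow-up patches below look
essentially like this (hwmgr and state come from the caller's context):

    /* illustrative only: gate/ungate gfxoff via the hwmgr callback */
    if (hwmgr->hwmgr_func->gfx_off_control) {
            int ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
                                    state == AMD_PG_STATE_GATE);
            if (ret)
                    pr_err("gfx off control failed!\n");
    }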

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 36 ++++++++++++++++++-
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h     |  1 +
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index f0727b4f1ebf..fde1e5c00a3c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -42,6 +42,13 @@
 #define SMU10_DISPCLK_BYPASS_THRESHOLD     10000 /* 100Mhz */
 #define SMC_RAM_END                     0x40000
 
+#define mmPWR_MISC_CNTL_STATUS					0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
+
 static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
 
 
@@ -243,13 +250,31 @@ static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
 	return smu10_reset_cc6_data(hwmgr);
 }
 
+static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
+	if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
+	    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
+		return true;
+
+	return false;
+}
+
 static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
 {
 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
 
-	if (smu10_data->gfx_off_controled_by_driver)
+	if (smu10_data->gfx_off_controled_by_driver) {
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
 
+		/* confirm gfx is back to "on" state */
+		while (!smu10_is_gfx_on(hwmgr))
+			msleep(1);
+	}
+
 	return 0;
 }
 
@@ -273,6 +298,14 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	return smu10_enable_gfx_off(hwmgr);
 }
 
+static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+	if (enable)
+		return smu10_enable_gfx_off(hwmgr);
+	else
+		return smu10_disable_gfx_off(hwmgr);
+}
+
 static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				struct pp_power_state  *prequest_ps,
 			const struct pp_power_state *pcurrent_ps)
@@ -1060,6 +1093,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.power_state_set = smu10_set_power_state_tasks,
 	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
 	.set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+	.gfx_off_control = smu10_gfx_off_control,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 0d2b3cebd9cf..3d9743f5bb45 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -296,6 +296,7 @@ struct pp_hwmgr_func {
 	int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
 			struct pp_display_clock_request *clock);
 	int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
+	int (*gfx_off_control)(struct pp_hwmgr *hwmgr, bool enable);
 	int (*power_off_asic)(struct pp_hwmgr *hwmgr);
 	int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
 	int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);

From 775b0c11e27fce7d204d2911220fd7eebcc074d0 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Fri, 2 Mar 2018 15:18:54 +0800
Subject: [PATCH 0822/1286] drm/amd/powerplay: enable/disable gfxoff through
 smu
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index bd0d387584ac..6976596449a8 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -222,10 +222,19 @@ static int pp_set_powergating_state(void *handle,
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+	int ret;
 
 	if (!hwmgr || !hwmgr->pm_en)
 		return 0;
 
+	if (hwmgr->hwmgr_func->gfx_off_control) {
+		/* Enable/disable GFX off through SMU */
+		ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
+							 state == AMD_PG_STATE_GATE);
+		if (ret)
+			pr_err("gfx off control failed!\n");
+	}
+
 	if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;

From 00f54b97d7de97c41cffaad83d32a9bf03edad89 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Tue, 27 Feb 2018 21:53:00 +0800
Subject: [PATCH 0823/1286] drm/amdgpu: use pp_feature member to store the mask
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    | 2 ++
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c           | 2 +-
 drivers/gpu/drm/amd/amdgpu/kv_dpm.c           | 2 +-
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 2 +-
 5 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5ad893915a85..75700552c71d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1430,6 +1430,7 @@ enum amd_hw_ip_block_type {
 struct amd_powerplay {
 	void *pp_handle;
 	const struct amd_pm_funcs *pp_funcs;
+	uint32_t pp_feature;
 };
 
 #define AMDGPU_RESET_MAGIC_NUM 64
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5958e8112489..e8b57cf48555 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1545,6 +1545,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 			return -EAGAIN;
 	}
 
+	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d <%s>\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index f48168fbdfe6..a266dcf5daed 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5903,7 +5903,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
 	pi->pcie_dpm_key_disabled = 0;
 	pi->thermal_sclk_dpm_enabled = 0;
 
-	if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
+	if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
 		pi->caps_sclk_ds = true;
 	else
 		pi->caps_sclk_ds = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index ef668a321ef1..17f7f074cedc 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2817,7 +2817,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
 		pi->caps_tcp_ramping = true;
 	}
 
-	if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
+	if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
 		pi->caps_sclk_ds = true;
 	else
 		pi->caps_sclk_ds = false;
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 6976596449a8..246f8e9e9451 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -53,7 +53,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
 	mutex_init(&hwmgr->smu_lock);
 	hwmgr->chip_family = adev->family;
 	hwmgr->chip_id = adev->asic_type;
-	hwmgr->feature_mask = amdgpu_pp_feature_mask;
+	hwmgr->feature_mask = adev->powerplay.pp_feature;
 	hwmgr->display_config = &adev->pm.pm_display_cfg;
 	adev->powerplay.pp_handle = hwmgr;
 	adev->powerplay.pp_funcs = &pp_dpm_funcs;

From 1dedc62338accff01ce4d56302e1c55a6b43b3d6 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Tue, 13 Mar 2018 17:59:12 +0800
Subject: [PATCH 0824/1286] drm/amdgpu: clear gfxoff feature mask if the asic
 is not raven
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index bca67df29c8c..d1052b5e0ca8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -95,7 +95,8 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 		hwmgr->smumgr_funcs = &ci_smu_funcs;
 		ci_set_asic_special_caps(hwmgr);
 		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
-					PP_ENABLE_GFX_CG_THRU_SMU);
+					 PP_ENABLE_GFX_CG_THRU_SMU |
+					 PP_GFXOFF_MASK);
 		hwmgr->pp_table_version = PP_TABLE_V0;
 		hwmgr->od_enabled = false;
 		smu7_init_function_pointers(hwmgr);
@@ -103,9 +104,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	case AMDGPU_FAMILY_CZ:
 		hwmgr->od_enabled = false;
 		hwmgr->smumgr_funcs = &smu8_smu_funcs;
+		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 		smu8_init_function_pointers(hwmgr);
 		break;
 	case AMDGPU_FAMILY_VI:
+		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 		switch (hwmgr->chip_id) {
 		case CHIP_TOPAZ:
 			hwmgr->smumgr_funcs = &iceland_smu_funcs;
@@ -139,6 +142,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 		smu7_init_function_pointers(hwmgr);
 		break;
 	case AMDGPU_FAMILY_AI:
+		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 		switch (hwmgr->chip_id) {
 		case CHIP_VEGA10:
 			hwmgr->smumgr_funcs = &vega10_smu_funcs;

From 9667849bbb8d8a2b97798ba0972fe25d13ea8acf Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Tue, 13 Mar 2018 18:32:39 +0800
Subject: [PATCH 0825/1286] drm/amd/powerplay: add control gfxoff enabling in
 late init
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 246f8e9e9451..b493369e6d0f 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -180,6 +180,7 @@ static int pp_late_init(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+	int ret;
 
 	if (hwmgr && hwmgr->pm_en) {
 		mutex_lock(&hwmgr->smu_lock);
@@ -189,6 +190,14 @@ static int pp_late_init(void *handle)
 	}
 	if (adev->pm.smu_prv_buffer_size != 0)
 		pp_reserve_vram_for_smu(adev);
+
+	if (hwmgr->hwmgr_func->gfx_off_control &&
+	    (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
+		ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
+		if (ret)
+			pr_err("gfx off enabling failed!\n");
+	}
+
 	return 0;
 }
 

From b083369621e84dc0c8ec1ae7191d009f6f1c4d75 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Tue, 13 Mar 2018 18:39:48 +0800
Subject: [PATCH 0826/1286] drm/amdgpu: it should disable gfxoff when system is
 going to suspend
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e8b57cf48555..9e917f53f357 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1902,6 +1902,12 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_request_full_gpu(adev, false);
 
+	/* ungate SMC block powergating */
+	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_SMC,
+						       AMD_CG_STATE_UNGATE);
+
 	/* ungate SMC block first */
 	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
 						   AMD_CG_STATE_UNGATE);

From 151b5d7fd35876120dc744f93865e4c7dc2c1f36 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Tue, 27 Feb 2018 13:43:59 +0800
Subject: [PATCH 0827/1286] drm/amdgpu: fix to disable powergating in hw_fini
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CGPG and GFXOFF need to be enabled together. If only one of them is enabled,
the system hangs after startx (i.e. once draw commands are issued). So when
gfxoff is disabled, CGPG must be disabled afterwards as well.

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 8d54207471d7..2c5e2a41632e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3137,6 +3137,9 @@ static int gfx_v9_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
+	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+					       AMD_PG_STATE_UNGATE);
+
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 

From 9ac4b0d95a7a554bb60d97fbee5fbfd1b73df50a Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Fri, 15 Dec 2017 14:34:57 +0800
Subject: [PATCH 0828/1286] drm/amdgpu: set CGPG if gfxoff is enabled for raven
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 65e781f05c24..90065766fffb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -682,6 +682,11 @@ static int soc15_common_early_init(void *handle)
 			AMD_CG_SUPPORT_SDMA_LS;
 		adev->pg_flags = AMD_PG_SUPPORT_SDMA;
 
+		if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_RLC_SMU_HS;
+
 		adev->external_rev_id = 0x1;
 		break;
 	default:

From f5264548008a5cde7090c2b6b85c8d65cb86d2f7 Mon Sep 17 00:00:00 2001
From: Huang Rui <ray.huang@amd.com>
Date: Thu, 14 Dec 2017 15:33:53 +0800
Subject: [PATCH 0829/1286] drm/amd/powerplay: use the flag to decide whether
 send gfxoff smc message
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index fde1e5c00a3c..7712eb62539a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -81,11 +81,15 @@ static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 	smu10_data->thermal_auto_throttling_treshold = 0;
 	smu10_data->is_nb_dpm_enabled = 1;
 	smu10_data->dpm_flags = 1;
-	smu10_data->gfx_off_controled_by_driver = false;
 	smu10_data->need_min_deep_sleep_dcefclk = true;
 	smu10_data->num_active_display = 0;
 	smu10_data->deep_sleep_dcefclk = 0;
 
+	if (hwmgr->feature_mask & PP_GFXOFF_MASK)
+		smu10_data->gfx_off_controled_by_driver = true;
+	else
+		smu10_data->gfx_off_controled_by_driver = false;
+
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 					PHM_PlatformCaps_SclkDeepSleep);
 

From 1d2361e5a6c60d7b142d19555c3e6240ffe93731 Mon Sep 17 00:00:00 2001
From: Samuel Li <Samuel.Li@amd.com>
Date: Wed, 18 Apr 2018 15:06:02 -0400
Subject: [PATCH 0830/1286] drm/amdgpu: Rename
 amdgpu_display_framebuffer_domains()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It returns the domains supported for display; the domains actually used
are decided later, when the buffer is pinned.

Signed-off-by: Samuel Li <Samuel.Li@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c       | 4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h       | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c            | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c         | 2 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +--
 5 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b83ae998fe27..76ee8e04ff11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -189,7 +189,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
+	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
@@ -484,7 +484,7 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 };
 
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
 {
 	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 2b11d808f297..f66e3e3fef0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,7 +23,7 @@
 #ifndef __AMDGPU_DISPLAY_H__
 #define __AMDGPU_DISPLAY_H__
 
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev);
 struct drm_framebuffer *
 amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 				       struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index ff89e84b34ce..bc5fd8ebab5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -137,7 +137,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	/* need to align pitch with crtc limits */
 	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
 						  fb_tiled);
-	domain = amdgpu_display_framebuffer_domains(adev);
+	domain = amdgpu_display_supported_domains(adev);
 
 	height = ALIGN(mode_cmd->height, 8);
 	size = mode_cmd->pitches[0] * height;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 713417b6d15d..4683626b065f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -215,7 +215,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct ttm_operation_ctx ctx = { true, false };
-	u32 domain = amdgpu_display_framebuffer_domains(adev);
+	u32 domain = amdgpu_display_supported_domains(adev);
 	int ret;
 	bool reads = (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_FROM_DEVICE);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2368ade4bae0..28d8c08efeeb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3049,12 +3049,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 		return r;
 
 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
-		domain = amdgpu_display_framebuffer_domains(adev);
+		domain = amdgpu_display_supported_domains(adev);
 	else
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
 
 	r = amdgpu_bo_pin(rbo, domain, &afb->address);
-
 	amdgpu_bo_unreserve(rbo);
 
 	if (unlikely(r != 0)) {

From 9b3f217faf48603c91d4ca44a18e6ff74c3c1c0c Mon Sep 17 00:00:00 2001
From: Samuel Li <Samuel.Li@amd.com>
Date: Wed, 18 Apr 2018 16:26:18 -0400
Subject: [PATCH 0831/1286] drm/amdgpu: Remove VRAM from shared bo domains.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This fixes an issue introduced by the change "allow framebuffer in GART
memory as well", which could lead to a shared buffer ending up pinned
in VRAM. Use GTT if it is included in the domain; otherwise return an
error.

Signed-off-by: Samuel Li <Samuel.Li@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index feece0a491a3..1985c08413c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -694,8 +694,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		return -EINVAL;
 
 	/* A shared bo cannot be migrated to VRAM */
-	if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
-		return -EINVAL;
+	if (bo->prime_shared_count) {
+		if (domain & AMDGPU_GEM_DOMAIN_GTT)
+			domain = AMDGPU_GEM_DOMAIN_GTT;
+		else
+			return -EINVAL;
+	}
 
 	if (bo->pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;

From 8567f68147de1f09cc868b52b02a0c11dc048206 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 19 Apr 2018 13:46:03 -0500
Subject: [PATCH 0832/1286] drm/amdgpu/pm: document
 power_dpm_force_performance_level

Provide documentation for power_dpm_force_performance_level,
which is used to adjust settings related to GPU power states.

Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 53 ++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 744f105a2c75..ee11e92cc4d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -131,6 +131,59 @@ fail:
 	return count;
 }
 
+
+/**
+ * DOC: power_dpm_force_performance_level
+ *
+ * The amdgpu driver provides a sysfs API for adjusting certain power
+ * related parameters.  The file power_dpm_force_performance_level is
+ * used for this.  It accepts the following arguments:
+ * - auto
+ * - low
+ * - high
+ * - manual
+ * - GPU fan
+ * - profile_standard
+ * - profile_min_sclk
+ * - profile_min_mclk
+ * - profile_peak
+ *
+ * auto
+ *
+ * When auto is selected, the driver will attempt to dynamically select
+ * the optimal power profile for current conditions in the driver.
+ *
+ * low
+ *
+ * When low is selected, the clocks are forced to the lowest power state.
+ *
+ * high
+ *
+ * When high is selected, the clocks are forced to the highest power state.
+ *
+ * manual
+ *
+ * When manual is selected, the user can manually adjust which power states
+ * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
+ * and pp_dpm_pcie files and adjust the power state transition heuristics
+ * via the pp_power_profile_mode sysfs file.
+ *
+ * profile_standard
+ * profile_min_sclk
+ * profile_min_mclk
+ * profile_peak
+ *
+ * When the profiling modes are selected, clock and power gating are
+ * disabled and the clocks are set for different profiling cases. This
+ * mode is recommended for profiling specific work loads where you do
+ * not want clock or power gating for clock fluctuation to interfere
+ * with your results. profile_standard sets the clocks to a fixed clock
+ * level which varies from asic to asic.  profile_min_sclk forces the sclk
+ * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
+ * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
+ *
+ */
+
 static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 						struct device_attribute *attr,
 								char *buf)

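As a usage illustration for the interface documented above, here is a minimal
userspace sketch. The sysfs path /sys/class/drm/card0/device/... is an
assumption for illustration only; the card index depends on the system.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical path; adjust the card index for the target system. */
#define PERF_LEVEL "/sys/class/drm/card0/device/power_dpm_force_performance_level"

int main(void)
{
	int fd = open(PERF_LEVEL, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Force the lowest power state, as described in the DOC comment. */
	if (write(fd, "low", strlen("low")) < 0)
		perror("write");
	close(fd);
	return 0;
}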
From ca8d40ca194dfb573e59a5e42b88da83e63a6630 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 19 Apr 2018 13:56:41 -0500
Subject: [PATCH 0833/1286] drm/amdgpu/pm: document power_dpm_state

This is a legacy file and is only provided for
backwards compatibility.

Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 31 ++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ee11e92cc4d2..e33e0f4c4a28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -77,6 +77,37 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 	}
 }
 
+/**
+ * DOC: power_dpm_state
+ *
+ * This is a legacy interface and is only provided for backwards compatibility.
+ * The amdgpu driver provides a sysfs API for adjusting certain power
+ * related parameters.  The file power_dpm_state is used for this.
+ * It accepts the following arguments:
+ * - battery
+ * - balanced
+ * - performance
+ *
+ * battery
+ *
+ * On older GPUs, the vbios provided a special power state for battery
+ * operation.  Selecting battery switched to this state.  This is no
+ * longer provided on newer GPUs so the option does nothing in that case.
+ *
+ * balanced
+ *
+ * On older GPUs, the vbios provided a special power state for balanced
+ * operation.  Selecting balanced switched to this state.  This is no
+ * longer provided on newer GPUs so the option does nothing in that case.
+ *
+ * performance
+ *
+ * On older GPUs, the vbios provided a special power state for performance
+ * operation.  Selecting performance switched to this state.  This is no
+ * longer provided on newer GPUs so the option does nothing in that case.
+ *
+ */
+
 static ssize_t amdgpu_get_dpm_state(struct device *dev,
 				    struct device_attribute *attr,
 				    char *buf)

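A corresponding read-side sketch for this legacy file is shown below; it is
illustrative only and assumes the same hypothetical card0 sysfs path.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical path; adjust the card index for the target system. */
#define DPM_STATE "/sys/class/drm/card0/device/power_dpm_state"

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open(DPM_STATE, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		/* One of: battery, balanced, performance */
		printf("current power_dpm_state: %s", buf);
	}
	close(fd);
	return 0;
}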
From d54bb40f607d40fca60da0613c65005086653300 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 19 Apr 2018 14:02:52 -0500
Subject: [PATCH 0834/1286] drm/amdgpu/pm: document pp_table

This file is for uploading new powerplay tables.

Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index e33e0f4c4a28..9982f1b1f8c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -408,6 +408,17 @@ fail:
 	return count;
 }
 
+/**
+ * DOC: pp_table
+ *
+ * The amdgpu driver provides a sysfs API for uploading new powerplay
+ * tables.  The file pp_table is used for this.  Reading the file
+ * will dump the current power play table.  Writing to the file
+ * will attempt to upload a new powerplay table and re-initialize
+ * powerplay using that new table.
+ *
+ */
+
 static ssize_t amdgpu_get_pp_table(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)

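To make the upload path concrete, a small sketch that copies a binary
powerplay table into pp_table follows. Both file paths are assumptions for
illustration; the driver re-initializes powerplay after a successful write.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical paths for illustration only. */
#define PP_TABLE "/sys/class/drm/card0/device/pp_table"

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in = open("new_pp_table.bin", O_RDONLY);
	int out = open(PP_TABLE, O_WRONLY);

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	/* Stream the new table into the sysfs file. */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n) {
			perror("write");
			break;
		}
	close(in);
	close(out);
	return 0;
}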
From 271dc908732b72bb9b1ad22b7cd14e75df3612c5 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 19 Apr 2018 14:22:24 -0500
Subject: [PATCH 0835/1286] drm/amdgpu/pm: document pp_dpm_sclk pp_dpm_mclk
 pp_dpm_pcie (v2)

Used for manually masking dpm states.

v2: drop comment about current state (Rex)

Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 9982f1b1f8c4..07f2e9606337 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -539,6 +539,23 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 
 }
 
+/**
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+ *
+ * The amdgpu driver provides a sysfs API for adjusting what power levels
+ * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
+ * and pp_dpm_pcie are used for this.
+ *
+ * Reading back the files will show you the available power levels within
+ * the power state and the clock information for those levels.
+ *
+ * To manually adjust these states, first select manual using
+ * power_dpm_force_performance_level.  Writing a string of the level
+ * numbers to the file will select which levels you want to enable.
+ * E.g., writing 456 to the file will enable levels 4, 5, and 6.
+ *
+ */
+
 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)

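The following sketch exercises the masking interface as documented at this
point in the series (a digit string such as "456"); the sysfs paths are
assumptions for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical sysfs paths; the card index varies per system. */
#define PERF "/sys/class/drm/card0/device/power_dpm_force_performance_level"
#define SCLK "/sys/class/drm/card0/device/pp_dpm_sclk"

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
}

int main(void)
{
	write_str(PERF, "manual");	/* required before masking levels */
	write_str(SCLK, "456");		/* enable sclk levels 4, 5 and 6 */
	return 0;
}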
From 6b2576f5bddae4c89f29481f387735ac99e256d5 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 19 Apr 2018 14:38:31 -0500
Subject: [PATCH 0836/1286] drm/amdgpu/pm: document pp_power_profile_mode

sysfs file for adjusting power level heuristics.

Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 07f2e9606337..d6e66414bb12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -780,6 +780,26 @@ fail:
 	return count;
 }
 
+/**
+ * DOC: pp_power_profile_mode
+ *
+ * The amdgpu driver provides a sysfs API for adjusting the heuristics
+ * related to switching between power levels in a power state.  The file
+ * pp_power_profile_mode is used for this.
+ *
+ * Reading this file outputs a list of all of the predefined power profiles
+ * and the relevant heuristics settings for that profile.
+ *
+ * To select a profile or create a custom profile, first select manual using
+ * power_dpm_force_performance_level.  Writing the number of a predefined
+ * profile to pp_power_profile_mode will enable those heuristics.  To
+ * create a custom set of heuristics, write a string of numbers to the file
+ * starting with the number of the custom profile along with a setting
+ * for each heuristic parameter.  Due to differences across asic families
+ * the heuristic parameters vary from family to family.
+ *
+ */
+
 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 		struct device_attribute *attr,
 		char *buf)

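A minimal sketch of listing the predefined profiles and then selecting one;
the sysfs path and the chosen profile number are assumptions for
illustration, and manual mode must be selected beforehand as documented.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical path; adjust the card index for the target system. */
#define PROFILE "/sys/class/drm/card0/device/pp_power_profile_mode"

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open(PROFILE, O_RDONLY);

	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("%s", buf);	/* list of predefined profiles */
		}
		close(fd);
	}

	fd = open(PROFILE, O_WRONLY);
	if (fd >= 0) {
		write(fd, "2", 1);	/* select predefined profile number 2 */
		close(fd);
	}
	return 0;
}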
From 4e418c3401867cccc3ba67973d1e03510da7c92d Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 19 Apr 2018 14:59:55 -0500
Subject: [PATCH 0837/1286] drm/amdgpu/pm: document pp_od_clk_voltage

sysfs interface for fine-grained clock and voltage control.

Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d6e66414bb12..ce8be467608d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -455,6 +455,29 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	return count;
 }
 
+/**
+ * DOC: pp_od_clk_voltage
+ *
+ * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
+ * in each power level within a power state.  The pp_od_clk_voltage is used for
+ * this.
+ *
+ * Reading the file will display:
+ * - a list of engine clock levels and voltages labeled OD_SCLK
+ * - a list of memory clock levels and voltages labeled OD_MCLK
+ * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
+ *
+ * To manually adjust these settings, first select manual using
+ * power_dpm_force_performance_level. Enter a new value for each
+ * level by writing a string that contains "s/m level clock voltage" to
+ * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
+ * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
+ * 810 mV.  When you have edited all of the states as needed, write
+ * "c" (commit) to the file to commit your changes.  If you want to reset to the
+ * default power levels, write "r" (reset) to the file to reset them.
+ *
+ */
+
 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 		struct device_attribute *attr,
 		const char *buf,

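The edit/commit sequence described in the DOC comment looks like the sketch
below. The path is an assumption, manual mode must be selected first via
power_dpm_force_performance_level, and the clock/voltage values are only
examples taken from the documentation text.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical path; adjust the card index for the target system. */
#define OD_FILE "/sys/class/drm/card0/device/pp_od_clk_voltage"

static void od_write(const char *cmd)
{
	int fd = open(OD_FILE, O_WRONLY);

	if (fd < 0)
		return;
	write(fd, cmd, strlen(cmd));
	close(fd);
}

int main(void)
{
	od_write("s 1 500 820");	/* sclk level 1 -> 500 MHz at 820 mV */
	od_write("m 0 350 810");	/* mclk level 0 -> 350 MHz at 810 mV */
	od_write("c");			/* commit the edited table */
	return 0;
}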
From d10fb4a6f382474025f326bf90ee3b64396486ea Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 20 Apr 2018 12:57:10 +0800
Subject: [PATCH 0838/1286] drm/amd/pp: Change pstate_clk frequency unit to
 10KHz on Rv

to keep it consistent with other ASICs

Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 7712eb62539a..ef09073c88d9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -479,8 +479,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
 	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
 
-	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
-	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
+	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
+	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
 
 	return result;
 }

From 21c77de35661152e118908a081b8a51e7bca7bb4 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 20 Apr 2018 13:03:15 +0800
Subject: [PATCH 0839/1286] drm/amd/pp: Use dynamic gfx_clk rather than
 hardcoded values

Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 47 ++++++++++++-------
 .../gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h |  2 -
 2 files changed, 29 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index ef09073c88d9..be6d6e202819 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -383,7 +383,7 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
 
 static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
 {
-	int result;
+	uint32_t result;
 
 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
 	DpmClocks_t  *table = &(smu10_data->clock_table);
@@ -429,11 +429,11 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
 	result = smum_get_argument(hwmgr);
-	smu10_data->gfx_min_freq_limit = result * 100;
+	smu10_data->gfx_min_freq_limit = result / 10 * 1000;
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
 	result = smum_get_argument(hwmgr);
-	smu10_data->gfx_max_freq_limit = result * 100;
+	smu10_data->gfx_max_freq_limit = result / 10 * 1000;
 
 	return 0;
 }
@@ -515,6 +515,8 @@ static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 				enum amd_dpm_forced_level level)
 {
+	struct smu10_hwmgr *data = hwmgr->backend;
+
 	if (hwmgr->smu_version < 0x1E3700) {
 		pr_info("smu firmware version too old, can not set dpm level\n");
 		return 0;
@@ -525,7 +527,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinGfxClk,
-						SMU10_UMD_PSTATE_PEAK_GFXCLK);
+						data->gfx_max_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinFclkByFreq,
 						SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -538,7 +540,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxGfxClk,
-						SMU10_UMD_PSTATE_PEAK_GFXCLK);
+						data->gfx_max_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxFclkByFreq,
 						SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -552,10 +554,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		break;
 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -595,7 +597,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_AUTO:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinFclkByFreq,
 						SMU10_UMD_PSTATE_MIN_FCLK);
@@ -608,7 +610,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxGfxClk,
-						SMU10_UMD_PSTATE_PEAK_GFXCLK);
+						data->gfx_max_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxFclkByFreq,
 						SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -622,10 +624,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 	case AMD_DPM_FORCED_LEVEL_LOW:
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetSoftMaxGfxClk,
-						SMU10_UMD_PSTATE_MIN_GFXCLK);
+						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinFclkByFreq,
 						SMU10_UMD_PSTATE_MIN_FCLK);
@@ -773,21 +775,30 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
 	struct smu10_voltage_dependency_table *mclk_table =
 			data->clock_vol_info.vdd_dep_on_fclk;
-	int i, now, size = 0;
+	uint32_t i, now, size = 0;
 
 	switch (type) {
 	case PP_SCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
 		now = smum_get_argument(hwmgr);
 
+	/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
+		if (now == data->gfx_max_freq_limit/100)
+			i = 2;
+		else if (now == data->gfx_min_freq_limit/100)
+			i = 0;
+		else
+			i = 1;
+
 		size += sprintf(buf + size, "0: %uMhz %s\n",
-				data->gfx_min_freq_limit / 100,
-				((data->gfx_min_freq_limit / 100)
-				 == now) ? "*" : "");
+					data->gfx_min_freq_limit/100,
+					i == 0 ? "*" : "");
 		size += sprintf(buf + size, "1: %uMhz %s\n",
-				data->gfx_max_freq_limit / 100,
-				((data->gfx_max_freq_limit / 100)
-				 == now) ? "*" : "");
+					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
+					i == 1 ? "*" : "");
+		size += sprintf(buf + size, "2: %uMhz %s\n",
+					data->gfx_max_freq_limit/100,
+					i == 2 ? "*" : "");
 		break;
 	case PP_MCLK:
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index f68b218b9bce..1fb296a996f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -311,11 +311,9 @@ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
 #define SMU10_UMD_PSTATE_FCLK                   933
 #define SMU10_UMD_PSTATE_VCE                    0x03C00320
 
-#define SMU10_UMD_PSTATE_PEAK_GFXCLK            1100
 #define SMU10_UMD_PSTATE_PEAK_SOCCLK            757
 #define SMU10_UMD_PSTATE_PEAK_FCLK              1200
 
-#define SMU10_UMD_PSTATE_MIN_GFXCLK             200
 #define SMU10_UMD_PSTATE_MIN_FCLK               400
 #define SMU10_UMD_PSTATE_MIN_SOCCLK             200
 #define SMU10_UMD_PSTATE_MIN_VCE                0x0190012C

From ca6e0c5bdc44a2cd7152002191a8107fc566084f Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 18 Apr 2018 18:43:19 +0800
Subject: [PATCH 0840/1286] drm/amd/pp: Refine the OD state checking code in
 smu7

If vddc is restored to its default value, the driver clears the
DPMTABLE_OD_UPDATE_VDDC bit and needs to repopulate the sclk and
mclk tables.

1. Remove the check on variable i.
2. Move clearing of the DPMTABLE_OD_UPDATE_VDDC bit to the end of the
   function so the sclk table update is not skipped.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 720ac47d3365..965459326652 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4683,10 +4683,6 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 			return;
 		}
 	}
-	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
-		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-	}
 
 	dep_table = table_info->vdd_dep_on_sclk;
 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
@@ -4696,9 +4692,9 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 			return;
 		}
 	}
-	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
 	}
 }
 

From d389d607e60809726fe818113c80f5fc3aac4675 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 18 Apr 2018 21:09:35 +0800
Subject: [PATCH 0841/1286] drm/amd/pp: Change voltage/clk range for OD feature
 on VI

read vddc range from vbios.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c  | 28 +++++++++
 .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h  |  3 +
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  | 60 ++++++++++++-------
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h  |  2 +
 4 files changed, 73 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 971fb5dfb620..d58be7eb8256 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1505,3 +1505,31 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
 
 	return 0;
 }
+
+void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
+							uint32_t *min_vddc)
+{
+	void *profile;
+
+	profile = smu_atom_get_data_table(hwmgr->adev,
+					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+					NULL, NULL, NULL);
+
+	if (profile) {
+		switch (hwmgr->chip_id) {
+		case CHIP_TONGA:
+		case CHIP_FIJI:
+			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
+			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
+			break;
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS10:
+		case CHIP_POLARIS12:
+			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
+			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
+			break;
+		default:
+			return;
+		}
+	}
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index c672a5069840..e1b5d6b0b548 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -320,5 +320,8 @@ extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
 					uint16_t virtual_voltage_id,
 					uint16_t efuse_voltage_id);
 extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id);
+
+extern void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
+							uint32_t *min_vddc);
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 965459326652..e1196372a6ba 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -838,6 +838,33 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t min_vddc, max_vddc;
+
+	if (!table_info)
+		return;
+
+	dep_sclk_table = table_info->vdd_dep_on_sclk;
+
+	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
+
+	if (min_vddc == 0 || min_vddc > 2000
+		|| min_vddc > dep_sclk_table->entries[0].vddc)
+		min_vddc = dep_sclk_table->entries[0].vddc;
+
+	if (max_vddc == 0 || max_vddc > 2000
+		|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
+		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
+
+	data->odn_dpm_table.min_vddc = min_vddc;
+	data->odn_dpm_table.max_vddc = max_vddc;
+}
+
 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -856,8 +883,10 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 			sizeof(struct smu7_dpm_table));
 
 	/* initialize ODN table */
-	if (hwmgr->od_enabled)
+	if (hwmgr->od_enabled) {
+		smu7_setup_voltage_range_from_vbios(hwmgr);
 		smu7_odn_initial_default_setting(hwmgr);
+	}
 
 	return 0;
 }
@@ -4605,36 +4634,27 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t min_vddc;
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
-
-	if (table_info == NULL)
-		return false;
-
-	dep_sclk_table = table_info->vdd_dep_on_sclk;
-	min_vddc = dep_sclk_table->entries[0].vddc;
-
-	if (voltage < min_vddc || voltage > 2000) {
-		pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc);
+	if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
+		pr_info("OD voltage is out of range [%d - %d] mV\n",
+						data->odn_dpm_table.min_vddc,
+						data->odn_dpm_table.max_vddc);
 		return false;
 	}
 
 	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
-		if (data->vbios_boot_state.sclk_bootup_value > clk ||
+		if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
 			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
 			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
-				data->vbios_boot_state.sclk_bootup_value,
-				hwmgr->platform_descriptor.overdriveLimit.engineClock / 100);
+				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
 			return false;
 		}
 	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
-		if (data->vbios_boot_state.mclk_bootup_value > clk ||
+		if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
 			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
 			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
-				data->vbios_boot_state.mclk_bootup_value/100,
-				hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100);
+				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
 			return false;
 		}
 	} else {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index f40179c9ca97..51a776ed5906 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -184,6 +184,8 @@ struct smu7_odn_dpm_table {
 	struct smu7_odn_clock_voltage_dependency_table	vdd_dependency_on_sclk;
 	struct smu7_odn_clock_voltage_dependency_table	vdd_dependency_on_mclk;
 	uint32_t					odn_mclk_min_limit;
+	uint32_t min_vddc;
+	uint32_t max_vddc;
 };
 
 struct profile_mode_setting {

From a3c991f922f99160cb695f9d28e04cd8e818d6f9 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 19 Apr 2018 10:39:17 +0800
Subject: [PATCH 0842/1286] drm/amd/pp: Print out voltage/clock range in sysfs

When the user reads pp_od_clk_voltage, also display the sclk/mclk/vddc
ranges that can be overdriven.
The output looks like:
OD_SCLK:
0:        300MHz        900mV
1:        400MHz        912mV
2:        500MHz        925mV
3:        600MHz        937mV
4:        700MHz        950mV
5:        800MHz        975mV
6:        900MHz        987mV
7:       1000MHz       1000mV
OD_MCLK:
0:        300MHz        900mV
1:       1500MHz        912mV
OD_RANGE:
SCLK:     300MHz       1200MHz
MCLK:     300MHz       1500MHz
VDDC:     700mV        1200mV

Also:
1. remove unnecessary whitespace before a quoted newline
2. change the frequency unit from Mhz to MHz

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c        |  1 +
 .../gpu/drm/amd/include/kgd_pp_interface.h    |  1 +
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  | 26 ++++++++++++++-----
 3 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ce8be467608d..27d8dd77860d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -555,6 +555,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	if (adev->powerplay.pp_funcs->print_clock_levels) {
 		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
 		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
+		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
 		return size;
 	} else {
 		return snprintf(buf, PAGE_SIZE, "\n");
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 01969b135ab4..06f08f34a110 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -94,6 +94,7 @@ enum pp_clock_type {
 	PP_PCIE,
 	OD_SCLK,
 	OD_MCLK,
+	OD_RANGE,
 };
 
 enum amd_pp_sensors {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index e1196372a6ba..232ea6fc30f4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4335,22 +4335,36 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 		break;
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
-			size = sprintf(buf, "%s: \n", "OD_SCLK");
+			size = sprintf(buf, "%s:\n", "OD_SCLK");
 			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
-				size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
-					i, odn_sclk_table->entries[i].clock / 100,
+				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+					i, odn_sclk_table->entries[i].clock/100,
 					odn_sclk_table->entries[i].vddc);
 		}
 		break;
 	case OD_MCLK:
 		if (hwmgr->od_enabled) {
-			size = sprintf(buf, "%s: \n", "OD_MCLK");
+			size = sprintf(buf, "%s:\n", "OD_MCLK");
 			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
-				size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
-					i, odn_mclk_table->entries[i].clock / 100,
+				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+					i, odn_mclk_table->entries[i].clock/100,
 					odn_mclk_table->entries[i].vddc);
 		}
 		break;
+	case OD_RANGE:
+		if (hwmgr->od_enabled) {
+			size = sprintf(buf, "%s:\n", "OD_RANGE");
+			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+				data->odn_dpm_table.min_vddc,
+				data->odn_dpm_table.max_vddc);
+		}
+		break;
 	default:
 		break;
 	}

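To show the new output format in use, here is a small sketch that parses the
OD_RANGE block now printed by smu7_print_clock_levels(). The sysfs path is an
assumption for illustration; the column widths are plain whitespace as far as
sscanf() is concerned.

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; adjust the card index for the target system. */
	FILE *f = fopen("/sys/class/drm/card0/device/pp_od_clk_voltage", "r");
	char line[128];
	unsigned int lo, hi;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Lines look like "SCLK:     300MHz       1200MHz" */
		if (sscanf(line, "SCLK: %uMHz %uMHz", &lo, &hi) == 2)
			printf("sclk OD range: %u-%u MHz\n", lo, hi);
		else if (sscanf(line, "VDDC: %umV %umV", &lo, &hi) == 2)
			printf("vddc OD range: %u-%u mV\n", lo, hi);
	}
	fclose(f);
	return 0;
}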
From 9e70b539292652d31091568f89e73b54e3a4f79d Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Sat, 21 Apr 2018 14:09:59 -0500
Subject: [PATCH 0843/1286] drm/amdgpu/powerplay: actually return the power
 with the new query
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Set query to the power value so we actually return it.  This fixes the
case where no power value was returned on ASICs with the new query.

Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Reviewed-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 232ea6fc30f4..c9dd0bec1e24 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3369,6 +3369,7 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
 
 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
 	tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+	*query = tmp;
 
 	if (tmp != 0)
 		return 0;

From 6c8d74caa2fa33908ecd07fb1cf1b7bc629b367a Mon Sep 17 00:00:00 2001
From: Samuel Li <Samuel.Li@amd.com>
Date: Wed, 18 Apr 2018 16:15:52 -0400
Subject: [PATCH 0844/1286] drm/amdgpu: Enable scatter gather display support

Enable scatter/gather display if the VRAM size is <= the threshold
(256M); otherwise keep using VRAM as the display buffer.
This fixes some potential issues introduced by the change
"allow framebuffer in GART memory as well", caused by a CZ/ST hardware
limitation.

v2: Change default setting to auto.
v3: Move some logic from amdgpu_display_framebuffer_domains()
    to pin function, suggested by Christian.
v4: Split into several patches.
v5: Drop module parameter for now.

Signed-off-by: Samuel Li <Samuel.Li@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 9 +++++++++
 2 files changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 75700552c71d..03a2c0be0bf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -138,6 +138,7 @@ extern int amdgpu_si_support;
 extern int amdgpu_cik_support;
 #endif
 
+#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
 #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1985c08413c6..e62153a86001 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -701,6 +701,15 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 			return -EINVAL;
 	}
 
+	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
+	 * See function amdgpu_display_supported_domains()
+	 */
+	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+			domain = AMDGPU_GEM_DOMAIN_GTT;
+	}
+
 	if (bo->pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
 

From 8239f57ac3e9bf9ad0cf4d396ebfa721e91ac611 Mon Sep 17 00:00:00 2001
From: Junwei Zhang <Jerry.Zhang@amd.com>
Date: Mon, 23 Apr 2018 17:21:21 +0800
Subject: [PATCH 0845/1286] drm/amdgpu: bo could be null when access in vm bo
 update

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: David Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6a372ca11ee3..1c00f1a56e8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1509,7 +1509,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct drm_mm_node *nodes;
 	struct dma_fence *exclusive, **last_update;
 	uint64_t flags;
-	uint32_t mem_type;
 	int r;
 
 	if (clear || !bo_va->base.bo) {
@@ -1568,9 +1567,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
 	 */
-	mem_type = bo->tbo.mem.mem_type;
 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
-	    !(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+	    !(bo->preferred_domains &
+	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
 		list_add_tail(&bo_va->base.vm_status, &vm->evicted);
 	spin_unlock(&vm->status_lock);
 

From 38610f15a7ad7a914e4fd0a9a5a6c386700b8ba0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nicolai=20H=C3=A4hnle?= <nicolai.haehnle@amd.com>
Date: Thu, 12 Apr 2018 16:34:19 +0200
Subject: [PATCH 0846/1286] drm/amdgpu: set COMPUTE_PGM_RSRC1 for SGPR/VGPR
 clearing shaders
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Otherwise, the SQ may skip some of the register writes, or shader waves may
be allocated where we don't expect them, so that as a result we don't actually
reset all of the register SRAMs. This can lead to spurious ECC errors later on
if a shader uses an uninitialized register.

Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b0e591eaa71a..e14263fca1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
 static const u32 vgpr_init_regs[] =
 {
 	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
-	mmCOMPUTE_RESOURCE_LIMITS, 0,
+	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
 	mmCOMPUTE_NUM_THREAD_X, 256*4,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
 static const u32 sgpr1_init_regs[] =
 {
 	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
-	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
 	mmCOMPUTE_NUM_THREAD_X, 256*5,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
 	mmCOMPUTE_NUM_THREAD_X, 256*5,
 	mmCOMPUTE_NUM_THREAD_Y, 1,
 	mmCOMPUTE_NUM_THREAD_Z, 1,
+	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
 	mmCOMPUTE_PGM_RSRC2, 20,
 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,

From 48edde3959e2a538ff963e6dbdc9c9adca8b159b Mon Sep 17 00:00:00 2001
From: welu <wei.lu2@amd.com>
Date: Tue, 24 Apr 2018 09:13:20 -0400
Subject: [PATCH 0847/1286] drm/amdgpu: change pp_dpm clk/mclk/pcie input
 format.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

1. Support more than 8 values when setting pp_dpm_mclk/sclk/pcie. The
former design only parsed input of the form "echo xxxx > pp_dpm_sclk";
the code can now also parse "echo xx xxx xxxx > pp_dpm_sclk", which is
more user-friendly and convenient and allows more values to be given.
2. Stay compatible with the former single-string form like "xx".
3. Add DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
Bug: KFD-385

Signed-off-by: welu <wei.lu2@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 103 ++++++++++++++-----------
 1 file changed, 59 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 27d8dd77860d..d9802d938e33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -574,10 +574,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
  * the power state and the clock information for those levels.
  *
  * To manually adjust these states, first select manual using
- * power_dpm_force_performance_level.  Writing a string of the level
- * numbers to the file will select which levels you want to enable.
- * E.g., writing 456 to the file will enable levels 4, 5, and 6.
- *
+ * power_dpm_force_performance_level.
+ * Secondly, enter a new value for each level by writing a string that
+ * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
+ * E.g., "echo 4 5 6 > pp_dpm_sclk" will enable sclk levels 4, 5, and 6.
  */
 
 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
@@ -602,23 +602,27 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret;
 	long level;
-	uint32_t i, mask = 0;
-	char sub_str[2];
+	uint32_t mask = 0;
+	char *sub_str = NULL;
+	char *tmp;
+	char buf_cpy[count];
+	const char delimiter[3] = {' ', '\n', '\0'};
 
-	for (i = 0; i < strlen(buf); i++) {
-		if (*(buf + i) == '\n')
-			continue;
-		sub_str[0] = *(buf + i);
-		sub_str[1] = '\0';
-		ret = kstrtol(sub_str, 0, &level);
+	memcpy(buf_cpy, buf, count+1);
+	tmp = buf_cpy;
+	while (tmp[0]) {
+		sub_str =  strsep(&tmp, delimiter);
+		if (strlen(sub_str)) {
+			ret = kstrtol(sub_str, 0, &level);
 
-		if (ret) {
-			count = -EINVAL;
-			goto fail;
-		}
-		mask |= 1 << level;
+			if (ret) {
+				count = -EINVAL;
+				goto fail;
+			}
+			mask |= 1 << level;
+		} else
+			break;
 	}
-
 	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
@@ -648,21 +652,26 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret;
 	long level;
-	uint32_t i, mask = 0;
-	char sub_str[2];
+	uint32_t mask = 0;
+	char *sub_str = NULL;
+	char *tmp;
+	char buf_cpy[count];
+	const char delimiter[3] = {' ', '\n', '\0'};
 
-	for (i = 0; i < strlen(buf); i++) {
-		if (*(buf + i) == '\n')
-			continue;
-		sub_str[0] = *(buf + i);
-		sub_str[1] = '\0';
-		ret = kstrtol(sub_str, 0, &level);
+	memcpy(buf_cpy, buf, count+1);
+	tmp = buf_cpy;
+	while (tmp[0]) {
+		sub_str =  strsep(&tmp, delimiter);
+		if (strlen(sub_str)) {
+			ret = kstrtol(sub_str, 0, &level);
 
-		if (ret) {
-			count = -EINVAL;
-			goto fail;
-		}
-		mask |= 1 << level;
+			if (ret) {
+				count = -EINVAL;
+				goto fail;
+			}
+			mask |= 1 << level;
+		} else
+			break;
 	}
 	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
@@ -693,21 +702,27 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	int ret;
 	long level;
-	uint32_t i, mask = 0;
-	char sub_str[2];
+	uint32_t mask = 0;
+	char *sub_str = NULL;
+	char *tmp;
+	char buf_cpy[count];
+	const char delimiter[3] = {' ', '\n', '\0'};
 
-	for (i = 0; i < strlen(buf); i++) {
-		if (*(buf + i) == '\n')
-			continue;
-		sub_str[0] = *(buf + i);
-		sub_str[1] = '\0';
-		ret = kstrtol(sub_str, 0, &level);
+	memcpy(buf_cpy, buf, count+1);
+	tmp = buf_cpy;
 
-		if (ret) {
-			count = -EINVAL;
-			goto fail;
-		}
-		mask |= 1 << level;
+	while (tmp[0]) {
+		sub_str =  strsep(&tmp, delimiter);
+		if (strlen(sub_str)) {
+			ret = kstrtol(sub_str, 0, &level);
+
+			if (ret) {
+				count = -EINVAL;
+				goto fail;
+			}
+			mask |= 1 << level;
+		} else
+			break;
 	}
 	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);

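A userspace-compilable sketch of the space-separated parsing this patch
introduces is shown below. It mirrors the strsep()/kstrtol() loop in the
diff (using strtol() here) and is illustrative only; the exact handling of
empty tokens differs slightly from the kernel code.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build a level mask from input such as "4 5 6\n", in the spirit of
 * the new amdgpu_set_pp_dpm_* handlers.
 */
static unsigned int parse_level_mask(const char *buf)
{
	char copy[64];
	char *tmp = copy;
	char *sub;
	unsigned int mask = 0;

	strncpy(copy, buf, sizeof(copy) - 1);
	copy[sizeof(copy) - 1] = '\0';

	while ((sub = strsep(&tmp, " \n")) != NULL) {
		if (!strlen(sub))
			continue;		/* skip empty tokens */
		mask |= 1u << strtol(sub, NULL, 0);
	}
	return mask;
}

int main(void)
{
	printf("0x%x\n", parse_level_mask("4 5 6\n"));	/* prints 0x70 */
	return 0;
}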
From 09daf474d27aeb9fbd2f665b613d98c76f1e84f0 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:34 +0200
Subject: [PATCH 0848/1286] drm/amdgpu: fix amdgpu_atpx_get_client_id()'s
 return type

The method struct vga_switcheroo_handler::get_client_id() is defined
as returning an 'enum vga_switcheroo_client_id' but the implementation
in this driver, amdgpu_atpx_get_client_id(), returns an 'int'.

Fix this by returning 'enum vga_switcheroo_client_id' in this driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 1ae5ae8c45a4..1bcb2b247335 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -550,7 +550,7 @@ static int amdgpu_atpx_init(void)
  * look up whether we are the integrated or discrete GPU (all asics).
  * Returns the client id.
  */
-static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
+static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev)
 {
 	if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
 		return VGA_SWITCHEROO_IGD;

From 4a8f264a8a8756bb7d3a478d08e449c67d291ab9 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:41 +0200
Subject: [PATCH 0849/1286] drm/radeon: fix radeon_atpx_get_client_id()'s
 return type

The method struct vga_switcheroo_handler::get_client_id() is defined
as returning an 'enum vga_switcheroo_client_id' but the implementation
in this driver, radeon_atpx_get_client_id(), returns an 'int'.

Fix this by returning 'enum vga_switcheroo_client_id' in this driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/radeon/radeon_atpx_handler.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 40be4068ca69..fa5fadaa9bbb 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -526,7 +526,7 @@ static int radeon_atpx_init(void)
  * look up whether we are the integrated or discrete GPU (all asics).
  * Returns the client id.
  */
-static int radeon_atpx_get_client_id(struct pci_dev *pdev)
+static enum vga_switcheroo_client_id radeon_atpx_get_client_id(struct pci_dev *pdev)
 {
 	if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
 		return VGA_SWITCHEROO_IGD;

From 7a47f20eb1fb8fa8d7a8fe3a4fd8c721f04c2174 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:13 +0200
Subject: [PATCH 0850/1286] drm/radeon: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/radeon/radeon_connectors.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index df9469a8fdb1..2aea2bdff99b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -852,7 +852,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int radeon_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -1012,7 +1012,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int radeon_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1156,7 +1156,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
 	return 1;
 }
 
-static int radeon_tv_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 {
 	if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
@@ -1498,7 +1498,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
 		radeon_connector->use_digital = true;
 }
 
-static int radeon_dvi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1800,7 +1800,7 @@ out:
 	return ret;
 }
 
-static int radeon_dp_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;

From ba9ca0886dc0541ac1a716b3cbd43f640a1ce8c4 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:14:18 +0200
Subject: [PATCH 0851/1286] drm/amdgpu: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status' but the driver implementation
for this method uses an 'int' for it.

Fix this by using 'enum drm_mode_status' in the driver too.

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c    | 8 ++++----
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c          | 2 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 96501ff0e55b..8e66851eb427 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -691,7 +691,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
 					     struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -843,7 +843,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
 	return ret;
 }
 
-static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
 					    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1172,7 +1172,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
 		amdgpu_connector->use_digital = true;
 }
 
-static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
 					    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1448,7 +1448,7 @@ out:
 	return ret;
 }
 
-static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
 					   struct drm_display_mode *mode)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 89b2286a9d6b..6454cc371f57 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -327,7 +327,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
-static int dce_virtual_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	return MODE_OK;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 28d8c08efeeb..656a01891f6c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2838,7 +2838,7 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
 	create_eml_sink(aconnector);
 }
 
-int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
 				   struct drm_display_mode *mode)
 {
 	int result = MODE_ERROR;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 005cf0d2dc34..d5aa89ad5571 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -247,7 +247,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 				     struct dc_link *link,
 				     int link_index);
 
-int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
 				   struct drm_display_mode *mode);
 
 void dm_restore_drm_connector_state(struct drm_device *dev,

From c5a4484941be553b37facd681daf990d040cce81 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 18 Apr 2018 18:46:07 +0800
Subject: [PATCH 0852/1286] drm/amd/pp: Add OVERDRIVE support on Vega10 (v2)

When bit 14 of the module parameter ppfeaturemask is set, the OD
(OverDrive) feature will be enabled on Vega10, unless the VBIOS does not
support it.

Users can read the OD range from the sysfs file pp_od_clk_voltage:
cat pp_od_clk_voltage
OD_SCLK:
0:        852Mhz        800mV
1:        991Mhz        900mV
2:       1138Mhz        950mV
3:       1269Mhz       1000mV
4:       1348Mhz       1050mV
5:       1399Mhz       1100mV
6:       1440Mhz       1150mV
7:       1500Mhz       1200mV
OD_MCLK:
0:        167Mhz        800mV
1:        500Mhz        800mV
2:        800Mhz        950mV
3:        945Mhz       1000mV
OD_RANGE:
SCLK:     852MHz       2200MHz
MCLK:     167MHz       1500MHz
VDDC:     800mV        1200mV

and can configure the clock/voltage by writing to pp_od_clk_voltage,
for example:

echo "s 0 900 820" > pp_od_clk_voltage to change the sclk/vddc
to 900 MHz and 820 mV at dpm level 0.

echo "r" > pp_od_clk_voltage to restore the clocks/voltages to their
default values.

echo "c" > pp_od_clk_voltage
to commit the change
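
A rough sketch (derived from the vega10_odn_edit_dpm_table() function added
by this patch; only the names visible in the diff are real, the surrounding
scaffolding is illustrative) of how each "s/m <level> <MHz> <mV>" triple
written to pp_od_clk_voltage is applied:

	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
			pr_info("invalid clock voltage input\n");
			return 0;	/* malformed triple or bad level index */
		}

		input_level = input[i];			/* DPM level to edit */
		input_clk   = input[i + 1] * 100;	/* sysfs takes MHz; table stores 10 kHz units */
		input_vol   = input[i + 2];		/* millivolts */

		/* reject values outside the limits reported in OD_RANGE */
		if (!vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol))
			return -EINVAL;

		dpm_table->dpm_levels[input_level].value      = input_clk;
		podn_vdd_dep_table->entries[input_level].clk  = input_clk;
		podn_vdd_dep_table->entries[input_level].vddc = input_vol;
	}
	/* afterwards vega10_odn_update_soc_table() keeps the SOC clock table consistent */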

v2: squash in warning fix (Alex)

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c    | 723 +++++++++---------
 .../drm/amd/powerplay/hwmgr/vega10_hwmgr.h    |  26 +-
 .../drm/amd/powerplay/inc/hardwaremanager.h   |   6 +-
 3 files changed, 392 insertions(+), 363 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 384aa07206c0..748612074d20 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -285,6 +285,48 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+	struct phm_ppt_v2_information *table_info =
+			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+	struct vega10_odn_vddc_lookup_table *od_lookup_table;
+	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
+	struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+	uint32_t i;
+
+	od_lookup_table = &odn_table->vddc_lookup_table;
+	vddc_lookup_table = table_info->vddc_lookup_table;
+
+	for (i = 0; i < vddc_lookup_table->count; i++)
+		od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
+
+	od_lookup_table->count = vddc_lookup_table->count;
+
+	dep_table[0] = table_info->vdd_dep_on_sclk;
+	dep_table[1] = table_info->vdd_dep_on_mclk;
+	dep_table[2] = table_info->vdd_dep_on_socclk;
+	od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
+	od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
+	od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
+
+	for (i = 0; i < 3; i++)
+		smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
+
+	if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
+		odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
+	if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
+		odn_table->min_vddc = dep_table[0]->entries[0].vddc;
+
+	i = od_table[2]->count - 1;
+	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock;
+	od_table[2]->entries[i].vddc = odn_table->max_vddc;
+
+	return 0;
+}
+
 static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
@@ -421,7 +463,6 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 		/* ACG firmware has major version 5 */
 	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
 		data->smu_features[GNLD_ACG].supported = true;
-
 	if (data->registry_data.didt_support)
 		data->smu_features[GNLD_DIDT].supported = true;
 
@@ -1360,48 +1401,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
 			sizeof(struct vega10_dpm_table));
 
-	if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
-	    PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
-		data->odn_dpm_table.odn_core_clock_dpm_levels.num_of_pl =
-						data->dpm_table.gfx_table.count;
-		for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
-			data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].clock =
-					data->dpm_table.gfx_table.dpm_levels[i].value;
-			data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].enabled = true;
-		}
-
-		data->odn_dpm_table.vdd_dependency_on_sclk.count =
-				dep_gfx_table->count;
-		for (i = 0; i < dep_gfx_table->count; i++) {
-			data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
-					dep_gfx_table->entries[i].clk;
-			data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
-					dep_gfx_table->entries[i].vddInd;
-			data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
-					dep_gfx_table->entries[i].cks_enable;
-			data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
-					dep_gfx_table->entries[i].cks_voffset;
-		}
-
-		data->odn_dpm_table.odn_memory_clock_dpm_levels.num_of_pl =
-						data->dpm_table.mem_table.count;
-		for (i = 0; i < data->dpm_table.mem_table.count; i++) {
-			data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].clock =
-					data->dpm_table.mem_table.dpm_levels[i].value;
-			data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].enabled = true;
-		}
-
-		data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
-		for (i = 0; i < dep_mclk_table->count; i++) {
-			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
-					dep_mclk_table->entries[i].clk;
-			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
-					dep_mclk_table->entries[i].vddInd;
-			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
-					dep_mclk_table->entries[i].vddci;
-		}
-	}
-
 	return 0;
 }
 
@@ -1504,18 +1503,18 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
 {
 	struct phm_ppt_v2_information *table_info =
 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
-			table_info->vdd_dep_on_sclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
 	struct vega10_hwmgr *data = hwmgr->backend;
 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
 	uint32_t gfx_max_clock =
 			hwmgr->platform_descriptor.overdriveLimit.engineClock;
 	uint32_t i = 0;
 
-	if (data->apply_overdrive_next_settings_mask &
-			DPMTABLE_OD_UPDATE_VDDC)
+	if (hwmgr->od_enabled)
 		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
-						&(data->odn_dpm_table.vdd_dependency_on_sclk);
+						&(data->odn_dpm_table.vdd_dep_on_sclk);
+	else
+		dep_on_sclk = table_info->vdd_dep_on_sclk;
 
 	PP_ASSERT_WITH_CODE(dep_on_sclk,
 			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
@@ -1567,23 +1566,32 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
 		uint32_t soc_clock, uint8_t *current_soc_did,
 		uint8_t *current_vol_index)
 {
+	struct vega10_hwmgr *data = hwmgr->backend;
 	struct phm_ppt_v2_information *table_info =
 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
-			table_info->vdd_dep_on_socclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
 	uint32_t i;
 
-	PP_ASSERT_WITH_CODE(dep_on_soc,
-			"Invalid SOC_VDD-SOC_CLK Dependency Table!",
-			return -EINVAL);
-	for (i = 0; i < dep_on_soc->count; i++) {
-		if (dep_on_soc->entries[i].clk == soc_clock)
-			break;
+	if (hwmgr->od_enabled) {
+		dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
+						&data->odn_dpm_table.vdd_dep_on_socclk;
+		for (i = 0; i < dep_on_soc->count; i++) {
+			if (dep_on_soc->entries[i].clk >= soc_clock)
+				break;
+		}
+	} else {
+		dep_on_soc = table_info->vdd_dep_on_socclk;
+		for (i = 0; i < dep_on_soc->count; i++) {
+			if (dep_on_soc->entries[i].clk == soc_clock)
+				break;
+		}
 	}
+
 	PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
 			"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
 			return -EINVAL);
+
 	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
 			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
 			soc_clock, &dividers),
@@ -1592,22 +1600,6 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
 
 	*current_soc_did = (uint8_t)dividers.ulDid;
 	*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
-
-	return 0;
-}
-
-uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
-		uint32_t clk,
-		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
-	uint16_t i;
-
-	for (i = 0; i < dep_table->count; i++) {
-		if (dep_table->entries[i].clk == clk)
-			return dep_table->entries[i].vddc;
-	}
-
-	pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
 	return 0;
 }
 
@@ -1621,8 +1613,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
 	struct vega10_hwmgr *data = hwmgr->backend;
 	struct phm_ppt_v2_information *table_info =
 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
-			table_info->vdd_dep_on_socclk;
 	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
 	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
 	int result = 0;
@@ -1653,11 +1643,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
 
 	dpm_table = &(data->dpm_table.soc_table);
 	for (i = 0; i < dpm_table->count; i++) {
-		pp_table->SocVid[i] =
-				(uint8_t)convert_to_vid(
-				vega10_locate_vddc_given_clock(hwmgr,
-						dpm_table->dpm_levels[i].value,
-						dep_table));
 		result = vega10_populate_single_soc_level(hwmgr,
 				dpm_table->dpm_levels[i].value,
 				&(pp_table->SocclkDid[i]),
@@ -1668,7 +1653,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
 
 	j = i - 1;
 	while (i < NUM_SOCCLK_DPM_LEVELS) {
-		pp_table->SocVid[i] = pp_table->SocVid[j];
 		result = vega10_populate_single_soc_level(hwmgr,
 				dpm_table->dpm_levels[j].value,
 				&(pp_table->SocclkDid[i]),
@@ -1681,6 +1665,32 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
 	return result;
 }
 
+static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
+
+	uint8_t soc_vid = 0;
+	uint32_t i, max_vddc_level;
+
+	if (hwmgr->od_enabled)
+		vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
+	else
+		vddc_lookup_table = table_info->vddc_lookup_table;
+
+	max_vddc_level = vddc_lookup_table->count;
+	for (i = 0; i < max_vddc_level; i++) {
+		soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
+		pp_table->SocVid[i] = soc_vid;
+	}
+	while (i < MAX_REGULAR_DPM_NUMBER) {
+		pp_table->SocVid[i] = soc_vid;
+		i++;
+	}
+}
+
 /**
  * @brief Populates single SMC GFXCLK structure using the provided clock.
  *
@@ -1695,25 +1705,25 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
 	struct vega10_hwmgr *data = hwmgr->backend;
 	struct phm_ppt_v2_information *table_info =
 			(struct phm_ppt_v2_information *)(hwmgr->pptable);
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
-			table_info->vdd_dep_on_mclk;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
 	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
 	uint32_t mem_max_clock =
 			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
 	uint32_t i = 0;
 
-	if (data->apply_overdrive_next_settings_mask &
-			DPMTABLE_OD_UPDATE_VDDC)
+	if (hwmgr->od_enabled)
 		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
-					&data->odn_dpm_table.vdd_dependency_on_mclk;
+					&data->odn_dpm_table.vdd_dep_on_mclk;
+	else
+		dep_on_mclk = table_info->vdd_dep_on_mclk;
 
 	PP_ASSERT_WITH_CODE(dep_on_mclk,
 			"Invalid SOC_VDD-UCLK Dependency Table!",
 			return -EINVAL);
 
-	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
+	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
 		mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
-	else {
+	} else {
 		for (i = 0; i < dep_on_mclk->count; i++) {
 			if (dep_on_mclk->entries[i].clk == mem_clock)
 				break;
@@ -2057,6 +2067,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 	if (data->smu_features[GNLD_AVFS].supported) {
 		result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
 		if (!result) {
+			data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+			data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+
 			pp_table->MinVoltageVid = (uint8_t)
 					convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
 			pp_table->MaxVoltageVid = (uint8_t)
@@ -2335,6 +2348,22 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
 	return 0;
 }
 
+static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+
+	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+		vega10_avfs_enable(hwmgr, false);
+	} else if (data->need_update_dpm_table) {
+		vega10_avfs_enable(hwmgr, false);
+		vega10_avfs_enable(hwmgr, true);
+	} else {
+		vega10_avfs_enable(hwmgr, true);
+	}
+
+	return 0;
+}
+
 static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
 {
 	int result = 0;
@@ -2396,6 +2425,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 			"Failed to setup default DPM tables!",
 			return result);
 
+	/* initialize ODN table */
+	if (hwmgr->od_enabled)
+		vega10_odn_initial_default_setting(hwmgr);
+
 	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
 			VOLTAGE_OBJ_SVID2,  &voltage_table);
 	pp_table->MaxVidStep = voltage_table.max_vid_step;
@@ -2442,6 +2475,8 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 			"Failed to initialize Memory Level!",
 			return result);
 
+	vega10_populate_vddc_soc_levels(hwmgr);
+
 	result = vega10_populate_all_display_clock_levels(hwmgr);
 	PP_ASSERT_WITH_CODE(!result,
 			"Failed to initialize Display Level!",
@@ -3164,82 +3199,11 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
 {
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	const struct vega10_power_state *vega10_ps =
-			cast_const_phw_vega10_power_state(states->pnew_state);
 	struct vega10_hwmgr *data = hwmgr->backend;
-	struct vega10_single_dpm_table *sclk_table =
-			&(data->dpm_table.gfx_table);
-	uint32_t sclk = vega10_ps->performance_levels
-			[vega10_ps->performance_level_count - 1].gfx_clock;
-	struct vega10_single_dpm_table *mclk_table =
-			&(data->dpm_table.mem_table);
-	uint32_t mclk = vega10_ps->performance_levels
-			[vega10_ps->performance_level_count - 1].mem_clock;
-	struct PP_Clocks min_clocks = {0};
-	uint32_t i;
 
-	data->need_update_dpm_table = 0;
+	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
+		data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
 
-	if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
-	    PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
-		for (i = 0; i < sclk_table->count; i++) {
-			if (sclk == sclk_table->dpm_levels[i].value)
-				break;
-		}
-
-		if (!(data->apply_overdrive_next_settings_mask &
-				DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
-			/* Check SCLK in DAL's minimum clocks
-			 * in case DeepSleep divider update is required.
-			 */
-			if (data->display_timing.min_clock_in_sr !=
-					min_clocks.engineClockInSR &&
-				(min_clocks.engineClockInSR >=
-						VEGA10_MINIMUM_ENGINE_CLOCK ||
-					data->display_timing.min_clock_in_sr >=
-						VEGA10_MINIMUM_ENGINE_CLOCK))
-				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
-		}
-
-		if (data->display_timing.num_existing_displays !=
-				hwmgr->display_config->num_display)
-			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
-	} else {
-		for (i = 0; i < sclk_table->count; i++) {
-			if (sclk == sclk_table->dpm_levels[i].value)
-				break;
-		}
-
-		if (i >= sclk_table->count)
-			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-		else {
-			/* Check SCLK in DAL's minimum clocks
-			 * in case DeepSleep divider update is required.
-			 */
-			if (data->display_timing.min_clock_in_sr !=
-					min_clocks.engineClockInSR &&
-				(min_clocks.engineClockInSR >=
-						VEGA10_MINIMUM_ENGINE_CLOCK ||
-					data->display_timing.min_clock_in_sr >=
-						VEGA10_MINIMUM_ENGINE_CLOCK))
-				data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
-		}
-
-		for (i = 0; i < mclk_table->count; i++) {
-			if (mclk == mclk_table->dpm_levels[i].value)
-				break;
-		}
-
-		if (i >= mclk_table->count)
-			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
-		if (data->display_timing.num_existing_displays !=
-				hwmgr->display_config->num_display ||
-				i >= mclk_table->count)
-			data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
-	}
 	return 0;
 }
 
@@ -3247,194 +3211,29 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
 		struct pp_hwmgr *hwmgr, const void *input)
 {
 	int result = 0;
-	const struct phm_set_power_state_input *states =
-			(const struct phm_set_power_state_input *)input;
-	const struct vega10_power_state *vega10_ps =
-			cast_const_phw_vega10_power_state(states->pnew_state);
 	struct vega10_hwmgr *data = hwmgr->backend;
-	uint32_t sclk = vega10_ps->performance_levels
-			[vega10_ps->performance_level_count - 1].gfx_clock;
-	uint32_t mclk = vega10_ps->performance_levels
-			[vega10_ps->performance_level_count - 1].mem_clock;
-	struct vega10_dpm_table *dpm_table = &data->dpm_table;
-	struct vega10_dpm_table *golden_dpm_table =
-			&data->golden_dpm_table;
-	uint32_t dpm_count, clock_percent;
-	uint32_t i;
 
-	if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
-	    PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
+	if (!data->need_update_dpm_table)
+		return 0;
 
-		if (!data->need_update_dpm_table &&
-			!data->apply_optimized_settings &&
-			!data->apply_overdrive_next_settings_mask)
-			return 0;
-
-		if (data->apply_overdrive_next_settings_mask &
-				DPMTABLE_OD_UPDATE_SCLK) {
-			for (dpm_count = 0;
-					dpm_count < dpm_table->gfx_table.count;
-					dpm_count++) {
-				dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
-					data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].enabled;
-				dpm_table->gfx_table.dpm_levels[dpm_count].value =
-					data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].clock;
-			}
-		}
-
-		if (data->apply_overdrive_next_settings_mask &
-				DPMTABLE_OD_UPDATE_MCLK) {
-			for (dpm_count = 0;
-					dpm_count < dpm_table->mem_table.count;
-					dpm_count++) {
-				dpm_table->mem_table.dpm_levels[dpm_count].enabled =
-					data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].enabled;
-				dpm_table->mem_table.dpm_levels[dpm_count].value =
-					data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].clock;
-			}
-		}
-
-		if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
-			data->apply_optimized_settings ||
-			(data->apply_overdrive_next_settings_mask &
-					DPMTABLE_OD_UPDATE_SCLK)) {
-			result = vega10_populate_all_graphic_levels(hwmgr);
-			PP_ASSERT_WITH_CODE(!result,
-					"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
-					return result);
-		}
-
-		if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
-			(data->apply_overdrive_next_settings_mask &
-					DPMTABLE_OD_UPDATE_MCLK)){
-			result = vega10_populate_all_memory_levels(hwmgr);
-			PP_ASSERT_WITH_CODE(!result,
-					"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
-					return result);
-		}
-	} else {
-		if (!data->need_update_dpm_table &&
-				!data->apply_optimized_settings)
-			return 0;
-
-		if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
-				data->smu_features[GNLD_DPM_GFXCLK].supported) {
-				dpm_table->
-				gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
-				value = sclk;
-				if (hwmgr->od_enabled) {
-					/* Need to do calculation based on the golden DPM table
-					 * as the Heatmap GPU Clock axis is also based on
-					 * the default values
-					 */
-					PP_ASSERT_WITH_CODE(
-							golden_dpm_table->gfx_table.dpm_levels
-							[golden_dpm_table->gfx_table.count - 1].value,
-							"Divide by 0!",
-							return -1);
-
-					dpm_count = dpm_table->gfx_table.count < 2 ?
-							0 : dpm_table->gfx_table.count - 2;
-					for (i = dpm_count; i > 1; i--) {
-						if (sclk > golden_dpm_table->gfx_table.dpm_levels
-							[golden_dpm_table->gfx_table.count - 1].value) {
-							clock_percent =
-								((sclk - golden_dpm_table->gfx_table.dpm_levels
-								[golden_dpm_table->gfx_table.count - 1].value) *
-								100) /
-								golden_dpm_table->gfx_table.dpm_levels
-								[golden_dpm_table->gfx_table.count - 1].value;
-
-							dpm_table->gfx_table.dpm_levels[i].value =
-								golden_dpm_table->gfx_table.dpm_levels[i].value +
-								(golden_dpm_table->gfx_table.dpm_levels[i].value *
-								clock_percent) / 100;
-						} else if (golden_dpm_table->
-								gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
-								sclk) {
-							clock_percent =
-								((golden_dpm_table->gfx_table.dpm_levels
-								[golden_dpm_table->gfx_table.count - 1].value -
-								sclk) *	100) /
-								golden_dpm_table->gfx_table.dpm_levels
-								[golden_dpm_table->gfx_table.count-1].value;
-
-							dpm_table->gfx_table.dpm_levels[i].value =
-								golden_dpm_table->gfx_table.dpm_levels[i].value -
-								(golden_dpm_table->gfx_table.dpm_levels[i].value *
-								clock_percent) / 100;
-						} else
-							dpm_table->gfx_table.dpm_levels[i].value =
-								golden_dpm_table->gfx_table.dpm_levels[i].value;
-					}
-				}
-			}
-
-		if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
-				data->smu_features[GNLD_DPM_UCLK].supported) {
-			dpm_table->
-			mem_table.dpm_levels[dpm_table->mem_table.count - 1].
-			value = mclk;
-
-			if (hwmgr->od_enabled) {
-				PP_ASSERT_WITH_CODE(
-					golden_dpm_table->mem_table.dpm_levels
-					[golden_dpm_table->mem_table.count - 1].value,
-					"Divide by 0!",
-					return -1);
-
-				dpm_count = dpm_table->mem_table.count < 2 ?
-						0 : dpm_table->mem_table.count - 2;
-				for (i = dpm_count; i > 1; i--) {
-					if (mclk > golden_dpm_table->mem_table.dpm_levels
-						[golden_dpm_table->mem_table.count-1].value) {
-						clock_percent = ((mclk -
-							golden_dpm_table->mem_table.dpm_levels
-							[golden_dpm_table->mem_table.count-1].value) *
-							100) /
-							golden_dpm_table->mem_table.dpm_levels
-							[golden_dpm_table->mem_table.count-1].value;
-
-						dpm_table->mem_table.dpm_levels[i].value =
-							golden_dpm_table->mem_table.dpm_levels[i].value +
-							(golden_dpm_table->mem_table.dpm_levels[i].value *
-							clock_percent) / 100;
-					} else if (golden_dpm_table->mem_table.dpm_levels
-							[dpm_table->mem_table.count-1].value > mclk) {
-						clock_percent = ((golden_dpm_table->mem_table.dpm_levels
-							[golden_dpm_table->mem_table.count-1].value - mclk) *
-							100) /
-							golden_dpm_table->mem_table.dpm_levels
-							[golden_dpm_table->mem_table.count-1].value;
-
-						dpm_table->mem_table.dpm_levels[i].value =
-							golden_dpm_table->mem_table.dpm_levels[i].value -
-							(golden_dpm_table->mem_table.dpm_levels[i].value *
-							clock_percent) / 100;
-					} else
-						dpm_table->mem_table.dpm_levels[i].value =
-							golden_dpm_table->mem_table.dpm_levels[i].value;
-				}
-			}
-		}
-
-		if ((data->need_update_dpm_table &
-			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
-			data->apply_optimized_settings) {
-			result = vega10_populate_all_graphic_levels(hwmgr);
-			PP_ASSERT_WITH_CODE(!result,
-					"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
-					return result);
-		}
-
-		if (data->need_update_dpm_table &
-				(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
-			result = vega10_populate_all_memory_levels(hwmgr);
-			PP_ASSERT_WITH_CODE(!result,
-					"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
-					return result);
-		}
+	if (data->need_update_dpm_table &
+			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
+		result = vega10_populate_all_graphic_levels(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+				return result);
 	}
+
+	if (data->need_update_dpm_table &
+			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+		result = vega10_populate_all_memory_levels(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+				return result);
+	}
+
+	vega10_populate_vddc_soc_levels(hwmgr);
+
 	return result;
 }
 
@@ -3730,8 +3529,9 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
 	PP_ASSERT_WITH_CODE(!result,
 			"Failed to upload PPtable!", return result);
 
-	data->apply_optimized_settings = false;
-	data->apply_overdrive_next_settings_mask = 0;
+	vega10_update_avfs(hwmgr);
+
+	data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
 
 	return 0;
 }
@@ -4383,6 +4183,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
 	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
+	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
+
 	int i, now, size = 0;
 
 	switch (type) {
@@ -4421,6 +4223,40 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
 					(i == now) ? "*" : "");
 		break;
+	case OD_SCLK:
+		if (hwmgr->od_enabled) {
+			size = sprintf(buf, "%s:\n", "OD_SCLK");
+			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
+			for (i = 0; i < podn_vdd_dep->count; i++)
+				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+					i, podn_vdd_dep->entries[i].clk / 100,
+						podn_vdd_dep->entries[i].vddc);
+		}
+		break;
+	case OD_MCLK:
+		if (hwmgr->od_enabled) {
+			size = sprintf(buf, "%s:\n", "OD_MCLK");
+			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
+			for (i = 0; i < podn_vdd_dep->count; i++)
+				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
+					i, podn_vdd_dep->entries[i].clk/100,
+						podn_vdd_dep->entries[i].vddc);
+		}
+		break;
+	case OD_RANGE:
+		if (hwmgr->od_enabled) {
+			size = sprintf(buf, "%s:\n", "OD_RANGE");
+			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+				data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+				data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+				data->odn_dpm_table.min_vddc,
+				data->odn_dpm_table.max_vddc);
+		}
+		break;
 	default:
 		break;
 	}
@@ -4808,6 +4644,200 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
 	return 0;
 }
 
+
+static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
+					enum PP_OD_DPM_TABLE_COMMAND type,
+					uint32_t clk,
+					uint32_t voltage)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+	struct vega10_single_dpm_table *golden_table;
+
+	if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
+		pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
+		return false;
+	}
+
+	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+		golden_table = &(data->golden_dpm_table.gfx_table);
+		if (golden_table->dpm_levels[0].value > clk ||
+			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
+			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
+				golden_table->dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+			return false;
+		}
+	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
+		golden_table = &(data->golden_dpm_table.mem_table);
+		if (golden_table->dpm_levels[0].value > clk ||
+			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
+			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
+				golden_table->dpm_levels[0].value/100,
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+			return false;
+		}
+	} else {
+		return false;
+	}
+
+	return true;
+}
+
+static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+	uint32_t i;
+
+	dep_table = table_info->vdd_dep_on_mclk;
+	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
+
+	for (i = 0; i < dep_table->count; i++) {
+		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+			return;
+		}
+	}
+
+	dep_table = table_info->vdd_dep_on_sclk;
+	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
+	for (i = 0; i < dep_table->count; i++) {
+		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+			return;
+		}
+	}
+
+	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+		data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+	}
+}
+
+static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
+						enum PP_OD_DPM_TABLE_COMMAND type)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
+	struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
+
+	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
+							&data->odn_dpm_table.vdd_dep_on_socclk;
+	struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
+
+	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
+	uint8_t i, j;
+
+	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
+		for (i = 0; i < podn_vdd_dep->count - 1; i++)
+			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
+		if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
+	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
+		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
+		for (i = 0; i < dpm_table->count; i++) {
+			for (j = 0; j < od_vddc_lookup_table->count; j++) {
+				if (od_vddc_lookup_table->entries[j].us_vdd >
+					podn_vdd_dep->entries[i].vddc)
+					break;
+			}
+			if (j == od_vddc_lookup_table->count) {
+				od_vddc_lookup_table->entries[j-1].us_vdd =
+					podn_vdd_dep->entries[i].vddc;
+				data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+			}
+			podn_vdd_dep->entries[i].vddInd = j;
+		}
+		dpm_table = &data->dpm_table.soc_table;
+		for (i = 0; i < dep_table->count; i++) {
+			if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
+					dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
+				data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
+				podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
+				dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
+			}
+		}
+		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
+					podn_vdd_dep->entries[dep_table->count-1].clk) {
+			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
+			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
+			dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
+		}
+		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
+					podn_vdd_dep->entries[dep_table->count-1].vddInd) {
+			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
+			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
+		}
+	}
+}
+
+static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+					enum PP_OD_DPM_TABLE_COMMAND type,
+					long *input, uint32_t size)
+{
+	struct vega10_hwmgr *data = hwmgr->backend;
+	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
+	struct vega10_single_dpm_table *dpm_table;
+
+	uint32_t input_clk;
+	uint32_t input_vol;
+	uint32_t input_level;
+	uint32_t i;
+
+	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
+				return -EINVAL);
+
+	if (!hwmgr->od_enabled) {
+		pr_info("OverDrive feature not enabled\n");
+		return -EINVAL;
+	}
+
+	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
+		dpm_table = &data->dpm_table.gfx_table;
+		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
+		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
+		dpm_table = &data->dpm_table.mem_table;
+		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
+		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
+		memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
+		vega10_odn_initial_default_setting(hwmgr);
+		return 0;
+	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
+		vega10_check_dpm_table_updated(hwmgr);
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i += 3) {
+		if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
+			pr_info("invalid clock voltage input\n");
+			return 0;
+		}
+		input_level = input[i];
+		input_clk = input[i+1] * 100;
+		input_vol = input[i+2];
+
+		if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
+			dpm_table->dpm_levels[input_level].value = input_clk;
+			podn_vdd_dep_table->entries[input_level].clk = input_clk;
+			podn_vdd_dep_table->entries[input_level].vddc = input_vol;
+		} else {
+			return -EINVAL;
+		}
+	}
+	vega10_odn_update_soc_table(hwmgr, type);
+	return 0;
+}
+
 static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
 	.backend_init = vega10_hwmgr_backend_init,
 	.backend_fini = vega10_hwmgr_backend_fini,
@@ -4866,6 +4896,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
 	.get_power_profile_mode = vega10_get_power_profile_mode,
 	.set_power_profile_mode = vega10_set_power_profile_mode,
 	.set_power_limit = vega10_set_power_limit,
+	.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
 };
 
 int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 5339ea1f3dce..aadd6cbc7e85 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -282,15 +282,21 @@ struct vega10_registry_data {
 
 struct vega10_odn_clock_voltage_dependency_table {
 	uint32_t count;
-	struct phm_ppt_v1_clock_voltage_dependency_record
-		entries[MAX_REGULAR_DPM_NUMBER];
+	struct phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega10_odn_vddc_lookup_table {
+	uint32_t count;
+	struct phm_ppt_v1_voltage_lookup_record entries[MAX_REGULAR_DPM_NUMBER];
 };
 
 struct vega10_odn_dpm_table {
-	struct phm_odn_clock_levels		odn_core_clock_dpm_levels;
-	struct phm_odn_clock_levels		odn_memory_clock_dpm_levels;
-	struct vega10_odn_clock_voltage_dependency_table		vdd_dependency_on_sclk;
-	struct vega10_odn_clock_voltage_dependency_table		vdd_dependency_on_mclk;
+	struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_sclk;
+	struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_mclk;
+	struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_socclk;
+	struct vega10_odn_vddc_lookup_table vddc_lookup_table;
+	uint32_t max_vddc;
+	uint32_t min_vddc;
 };
 
 struct vega10_odn_fan_table {
@@ -301,8 +307,8 @@ struct vega10_odn_fan_table {
 };
 
 struct vega10_hwmgr {
-	struct vega10_dpm_table			dpm_table;
-	struct vega10_dpm_table			golden_dpm_table;
+	struct vega10_dpm_table          dpm_table;
+	struct vega10_dpm_table          golden_dpm_table;
 	struct vega10_registry_data      registry_data;
 	struct vega10_vbios_boot_state   vbios_boot_state;
 	struct vega10_mclk_latency_table mclk_latency_table;
@@ -368,12 +374,8 @@ struct vega10_hwmgr {
 	bool                           need_long_memory_training;
 
 	/* Internal settings to apply the application power optimization parameters */
-	bool                           apply_optimized_settings;
 	uint32_t                       disable_dpm_mask;
 
-	/* ---- Overdrive next setting ---- */
-	uint32_t                       apply_overdrive_next_settings_mask;
-
 	/* ---- SMU9 ---- */
 	struct smu_features            smu_features[GNLD_FEATURES_MAX];
 	struct vega10_smc_state_table  smc_state_table;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 8b78bbecd1bc..9bb87857a20f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -377,11 +377,7 @@ struct phm_clocks {
 #define DPMTABLE_UPDATE_SCLK        0x00000004
 #define DPMTABLE_UPDATE_MCLK        0x00000008
 #define DPMTABLE_OD_UPDATE_VDDC     0x00000010
-
-/* To determine if sclk and mclk are in overdrive state */
-#define SCLK_OVERDRIVE_ENABLED           0x00000001
-#define MCLK_OVERDRIVE_ENABLED           0x00000002
-#define VDDC_OVERDRIVE_ENABLED           0x00000010
+#define DPMTABLE_UPDATE_SOCCLK      0x00000020
 
 struct phm_odn_performance_level {
 	uint32_t clock;

From 037d1a66ae640ca2723f47c0115ffa9e603699b3 Mon Sep 17 00:00:00 2001
From: Mathieu Malaterre <malat@debian.org>
Date: Tue, 24 Apr 2018 21:55:11 +0200
Subject: [PATCH 0853/1286] drm/radeon: Change the default to PCI on PowerPC
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

AGP mode is unstable on PowerPC. Symptoms are generally of the form:

[ 1228.795711] radeon 0000:00:10.0: ring 0 stalled for more than 10240msec

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Mathieu Malaterre <malat@debian.org>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/radeon/radeon_drv.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b28288a781ef..2a7977a23b31 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -168,7 +168,12 @@ int radeon_no_wb;
 int radeon_modeset = -1;
 int radeon_dynclks = -1;
 int radeon_r4xx_atom = 0;
+#ifdef __powerpc__
+/* Default to PCI on PowerPC (fdo #95017) */
+int radeon_agpmode = -1;
+#else
 int radeon_agpmode = 0;
+#endif
 int radeon_vram_limit = 0;
 int radeon_gart_size = -1; /* auto */
 int radeon_benchmarking = 0;

From 48ff108d9dc42bf92256484c50cdb3697f5ccb04 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 9 Nov 2017 13:18:24 -0500
Subject: [PATCH 0854/1286] drm/amdgpu: add VEGAM ASIC type

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 include/drm/amd_asic_type.h                | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9e917f53f357..8ce60e6e2614 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -83,6 +83,7 @@ static const char *amdgpu_asic_name[] = {
 	"POLARIS10",
 	"POLARIS11",
 	"POLARIS12",
+	"VEGAM",
 	"VEGA10",
 	"VEGA12",
 	"RAVEN",
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 6c731c52c071..695bde7eb055 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -44,6 +44,7 @@ enum amd_asic_type {
 	CHIP_POLARIS10,
 	CHIP_POLARIS11,
 	CHIP_POLARIS12,
+	CHIP_VEGAM,
 	CHIP_VEGA10,
 	CHIP_VEGA12,
 	CHIP_RAVEN,

From cc07f18ddb618af5ad28669dcb32b27e2f2312af Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 9 Nov 2017 13:19:58 -0500
Subject: [PATCH 0855/1286] drm/amdgpu: bypass GPU info firmware load for VEGAM

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8ce60e6e2614..47b65f3a1927 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1367,9 +1367,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 #ifdef CONFIG_DRM_AMDGPU_SI

From 32cc7e536a546e4e2ad9ac75d02ce07d9d2327f2 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 9 Nov 2017 13:22:54 -0500
Subject: [PATCH 0856/1286] drm/amdgpu: set VEGAM to ASIC family and ip blocks

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 47b65f3a1927..7929ff83f3ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1476,9 +1476,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)

From 34fd54bc0891b0d835de73978ba5277665814be3 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 9 Nov 2017 13:26:54 -0500
Subject: [PATCH 0857/1286] drm/amdgpu: specify VEGAM ucode SMU load method

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 0c74c09ef3b0..ee71c40b3920 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -295,6 +295,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 		if (!load_type)
 			return AMDGPU_FW_LOAD_DIRECT;
 		else

From 5830bb986dcd6aea290ef54446e077c09cc8498e Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 9 Nov 2017 13:24:47 -0500
Subject: [PATCH 0858/1286] drm/amdgpu: add VEGAM SMU firmware support

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c       | 3 +++
 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1 +
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a8a942c60ea2..5b3d3bf5b599 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -385,6 +385,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			case CHIP_POLARIS12:
 				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
 				break;
+			case CHIP_VEGAM:
+				strcpy(fw_name, "amdgpu/vegam_smc.bin");
+				break;
 			case CHIP_VEGA10:
 				if ((adev->pdev->device == 0x687f) &&
 					((adev->pdev->revision == 0xc0) ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c28b60aae5f8..ee236dfbf1d6 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -41,6 +41,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega12_smc.bin");

From be2c8cde0b867033914fc48d51b0cca0481b39b6 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Fri, 3 Nov 2017 14:22:16 -0400
Subject: [PATCH 0859/1286] drm/amdgpu/virtual_dce: add VEGAM support

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 6454cc371f57..de7be3de0f41 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -460,8 +460,9 @@ static int dce_virtual_hw_init(void *handle)
 		break;
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+	case CHIP_VEGAM:
 		dce_v11_0_disable_dce(adev);
 		break;
 	case CHIP_TOPAZ:

From 675fd32b2730f362b425a65f99fcc1eae8898fc5 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Wed, 8 Nov 2017 18:07:12 -0500
Subject: [PATCH 0860/1286] drm/amdgpu: add VEGAM dc support check

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7929ff83f3ed..e6657ec363b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2147,9 +2147,10 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_MULLINS:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
 	case CHIP_VEGA10:

From 589ecd753aa9e69ea40e307d2a0c013b03e418f1 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 16 Nov 2017 13:15:12 -0500
Subject: [PATCH 0861/1286] drm/amdgpu: skip VEGAM MC firmware load

The MC firmware is loaded directly by the VBIOS, so skip loading it here.

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 4d970daa65f4..97fcca805d48 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -231,6 +231,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
+	case CHIP_VEGAM:
 		return 0;
 	default: BUG();
 	}

From 13b75aac5dd9a6448417769c43d21b2343ce1cc8 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Wed, 11 Apr 2018 15:18:20 -0500
Subject: [PATCH 0862/1286] drm/amdgpu: add VEGAM GMC golden settings

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 97fcca805d48..6721b04b7796 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -138,6 +138,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 		amdgpu_device_program_register_sequence(adev,
 							golden_settings_polaris11_a11,
 							ARRAY_SIZE(golden_settings_polaris11_a11));

From f43c72ba03152920c52f1921e45100c6c090faef Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Wed, 11 Apr 2018 15:20:35 -0500
Subject: [PATCH 0863/1286] drm/amdgpu: initialize VEGAM GMC (v2)

v2: use proper register rather than hardcoding.

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 6721b04b7796..1edbe6b477b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -569,9 +569,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
 		switch (adev->asic_type) {
-		case CHIP_POLARIS11: /* all engines support GPUVM */
 		case CHIP_POLARIS10: /* all engines support GPUVM */
+		case CHIP_POLARIS11: /* all engines support GPUVM */
 		case CHIP_POLARIS12: /* all engines support GPUVM */
+		case CHIP_VEGAM:     /* all engines support GPUVM */
 		default:
 			adev->gmc.gart_size = 256ULL << 20;
 			break;
@@ -1091,7 +1092,8 @@ static int gmc_v8_0_sw_init(void *handle)
 	} else {
 		u32 tmp;
 
-		if (adev->asic_type == CHIP_FIJI)
+		if ((adev->asic_type == CHIP_FIJI) ||
+		    (adev->asic_type == CHIP_VEGAM))
 			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
 		else
 			tmp = RREG32(mmMC_SEQ_MISC0);

From 2267e26241d6cd0c8d92614a4a70562b009354c9 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 9 Nov 2017 13:56:12 -0500
Subject: [PATCH 0864/1286] drm/amdgpu: add VEGAM SDMA firmware support

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index be20a387d961..add0b80a5355 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -62,6 +62,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
+MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");
 
 
 static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -275,15 +277,18 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 		chip_name = "fiji";
 		break;
-	case CHIP_POLARIS11:
-		chip_name = "polaris11";
-		break;
 	case CHIP_POLARIS10:
 		chip_name = "polaris10";
 		break;
+	case CHIP_POLARIS11:
+		chip_name = "polaris11";
+		break;
 	case CHIP_POLARIS12:
 		chip_name = "polaris12";
 		break;
+	case CHIP_VEGAM:
+		chip_name = "vegam";
+		break;
 	case CHIP_CARRIZO:
 		chip_name = "carrizo";
 		break;

From c3f27c08ec15b61bed2a1af592ab5bdc89fe7dee Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Wed, 11 Apr 2018 15:22:20 -0500
Subject: [PATCH 0865/1286] drm/amdgpu: add VEGAM SDMA golden settings

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index add0b80a5355..aa9ab299fd32 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -211,6 +211,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
 		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 		amdgpu_device_program_register_sequence(adev,
 							golden_settings_polaris11_a11,
 							ARRAY_SIZE(golden_settings_polaris11_a11));

From 62aac2010de1d233739c30a168d0bbff31b3cb43 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Fri, 10 Nov 2017 11:04:09 -0500
Subject: [PATCH 0866/1286] drm/amdgpu: add VEGAM GFX firmware support

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 42 +++++++++++++++++----------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e14263fca1c9..2be287078ec6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -125,18 +125,6 @@ MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
 MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
 MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
 
-MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
-
 MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
@@ -149,6 +137,18 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
+
 MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
@@ -161,6 +161,13 @@ MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
+MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
+MODULE_FIRMWARE("amdgpu/vegam_me.bin");
+MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
+MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
+MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
+
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
 	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -918,17 +925,20 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_FIJI:
 		chip_name = "fiji";
 		break;
-	case CHIP_POLARIS11:
-		chip_name = "polaris11";
+	case CHIP_STONEY:
+		chip_name = "stoney";
 		break;
 	case CHIP_POLARIS10:
 		chip_name = "polaris10";
 		break;
+	case CHIP_POLARIS11:
+		chip_name = "polaris11";
+		break;
 	case CHIP_POLARIS12:
 		chip_name = "polaris12";
 		break;
-	case CHIP_STONEY:
-		chip_name = "stoney";
+	case CHIP_VEGAM:
+		chip_name = "vegam";
 		break;
 	default:
 		BUG();

From aefbbd6cc55cba823fecd0231116a8e1073e4892 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 16 Nov 2017 13:41:03 -0500
Subject: [PATCH 0867/1286] drm/amdgpu: add VEGAM GFX golden settings

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 39 +++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 2be287078ec6..d789723f0478 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -299,6 +299,37 @@ static const u32 tonga_mgcg_cgcg_init[] =
 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
+static const u32 golden_settings_vegam_a11[] =
+{
+	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+	mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
+	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
+	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
+	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
+	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
+	mmSQ_CONFIG, 0x07f80000, 0x01180000,
+	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
+	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
+	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
+};
+
+static const u32 vegam_golden_common_all[] =
+{
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
+	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
+};
+
 static const u32 golden_settings_polaris11_a11[] =
 {
 	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
@@ -719,6 +750,14 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 							tonga_golden_common_all,
 							ARRAY_SIZE(tonga_golden_common_all));
 		break;
+	case CHIP_VEGAM:
+		amdgpu_device_program_register_sequence(adev,
+							golden_settings_vegam_a11,
+							ARRAY_SIZE(golden_settings_vegam_a11));
+		amdgpu_device_program_register_sequence(adev,
+							vegam_golden_common_all,
+							ARRAY_SIZE(vegam_golden_common_all));
+		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 		amdgpu_device_program_register_sequence(adev,

From 7176546958ddd7d4732d2a19692a3b14e3519caa Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 16 Nov 2017 13:49:56 -0500
Subject: [PATCH 0868/1286] drm/amdgpu: initialize VEGAM GFX

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d789723f0478..818874b13c99 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1819,6 +1819,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
 		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_POLARIS10:
+	case CHIP_VEGAM:
 		ret = amdgpu_atombios_get_gfx_info(adev);
 		if (ret)
 			return ret;
@@ -2006,12 +2007,13 @@ static int gfx_v8_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	switch (adev->asic_type) {
-	case CHIP_FIJI:
 	case CHIP_TONGA:
+	case CHIP_CARRIZO:
+	case CHIP_FIJI:
+	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
-	case CHIP_POLARIS10:
-	case CHIP_CARRIZO:
+	case CHIP_VEGAM:
 		adev->gfx.mec.num_mec = 2;
 		break;
 	case CHIP_TOPAZ:
@@ -2372,6 +2374,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
 
 		break;
 	case CHIP_FIJI:
+	case CHIP_VEGAM:
 		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
 				PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
 				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3553,6 +3556,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
 {
 	switch (adev->asic_type) {
 	case CHIP_FIJI:
+	case CHIP_VEGAM:
 		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
 			  RB_XSEL2(1) | PKR_MAP(2) |
 			  PKR_XSEL(1) | PKR_YSEL(1) |
@@ -4120,7 +4124,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
 		gfx_v8_0_init_power_gating(adev);
 		WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
 	} else if ((adev->asic_type == CHIP_POLARIS11) ||
-		   (adev->asic_type == CHIP_POLARIS12)) {
+		   (adev->asic_type == CHIP_POLARIS12) ||
+		   (adev->asic_type == CHIP_VEGAM)) {
 		gfx_v8_0_init_csb(adev);
 		gfx_v8_0_init_save_restore_list(adev);
 		gfx_v8_0_enable_save_restore_machine(adev);
@@ -4195,7 +4200,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
 	if (adev->asic_type == CHIP_POLARIS11 ||
 	    adev->asic_type == CHIP_POLARIS10 ||
-	    adev->asic_type == CHIP_POLARIS12) {
+	    adev->asic_type == CHIP_POLARIS12 ||
+	    adev->asic_type == CHIP_VEGAM) {
 		tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
 		tmp &= ~0x3;
 		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -5547,7 +5553,8 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
 						       bool enable)
 {
 	if ((adev->asic_type == CHIP_POLARIS11) ||
-	    (adev->asic_type == CHIP_POLARIS12))
+	    (adev->asic_type == CHIP_POLARIS12) ||
+	    (adev->asic_type == CHIP_VEGAM))
 		/* Send msg to SMU via Powerplay */
 		amdgpu_device_ip_set_powergating_state(adev,
 						       AMD_IP_BLOCK_TYPE_SMC,
@@ -5637,6 +5644,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 		break;
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
 			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
 		else
@@ -6203,6 +6211,7 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 		gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
 		break;
 	default:

From ba8f7ad0e5b25851299cd45a63b57d843e50b577 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Fri, 10 Nov 2017 12:27:40 -0500
Subject: [PATCH 0869/1286] drm/amdgpu: add VEGAM UVD firmware support

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 627542b22ae4..d8dd4028c2bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -66,6 +66,7 @@
 #define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
 #define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
 #define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
+#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"
 
 #define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
 #define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
@@ -109,6 +110,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
 MODULE_FIRMWARE(FIRMWARE_POLARIS12);
+MODULE_FIRMWARE(FIRMWARE_VEGAM);
 
 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
@@ -172,6 +174,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	case CHIP_VEGA12:
 		fw_name = FIRMWARE_VEGA12;
 		break;
+	case CHIP_VEGAM:
+		fw_name = FIRMWARE_VEGAM;
+		break;
 	default:
 		return -EINVAL;
 	}

From 136b10ad9b515a7ffdfbf4df01941856682bf94e Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Wed, 11 Apr 2018 15:24:01 -0500
Subject: [PATCH 0870/1286] drm/amdgpu: add VEGAM UVD encode support

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index f26f515db2fb..6d3359889c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -62,7 +62,7 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
 static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
 {
 	return ((adev->asic_type >= CHIP_POLARIS10) &&
-			(adev->asic_type <= CHIP_POLARIS12) &&
+			(adev->asic_type <= CHIP_VEGAM) &&
 			(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
 }
 

From f11ded5ec23602d651cab3381243c527ad8c55a9 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Wed, 11 Apr 2018 15:25:57 -0500
Subject: [PATCH 0871/1286] drm/amdgpu: add VEGAM VCE firmware support

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index d7261e01ff8a..e2186eda3271 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -51,8 +51,9 @@
 #define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
 #define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
 #define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
-#define FIRMWARE_POLARIS11         "amdgpu/polaris11_vce.bin"
-#define FIRMWARE_POLARIS12         "amdgpu/polaris12_vce.bin"
+#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
+#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"
 
 #define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
 #define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
@@ -71,6 +72,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
 MODULE_FIRMWARE(FIRMWARE_POLARIS12);
+MODULE_FIRMWARE(FIRMWARE_VEGAM);
 
 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
@@ -132,6 +134,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	case CHIP_POLARIS12:
 		fw_name = FIRMWARE_POLARIS12;
 		break;
+	case CHIP_VEGAM:
+		fw_name = FIRMWARE_VEGAM;
+		break;
 	case CHIP_VEGA10:
 		fw_name = FIRMWARE_VEGA10;
 		break;

From a771289786824f15d4d4307242389d0499e83e59 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Fri, 10 Nov 2017 12:32:04 -0500
Subject: [PATCH 0872/1286] drm/amdgpu: add VEGAM to VCE harvest config

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index ac9617269a2f..0999c843f623 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -388,7 +388,8 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
 	default:
 		if ((adev->asic_type == CHIP_POLARIS10) ||
 		    (adev->asic_type == CHIP_POLARIS11) ||
-		    (adev->asic_type == CHIP_POLARIS12))
+		    (adev->asic_type == CHIP_POLARIS12) ||
+		    (adev->asic_type == CHIP_VEGAM))
 			return AMDGPU_VCE_HARVEST_VCE1;
 
 		return 0;

From b51c5194a5b8d781e45a86776f2eec234f7567fe Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Wed, 11 Apr 2018 15:28:28 -0500
Subject: [PATCH 0873/1286] drm/amdgpu: add VEGAM support to vi

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vi.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 4034a2863226..4ac1288ab7df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -305,9 +305,10 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
 							stoney_mgcg_cgcg_init,
 							ARRAY_SIZE(stoney_mgcg_cgcg_init));
 		break;
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	default:
 		break;
 	}
@@ -1096,6 +1097,30 @@ static int vi_common_early_init(void *handle)
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x64;
 		break;
+	case CHIP_VEGAM:
+		adev->cg_flags = 0;
+			/*AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_RLC_LS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_3D_CGCG |
+			AMD_CG_SUPPORT_GFX_3D_CGLS |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_HDP_LS |
+			AMD_CG_SUPPORT_ROM_MGCG |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_DRM_LS |
+			AMD_CG_SUPPORT_UVD_MGCG |
+			AMD_CG_SUPPORT_VCE_MGCG;*/
+		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x6E;
+		break;
 	case CHIP_CARRIZO:
 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
 			AMD_CG_SUPPORT_GFX_MGCG |
@@ -1487,6 +1512,7 @@ static int vi_common_set_clockgating_state(void *handle,
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 		vi_common_set_clockgating_state_by_smu(adev, state);
 	default:
 		break;
@@ -1616,9 +1642,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
 		}
 		break;
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);

From e930793280799e66c3197e2ee6e70b1129f8aa12 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 9 Nov 2017 13:25:31 -0500
Subject: [PATCH 0874/1286] drm/amdgpu: add VEGAM pci ids

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 998ba8e710de..739e7e09c8b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -541,6 +541,9 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	/* VEGAM */
+	{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+	{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
 	/* Vega 10 */
 	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},

From 0c75d5acc80dc5247962370c9f555922197b1ec3 Mon Sep 17 00:00:00 2001
From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
Date: Wed, 11 Apr 2018 15:39:35 -0500
Subject: [PATCH 0875/1286] drm/amd/display: Implement VEGAM device IDs in DC

Implement device IDs for VEGAM

Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/Kconfig                      | 6 ++++++
 .../gpu/drm/amd/display/dc/bios/command_table_helper.c   | 3 +++
 .../gpu/drm/amd/display/dc/bios/command_table_helper2.c  | 3 +++
 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c         | 9 +++++++++
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c        | 7 +++++++
 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c    | 6 ++++++
 drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c         | 3 +++
 drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c       | 3 +++
 drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c           | 3 +++
 drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h           | 3 +++
 drivers/gpu/drm/amd/display/include/dal_asic_id.h        | 7 +++++++
 drivers/gpu/drm/amd/display/include/dal_types.h          | 3 +++
 12 files changed, 56 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index d5d4586e6176..e6ca72c0d347 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -34,4 +34,10 @@ config DEBUG_KERNEL_DC
 	  if you want to hit
 	  kdgb_break in assert.
 
+config DRM_AMD_DC_VEGAM
+        bool "VEGAM support"
+        depends on DRM_AMD_DC
+        help
+         Choose this option if you want to have
+         VEGAM support for display engine
 endmenu
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 2979358c6a55..be066c49b984 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -51,6 +51,9 @@ bool dal_bios_parser_init_cmd_tbl_helper(
 		return true;
 
 	case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case DCE_VERSION_11_22:
+#endif
 		*h = dal_cmd_tbl_helper_dce112_get_table();
 		return true;
 
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 9a4d30dd4969..9b9e06995805 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -52,6 +52,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
 		return true;
 
 	case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case DCE_VERSION_11_22:
+#endif
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 56f46a065a93..4ee3c26f7c13 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -59,6 +59,10 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
 			return BW_CALCS_VERSION_POLARIS10;
 		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
 			return BW_CALCS_VERSION_POLARIS11;
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+		if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
+			return BW_CALCS_VERSION_VEGAM;
+#endif
 		return BW_CALCS_VERSION_INVALID;
 
 	case FAMILY_AI:
@@ -2147,6 +2151,11 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
 		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
 		break;
 	case BW_CALCS_VERSION_POLARIS10:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+		/* TODO: Treat VEGAM the same as P10 for now
+		 * Need to tune the para for VEGAM if needed */
+	case BW_CALCS_VERSION_VEGAM:
+#endif
 		vbios.memory_type = bw_def_gddr5;
 		vbios.dram_channel_width_in_bits = 32;
 		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index d7a92eca8a27..447729cd29f0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -79,6 +79,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 				ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
 			dc_version = DCE_VERSION_11_2;
 		}
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+		if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
+			dc_version = DCE_VERSION_11_22;
+#endif
 		break;
 	case FAMILY_AI:
 		dc_version = DCE_VERSION_12_0;
@@ -125,6 +129,9 @@ struct resource_pool *dc_create_resource_pool(
 			num_virtual_links, dc, asic_id);
 		break;
 	case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case DCE_VERSION_11_22:
+#endif
 		res_pool = dce112_create_resource_pool(
 			num_virtual_links, dc);
 		break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 67dad7f1e643..223db98a568a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -590,6 +590,9 @@ static uint32_t dce110_get_pix_clk_dividers(
 			pll_settings, pix_clk_params);
 		break;
 	case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case DCE_VERSION_11_22:
+#endif
 	case DCE_VERSION_12_0:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case DCN_VERSION_1_0:
@@ -979,6 +982,9 @@ static bool dce110_program_pix_clk(
 
 		break;
 	case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case DCE_VERSION_11_22:
+#endif
 	case DCE_VERSION_12_0:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case DCN_VERSION_1_0:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 87b580fa4bc9..61fe484da1a0 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -75,6 +75,9 @@ bool dal_hw_factory_init(
 		return true;
 	case DCE_VERSION_11_0:
 	case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case DCE_VERSION_11_22:
+#endif
 		dal_hw_factory_dce110_init(factory);
 		return true;
 	case DCE_VERSION_12_0:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 0ae8ace25739..910ae2b7bf64 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -72,6 +72,9 @@ bool dal_hw_translate_init(
 	case DCE_VERSION_10_0:
 	case DCE_VERSION_11_0:
 	case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case DCE_VERSION_11_22:
+#endif
 		dal_hw_translate_dce110_init(translate);
 		return true;
 	case DCE_VERSION_12_0:
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index 5cbf6626b8d4..c3d7c320fdba 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -83,6 +83,9 @@ struct i2caux *dal_i2caux_create(
 	case DCE_VERSION_8_3:
 		return dal_i2caux_dce80_create(ctx);
 	case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case DCE_VERSION_11_22:
+#endif
 		return dal_i2caux_dce112_create(ctx);
 	case DCE_VERSION_11_0:
 		return dal_i2caux_dce110_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index 0bd87f24fc06..933ea7a1e18b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -43,6 +43,9 @@ enum bw_calcs_version {
 	BW_CALCS_VERSION_POLARIS10,
 	BW_CALCS_VERSION_POLARIS11,
 	BW_CALCS_VERSION_POLARIS12,
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	BW_CALCS_VERSION_VEGAM,
+#endif
 	BW_CALCS_VERSION_STONEY,
 	BW_CALCS_VERSION_VEGA10
 };
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 9831cb5eaa7c..3e8e535e08f2 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -98,7 +98,14 @@
 		(eChipRev < VI_POLARIS11_M_A0))
 #define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) &&  \
 		(eChipRev < VI_POLARIS12_V_A0))
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+#define VI_VEGAM_A0 110
+#define ASIC_REV_IS_POLARIS12_V(eChipRev) ((eChipRev >= VI_POLARIS12_V_A0) && \
+		(eChipRev < VI_VEGAM_A0))
+#define ASIC_REV_IS_VEGAM(eChipRev) (eChipRev >= VI_VEGAM_A0)
+#else
 #define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
+#endif
 
 /* DCE11 */
 #define CZ_CARRIZO_A0 0x01
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index fa543965feb5..5b1f8cef0c22 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -40,6 +40,9 @@ enum dce_version {
 	DCE_VERSION_10_0,
 	DCE_VERSION_11_0,
 	DCE_VERSION_11_2,
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	DCE_VERSION_11_22,
+#endif
 	DCE_VERSION_12_0,
 	DCE_VERSION_MAX,
 	DCN_VERSION_1_0,

From 7737de91633b1cd6b3a0b15347a633667a9bc2fc Mon Sep 17 00:00:00 2001
From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
Date: Thu, 9 Nov 2017 11:51:13 -0500
Subject: [PATCH 0876/1286] drm/amd/display: Implement VEGAM device IDs in DM

Add CHIP_VEGAM

Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 656a01891f6c..8379a3705f2d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1524,6 +1524,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS12:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case CHIP_VEGAM:
+#endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 		if (dce110_register_irq_handlers(dm->adev)) {
@@ -1716,6 +1719,9 @@ static int dm_early_init(void *handle)
 		adev->mode_info.plane_type = dm_plane_type_default;
 		break;
 	case CHIP_POLARIS10:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	case CHIP_VEGAM:
+#endif
 		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;

From 221adb2172f10ebc3a1f86c18923692a58cff1de Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 19 Apr 2018 16:38:46 -0500
Subject: [PATCH 0877/1286] drm/amdgpu: Add VEGAM support to the legacy DCE 11
 module

DC is preferred.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d3ae508b2a92..a5b96eac3033 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -173,6 +173,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
 							ARRAY_SIZE(polaris11_golden_settings_a11));
 		break;
 	case CHIP_POLARIS10:
+	case CHIP_VEGAM:
 		amdgpu_device_program_register_sequence(adev,
 							polaris10_golden_settings_a11,
 							ARRAY_SIZE(polaris10_golden_settings_a11));
@@ -473,6 +474,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
 		num_crtc = 2;
 		break;
 	case CHIP_POLARIS10:
+	case CHIP_VEGAM:
 		num_crtc = 6;
 		break;
 	case CHIP_POLARIS11:
@@ -1445,6 +1447,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
 		adev->mode_info.audio.num_pins = 7;
 		break;
 	case CHIP_POLARIS10:
+	case CHIP_VEGAM:
 		adev->mode_info.audio.num_pins = 8;
 		break;
 	case CHIP_POLARIS11:
@@ -2248,7 +2251,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
 
 	if ((adev->asic_type == CHIP_POLARIS10) ||
 	    (adev->asic_type == CHIP_POLARIS11) ||
-	    (adev->asic_type == CHIP_POLARIS12)) {
+	    (adev->asic_type == CHIP_POLARIS12) ||
+	    (adev->asic_type == CHIP_VEGAM)) {
 		struct amdgpu_encoder *amdgpu_encoder =
 			to_amdgpu_encoder(amdgpu_crtc->encoder);
 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2666,7 +2670,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
 
 	if ((adev->asic_type == CHIP_POLARIS10) ||
 	    (adev->asic_type == CHIP_POLARIS11) ||
-	    (adev->asic_type == CHIP_POLARIS12)) {
+	    (adev->asic_type == CHIP_POLARIS12) ||
+	    (adev->asic_type == CHIP_VEGAM)) {
 		struct amdgpu_encoder *amdgpu_encoder =
 			to_amdgpu_encoder(amdgpu_crtc->encoder);
 		int encoder_mode =
@@ -2823,6 +2828,7 @@ static int dce_v11_0_early_init(void *handle)
 		adev->mode_info.num_dig = 9;
 		break;
 	case CHIP_POLARIS10:
+	case CHIP_VEGAM:
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
 		break;
@@ -2942,7 +2948,8 @@ static int dce_v11_0_hw_init(void *handle)
 	amdgpu_atombios_encoder_init_dig(adev);
 	if ((adev->asic_type == CHIP_POLARIS10) ||
 	    (adev->asic_type == CHIP_POLARIS11) ||
-	    (adev->asic_type == CHIP_POLARIS12)) {
+	    (adev->asic_type == CHIP_POLARIS12) ||
+	    (adev->asic_type == CHIP_VEGAM)) {
 		amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
 						   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
 		amdgpu_atombios_crtc_set_dce_clock(adev, 0,

From 48231fd51667a89514d0eaba893ae0743fd0877d Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Tue, 21 Nov 2017 13:34:48 -0500
Subject: [PATCH 0878/1286] drm/amd/display: Use HBR2 if eDP monitor doesn't
 advertise link rate

Some eDP displays use the extra link rate table to advertise their
supported link rates. If they do that, they don't need to provide the
link rate through the usual registers. Since we don't currently have
support for the extra link rate table, default to HBR2 for such
displays.

Note that this is a HACK. Ultimately we need to teach DC to use the
extra link rate table.

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 07cc4385a7c1..0a190c2b6898 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2393,6 +2393,10 @@ bool detect_dp_sink_caps(struct dc_link *link)
 void detect_edp_sink_caps(struct dc_link *link)
 {
 	retrieve_link_cap(link);
+
+	if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
+		link->reported_link_cap.link_rate = LINK_RATE_HIGH2;
+
 	link->verified_link_cap = link->reported_link_cap;
 }
 
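Editor's note: the "extra link rate table" mentioned above is the eDP 1.4
SUPPORTED_LINK_RATES block at DPCD address 0x010: up to eight little-endian
16-bit entries, each a link rate in units of 200 kHz, with a zero entry
terminating the list. The sketch below is not DC code; it is a minimal,
self-contained illustration (hypothetical names and made-up sample data) of
how such a table could be parsed, with the patch's HBR2 fallback applied when
no rate is found.

	/* Hedged sketch only; offsets/encoding per the eDP 1.4 spec. */
	#include <stdint.h>
	#include <stdio.h>

	#define DP_SUPPORTED_LINK_RATES_SIZE	16	/* 8 entries * 2 bytes at DPCD 0x010 */
	#define HBR2_KHZ			5400000	/* 5.4 Gbps per lane */

	/* Return the highest advertised link rate in kHz, or 0 if the table is empty. */
	static uint32_t max_link_rate_from_table(const uint8_t *dpcd_010, int len)
	{
		uint32_t max_khz = 0;
		int i;

		for (i = 0; i + 1 < len; i += 2) {
			/* Each entry is little-endian, in units of 200 kHz; 0 ends the table. */
			uint32_t rate_khz = ((dpcd_010[i + 1] << 8) | dpcd_010[i]) * 200;

			if (!rate_khz)
				break;
			if (rate_khz > max_khz)
				max_khz = rate_khz;
		}
		return max_khz;
	}

	int main(void)
	{
		/* Hypothetical panel advertising only 1.62, 2.16 and 2.43 Gbps. */
		const uint8_t table[DP_SUPPORTED_LINK_RATES_SIZE] = {
			0xa4, 0x1f,	/*  8100 * 200 kHz = 1.62 Gbps */
			0x30, 0x2a,	/* 10800 * 200 kHz = 2.16 Gbps */
			0x76, 0x2f,	/* 12150 * 200 kHz = 2.43 Gbps */
		};
		uint32_t rate = max_link_rate_from_table(table, sizeof(table));

		/* Mirror of the patch's fallback: no usable rate, so assume HBR2. */
		if (!rate)
			rate = HBR2_KHZ;
		printf("link rate: %u kHz\n", (unsigned int)rate);
		return 0;
	}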

From f4ad6fa99772969c16c3fc8877e450b48e93e102 Mon Sep 17 00:00:00 2001
From: Eric Huang <JinHuiEric.Huang@amd.com>
Date: Thu, 9 Nov 2017 16:29:28 -0500
Subject: [PATCH 0879/1286] drm/amd/powerplay: add smu75 header files

Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/inc/smu75.h     | 760 +++++++++++++++
 .../drm/amd/powerplay/inc/smu75_discrete.h    | 886 ++++++++++++++++++
 2 files changed, 1646 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu75.h
 create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/powerplay/inc/smu75.h
new file mode 100644
index 000000000000..771523001533
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu75.h
@@ -0,0 +1,760 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU75_H
+#define SMU75_H
+
+#pragma pack(push, 1)
+
+typedef struct {
+	uint32_t high;
+	uint32_t low;
+} data_64_t;
+
+typedef struct {
+	data_64_t high;
+	data_64_t low;
+} data_128_t;
+
+#define SMU__DGPU_ONLY
+
+#define SMU__NUM_SCLK_DPM_STATE  8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+
+#define SMU7_CONTEXT_ID_SMC        1
+#define SMU7_CONTEXT_ID_VBIOS      2
+
+#define SMU75_MAX_LEVELS_VDDC            16
+#define SMU75_MAX_LEVELS_VDDGFX          16
+#define SMU75_MAX_LEVELS_VDDCI           8
+#define SMU75_MAX_LEVELS_MVDD            4
+
+#define SMU_MAX_SMIO_LEVELS              4
+
+#define SMU75_MAX_LEVELS_GRAPHICS        SMU__NUM_SCLK_DPM_STATE
+#define SMU75_MAX_LEVELS_MEMORY          SMU__NUM_MCLK_DPM_LEVELS
+#define SMU75_MAX_LEVELS_GIO             SMU__NUM_LCLK_DPM_LEVELS
+#define SMU75_MAX_LEVELS_LINK            SMU__NUM_PCIE_DPM_LEVELS
+#define SMU75_MAX_LEVELS_UVD             8
+#define SMU75_MAX_LEVELS_VCE             8
+#define SMU75_MAX_LEVELS_ACP             8
+#define SMU75_MAX_LEVELS_SAMU            8
+#define SMU75_MAX_ENTRIES_SMIO           32
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL    0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL      0
+
+#define GPIO_CLAMP_MODE_VRHOT      1
+#define GPIO_CLAMP_MODE_THERM      2
+#define GPIO_CLAMP_MODE_DC         4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK  (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT  6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK   (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT  9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK   (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT  12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK   (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT  15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK   (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT  18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK   (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT  21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK   (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
+/* Virtualization Defines */
+#define CG_XDMA_MASK  0x1
+#define CG_XDMA_SHIFT 0
+#define CG_UVD_MASK   0x2
+#define CG_UVD_SHIFT  1
+#define CG_VCE_MASK   0x4
+#define CG_VCE_SHIFT  2
+#define CG_SAMU_MASK  0x8
+#define CG_SAMU_SHIFT 3
+#define CG_GFX_MASK   0x10
+#define CG_GFX_SHIFT  4
+#define CG_SDMA_MASK  0x20
+#define CG_SDMA_SHIFT 5
+#define CG_HDP_MASK   0x40
+#define CG_HDP_SHIFT  6
+#define CG_MC_MASK    0x80
+#define CG_MC_SHIFT   7
+#define CG_DRM_MASK   0x100
+#define CG_DRM_SHIFT  8
+#define CG_ROM_MASK   0x200
+#define CG_ROM_SHIFT  9
+#define CG_BIF_MASK   0x400
+#define CG_BIF_SHIFT  10
+
+#if defined SMU__DGPU_ONLY
+#define SMU75_DTE_ITERATIONS 5
+#define SMU75_DTE_SOURCES 3
+#define SMU75_DTE_SINKS 1
+#define SMU75_NUM_CPU_TES 0
+#define SMU75_NUM_GPU_TES 1
+#define SMU75_NUM_NON_TES 2
+#define SMU75_DTE_FAN_SCALAR_MIN 0x100
+#define SMU75_DTE_FAN_SCALAR_MAX 0x166
+#define SMU75_DTE_FAN_TEMP_MAX 93
+#define SMU75_DTE_FAN_TEMP_MIN 83
+#endif
+#define SMU75_THERMAL_INPUT_LOOP_COUNT 2
+#define SMU75_THERMAL_CLAMP_MODE_COUNT 2
+
+#define EXP_M1_1  93
+#define EXP_M2_1  195759
+#define EXP_B_1   111176531
+
+#define EXP_M1_2  67
+#define EXP_M2_2  153720
+#define EXP_B_2   94415767
+
+#define EXP_M1_3  48
+#define EXP_M2_3  119796
+#define EXP_B_3   79195279
+
+#define EXP_M1_4  550
+#define EXP_M2_4  1484190
+#define EXP_B_4   1051432828
+
+#define EXP_M1_5  394
+#define EXP_M2_5  1143049
+#define EXP_B_5   864288432
+
+struct SMU7_HystController_Data {
+	uint16_t waterfall_up;
+	uint16_t waterfall_down;
+	uint16_t waterfall_limit;
+	uint16_t release_cnt;
+	uint16_t release_limit;
+	uint16_t spare;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+struct SMU75_PIDController {
+	uint32_t Ki;
+	int32_t LFWindupUpperLim;
+	int32_t LFWindupLowerLim;
+	uint32_t StatePrecision;
+	uint32_t LfPrecision;
+	uint32_t LfOffset;
+	uint32_t MaxState;
+	uint32_t MaxLfFraction;
+	uint32_t StateShift;
+};
+
+typedef struct SMU75_PIDController SMU75_PIDController;
+
+struct SMU7_LocalDpmScoreboard {
+	uint32_t PercentageBusy;
+
+	int32_t  PIDError;
+	int32_t  PIDIntegral;
+	int32_t  PIDOutput;
+
+	uint32_t SigmaDeltaAccum;
+	uint32_t SigmaDeltaOutput;
+	uint32_t SigmaDeltaLevel;
+
+	uint32_t UtilizationSetpoint;
+
+	uint8_t  TdpClampMode;
+	uint8_t  TdcClampMode;
+	uint8_t  ThermClampMode;
+	uint8_t  VoltageBusy;
+
+	int8_t   CurrLevel;
+	int8_t   TargLevel;
+	uint8_t  LevelChangeInProgress;
+	uint8_t  UpHyst;
+
+	uint8_t  DownHyst;
+	uint8_t  VoltageDownHyst;
+	uint8_t  DpmEnable;
+	uint8_t  DpmRunning;
+
+	uint8_t  DpmForce;
+	uint8_t  DpmForceLevel;
+	uint8_t  DisplayWatermark;
+	uint8_t  McArbIndex;
+
+	uint32_t MinimumPerfSclk;
+
+	uint8_t  AcpiReq;
+	uint8_t  AcpiAck;
+	uint8_t  GfxClkSlow;
+	uint8_t  GpioClampMode;
+
+	uint8_t  EnableModeSwitchRLCNotification;
+	uint8_t  EnabledLevelsChange;
+	uint8_t  DteClampMode;
+	uint8_t  FpsClampMode;
+
+	uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_GRAPHICS];
+	uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_GRAPHICS];
+
+	void     (*TargetStateCalculator)(uint8_t);
+	void     (*SavedTargetStateCalculator)(uint8_t);
+
+	uint16_t AutoDpmInterval;
+	uint16_t AutoDpmRange;
+
+	uint8_t  FpsEnabled;
+	uint8_t  MaxPerfLevel;
+	uint8_t  AllowLowClkInterruptToHost;
+	uint8_t  FpsRunning;
+
+	uint32_t MaxAllowedFrequency;
+
+	uint32_t FilteredSclkFrequency;
+	uint32_t LastSclkFrequency;
+	uint32_t FilteredSclkFrequencyCnt;
+
+	uint8_t MinPerfLevel;
+#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
+	uint8_t ScksClampMode;
+	uint8_t padding[2];
+#else
+	uint8_t padding[3];
+#endif
+
+	uint16_t FpsAlpha;
+	uint16_t DeltaTime;
+	uint32_t CurrentFps;
+	uint32_t FilteredFps;
+	uint32_t FrameCount;
+	uint32_t FrameCountLast;
+	uint16_t FpsTargetScalar;
+	uint16_t FpsWaterfallLimitScalar;
+	uint16_t FpsAlphaScalar;
+	uint16_t spare8;
+	SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
+
+#define VDDC_MASK    0x00007FFF
+#define VDDC_SHIFT   0
+#define VDDCI_MASK   0x3FFF8000
+#define VDDCI_SHIFT  15
+#define PHASES_MASK  0xC0000000
+#define PHASES_SHIFT 30
+
+typedef uint32_t SMU_VoltageLevel;
+
+struct SMU7_VoltageScoreboard {
+	SMU_VoltageLevel TargetVoltage;
+	uint16_t MaxVid;
+	uint8_t  HighestVidOffset;
+	uint8_t  CurrentVidOffset;
+
+	uint16_t CurrentVddc;
+	uint16_t CurrentVddci;
+
+	uint8_t  ControllerBusy;
+	uint8_t  CurrentVid;
+	uint8_t  CurrentVddciVid;
+	uint8_t  padding;
+
+	SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+	SMU_VoltageLevel TargetVoltageState;
+	uint8_t  EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+
+	uint8_t  padding2;
+	uint8_t  padding3;
+	uint8_t  ControllerEnable;
+	uint8_t  ControllerRunning;
+	uint16_t CurrentStdVoltageHiSidd;
+	uint16_t CurrentStdVoltageLoSidd;
+	uint8_t  OverrideVoltage;
+	uint8_t  padding4;
+	uint8_t  padding5;
+	uint8_t  CurrentPhases;
+
+	VoltageChangeHandler_t ChangeVddc;
+	VoltageChangeHandler_t ChangeVddci;
+	VoltageChangeHandler_t ChangePhase;
+	VoltageChangeHandler_t ChangeMvdd;
+
+	VoltageChangeHandler_t functionLinks[6];
+
+	uint16_t * VddcFollower1;
+	int16_t  Driver_OD_RequestedVidOffset1;
+	int16_t  Driver_OD_RequestedVidOffset2;
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3
+
+struct SMU7_PCIeLinkSpeedScoreboard {
+	uint8_t     DpmEnable;
+	uint8_t     DpmRunning;
+	uint8_t     DpmForce;
+	uint8_t     DpmForceLevel;
+
+	uint8_t     CurrentLinkSpeed;
+	uint8_t     EnabledLevelsChange;
+	uint16_t    AutoDpmInterval;
+
+	uint16_t    AutoDpmRange;
+	uint16_t    AutoDpmCount;
+
+	uint8_t     DpmMode;
+	uint8_t     AcpiReq;
+	uint8_t     AcpiAck;
+	uint8_t     CurrentLinkLevel;
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I  7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard {
+	uint32_t GpuPower;
+
+	uint32_t VddcPower;
+	uint32_t VddcVoltage;
+	uint32_t VddcCurrent;
+
+	uint32_t VddciPower;
+	uint32_t VddciVoltage;
+	uint32_t VddciCurrent;
+
+	uint32_t RocPower;
+
+	uint16_t Telemetry_1_slope;
+	uint16_t Telemetry_2_slope;
+	int32_t  Telemetry_1_offset;
+	int32_t  Telemetry_2_offset;
+
+	uint8_t MCLK_patch_flag;
+	uint8_t reserved[3];
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+#define SMU7_SCLK_DPM_CONFIG_MASK                        0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK              0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK              0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK                        0x08
+#define SMU7_UVD_DPM_CONFIG_MASK                         0x10
+#define SMU7_VCE_DPM_CONFIG_MASK                         0x20
+#define SMU7_ACP_DPM_CONFIG_MASK                         0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK                        0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK                    0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE                  0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE                  0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE                  0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE                  0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE                  0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE                  0x00020000
+
+struct SMU75_SoftRegisters {
+	uint32_t        RefClockFrequency;
+	uint32_t        PmTimerPeriod;
+	uint32_t        FeatureEnables;
+#if defined (SMU__DGPU_ONLY)
+	uint32_t        PreVBlankGap;
+	uint32_t        VBlankTimeout;
+	uint32_t        TrainTimeGap;
+	uint32_t        MvddSwitchTime;
+	uint32_t        LongestAcpiTrainTime;
+	uint32_t        AcpiDelay;
+	uint32_t        G5TrainTime;
+	uint32_t        DelayMpllPwron;
+	uint32_t        VoltageChangeTimeout;
+#endif
+	uint32_t        HandshakeDisables;
+
+	uint8_t         DisplayPhy1Config;
+	uint8_t         DisplayPhy2Config;
+	uint8_t         DisplayPhy3Config;
+	uint8_t         DisplayPhy4Config;
+
+	uint8_t         DisplayPhy5Config;
+	uint8_t         DisplayPhy6Config;
+	uint8_t         DisplayPhy7Config;
+	uint8_t         DisplayPhy8Config;
+
+	uint32_t        AverageGraphicsActivity;
+	uint32_t        AverageMemoryActivity;
+	uint32_t        AverageGioActivity;
+
+	uint8_t         SClkDpmEnabledLevels;
+	uint8_t         MClkDpmEnabledLevels;
+	uint8_t         LClkDpmEnabledLevels;
+	uint8_t         PCIeDpmEnabledLevels;
+
+	uint8_t         UVDDpmEnabledLevels;
+	uint8_t         SAMUDpmEnabledLevels;
+	uint8_t         ACPDpmEnabledLevels;
+	uint8_t         VCEDpmEnabledLevels;
+
+	uint32_t        DRAM_LOG_ADDR_H;
+	uint32_t        DRAM_LOG_ADDR_L;
+	uint32_t        DRAM_LOG_PHY_ADDR_H;
+	uint32_t        DRAM_LOG_PHY_ADDR_L;
+	uint32_t        DRAM_LOG_BUFF_SIZE;
+	uint32_t        UlvEnterCount;
+	uint32_t        UlvTime;
+	uint32_t        UcodeLoadStatus;
+	uint32_t        AllowMvddSwitch;
+	uint8_t         Activity_Weight;
+	uint8_t         Reserved8[3];
+};
+
+typedef struct SMU75_SoftRegisters SMU75_SoftRegisters;
+
+struct SMU75_Firmware_Header {
+	uint32_t Digest[5];
+	uint32_t Version;
+	uint32_t HeaderSize;
+	uint32_t Flags;
+	uint32_t EntryPoint;
+	uint32_t CodeSize;
+	uint32_t ImageSize;
+
+	uint32_t Rtos;
+	uint32_t SoftRegisters;
+	uint32_t DpmTable;
+	uint32_t FanTable;
+	uint32_t CacConfigTable;
+	uint32_t CacStatusTable;
+	uint32_t mcRegisterTable;
+	uint32_t mcArbDramTimingTable;
+	uint32_t PmFuseTable;
+	uint32_t Globals;
+	uint32_t ClockStretcherTable;
+	uint32_t VftTable;
+	uint32_t Reserved1;
+	uint32_t AvfsCksOff_AvfsGbvTable;
+	uint32_t AvfsCksOff_BtcGbvTable;
+	uint32_t MM_AvfsTable;
+	uint32_t PowerSharingTable;
+	uint32_t AvfsTable;
+	uint32_t AvfsCksOffGbvTable;
+	uint32_t AvfsMeanNSigma;
+	uint32_t AvfsSclkOffsetTable;
+	uint32_t Reserved[12];
+	uint32_t Signature;
+};
+
+typedef struct SMU75_Firmware_Header SMU75_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum  DisplayConfig {
+	PowerDown = 1,
+	DP54x4,
+	DP54x2,
+	DP54x1,
+	DP27x4,
+	DP27x2,
+	DP27x1,
+	HDMI297,
+	HDMI162,
+	LVDS,
+	DP324x4,
+	DP324x2,
+	DP324x1
+};
+
+#define MC_BLOCK_COUNT 1
+#define CPL_BLOCK_COUNT 5
+#define SE_BLOCK_COUNT 15
+#define GC_BLOCK_COUNT 24
+
+struct SMU7_Local_Cac {
+	uint8_t BlockId;
+	uint8_t SignalId;
+	uint8_t Threshold;
+	uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+	SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+	SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+	SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT];
+	SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#pragma pack(pop)
+
+#define CG_SYS_BITMASK_FIRST_BIT      0
+#define CG_SYS_BITMASK_LAST_BIT       10
+#define CG_SYS_BIF_MGLS_SHIFT         0
+#define CG_SYS_ROM_SHIFT              1
+#define CG_SYS_MC_MGCG_SHIFT          2
+#define CG_SYS_MC_MGLS_SHIFT          3
+#define CG_SYS_SDMA_MGCG_SHIFT        4
+#define CG_SYS_SDMA_MGLS_SHIFT        5
+#define CG_SYS_DRM_MGCG_SHIFT         6
+#define CG_SYS_HDP_MGCG_SHIFT         7
+#define CG_SYS_HDP_MGLS_SHIFT         8
+#define CG_SYS_DRM_MGLS_SHIFT         9
+#define CG_SYS_BIF_MGCG_SHIFT         10
+
+#define CG_SYS_BIF_MGLS_MASK          0x1
+#define CG_SYS_ROM_MASK               0x2
+#define CG_SYS_MC_MGCG_MASK           0x4
+#define CG_SYS_MC_MGLS_MASK           0x8
+#define CG_SYS_SDMA_MGCG_MASK         0x10
+#define CG_SYS_SDMA_MGLS_MASK         0x20
+#define CG_SYS_DRM_MGCG_MASK          0x40
+#define CG_SYS_HDP_MGCG_MASK          0x80
+#define CG_SYS_HDP_MGLS_MASK          0x100
+#define CG_SYS_DRM_MGLS_MASK          0x200
+#define CG_SYS_BIF_MGCG_MASK          0x400
+
+#define CG_GFX_BITMASK_FIRST_BIT      16
+#define CG_GFX_BITMASK_LAST_BIT       24
+
+#define CG_GFX_CGCG_SHIFT             16
+#define CG_GFX_CGLS_SHIFT             17
+#define CG_CPF_MGCG_SHIFT             18
+#define CG_RLC_MGCG_SHIFT             19
+#define CG_GFX_OTHERS_MGCG_SHIFT      20
+#define CG_GFX_3DCG_SHIFT             21
+#define CG_GFX_3DLS_SHIFT             22
+#define CG_GFX_RLC_LS_SHIFT           23
+#define CG_GFX_CP_LS_SHIFT            24
+
+#define CG_GFX_CGCG_MASK              0x00010000
+#define CG_GFX_CGLS_MASK              0x00020000
+#define CG_CPF_MGCG_MASK              0x00040000
+#define CG_RLC_MGCG_MASK              0x00080000
+#define CG_GFX_OTHERS_MGCG_MASK       0x00100000
+#define CG_GFX_3DCG_MASK              0x00200000
+#define CG_GFX_3DLS_MASK              0x00400000
+#define CG_GFX_RLC_LS_MASK            0x00800000
+#define CG_GFX_CP_LS_MASK             0x01000000
+
+
+#define VRCONF_VDDC_MASK         0x000000FF
+#define VRCONF_VDDC_SHIFT        0
+#define VRCONF_VDDGFX_MASK       0x0000FF00
+#define VRCONF_VDDGFX_SHIFT      8
+#define VRCONF_VDDCI_MASK        0x00FF0000
+#define VRCONF_VDDCI_SHIFT       16
+#define VRCONF_MVDD_MASK         0xFF000000
+#define VRCONF_MVDD_SHIFT        24
+
+#define VR_MERGED_WITH_VDDC      0
+#define VR_SVI2_PLANE_1          1
+#define VR_SVI2_PLANE_2          2
+#define VR_SMIO_PATTERN_1        3
+#define VR_SMIO_PATTERN_2        4
+#define VR_STATIC_VOLTAGE        5
+
+#define CLOCK_STRETCHER_MAX_ENTRIES 0x4
+#define CKS_LOOKUPTable_MAX_ENTRIES 0x4
+
+#define CLOCK_STRETCHER_SETTING_DDT_MASK             0x01
+#define CLOCK_STRETCHER_SETTING_DDT_SHIFT            0x0
+#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK  0x1E
+#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1
+#define CLOCK_STRETCHER_SETTING_ENABLE_MASK          0x80
+#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT         0x7
+
+struct SMU_ClockStretcherDataTableEntry {
+	uint8_t minVID;
+	uint8_t maxVID;
+
+	uint16_t setting;
+};
+typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
+
+struct SMU_ClockStretcherDataTable {
+	SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES];
+};
+typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable;
+
+struct SMU_CKS_LOOKUPTableEntry {
+	uint16_t minFreq;
+	uint16_t maxFreq;
+
+	uint8_t setting;
+	uint8_t padding[3];
+};
+typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry;
+
+struct SMU_CKS_LOOKUPTable {
+	SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES];
+};
+typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable;
+
+struct AgmAvfsData_t {
+	uint16_t avgPsmCount[28];
+	uint16_t minPsmCount[28];
+};
+typedef struct AgmAvfsData_t AgmAvfsData_t;
+
+enum VFT_COLUMNS {
+	SCLK0,
+	SCLK1,
+	SCLK2,
+	SCLK3,
+	SCLK4,
+	SCLK5,
+	SCLK6,
+	SCLK7,
+
+	NUM_VFT_COLUMNS
+};
+enum {
+  SCS_FUSE_T0,
+  SCS_FUSE_T1,
+  NUM_SCS_FUSE_TEMPERATURE
+};
+enum {
+  SCKS_ON,
+  SCKS_OFF,
+  NUM_SCKS_STATE_TYPES
+};
+
+#define VFT_TABLE_DEFINED
+
+#define TEMP_RANGE_MAXSTEPS 12
+struct VFT_CELL_t {
+	uint16_t Voltage;
+};
+
+typedef struct VFT_CELL_t VFT_CELL_t;
+#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
+struct SCS_CELL_t {
+	uint16_t PsmCnt[NUM_SCKS_STATE_TYPES];
+};
+typedef struct SCS_CELL_t SCS_CELL_t;
+#endif
+
+struct VFT_TABLE_t {
+	VFT_CELL_t    Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
+	uint16_t      AvfsGbv [NUM_VFT_COLUMNS];
+	uint16_t      BtcGbv  [NUM_VFT_COLUMNS];
+	int16_t       Temperature [TEMP_RANGE_MAXSTEPS];
+
+#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
+	SCS_CELL_t    ScksCell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
+#endif
+
+	uint8_t       NumTemperatureSteps;
+	uint8_t       padding[3];
+};
+typedef struct VFT_TABLE_t VFT_TABLE_t;
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+	int32_t a0;
+	int32_t a1;
+	int32_t a2;
+	uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct SMU_QuadraticCoeffs {
+	int32_t m1;
+	int32_t b;
+
+	int16_t m2;
+	uint8_t m1_shift;
+	uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
+struct AVFS_Margin_t {
+	VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+struct AVFS_CksOff_Gbv_t {
+	VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_CksOff_AvfsGbv_t {
+	VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_AvfsGbv_t AVFS_CksOff_AvfsGbv_t;
+
+struct AVFS_CksOff_BtcGbv_t {
+	VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_BtcGbv_t AVFS_CksOff_BtcGbv_t;
+
+struct AVFS_meanNsigma_t {
+	uint32_t Aconstant[3];
+	uint16_t DC_tol_sigma;
+	uint16_t Platform_mean;
+	uint16_t Platform_sigma;
+	uint16_t PSM_Age_CompFactor;
+	uint8_t  Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+	uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
+struct Power_Sharing_t {
+	uint32_t EnergyCounter;
+	uint32_t EngeryThreshold;
+	uint64_t AM_SCLK_CNT;
+	uint64_t AM_0_BUSY_CNT;
+};
+typedef struct Power_Sharing_t  Power_Sharing_t;
+
+
+#endif
+
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
new file mode 100644
index 000000000000..b64e58a22ddf
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
@@ -0,0 +1,886 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU75_DISCRETE_H
+#define SMU75_DISCRETE_H
+
+#include "smu75.h"
+
+#pragma pack(push, 1)
+
+#define NUM_SCLK_RANGE 8
+
+#define VCO_3_6 1
+#define VCO_2_4 3
+
+#define POSTDIV_DIV_BY_1  0
+#define POSTDIV_DIV_BY_2  1
+#define POSTDIV_DIV_BY_4  2
+#define POSTDIV_DIV_BY_8  3
+#define POSTDIV_DIV_BY_16 4
+
+struct sclkFcwRange_t {
+	uint8_t  vco_setting; /* 1: 3-6GHz, 3: 2-4GHz */
+	uint8_t  postdiv;     /* divide by 2^n */
+	uint16_t fcw_pcc;
+	uint16_t fcw_trans_upper;
+	uint16_t fcw_trans_lower;
+};
+typedef struct sclkFcwRange_t sclkFcwRange_t;
+
+struct SMIO_Pattern {
+	uint16_t Voltage;
+	uint8_t  Smio;
+	uint8_t  padding;
+};
+
+typedef struct SMIO_Pattern SMIO_Pattern;
+
+struct SMIO_Table {
+	SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
+};
+
+typedef struct SMIO_Table SMIO_Table;
+
+struct SMU_SclkSetting {
+	uint32_t    SclkFrequency;
+	uint16_t    Fcw_int;
+	uint16_t    Fcw_frac;
+	uint16_t    Pcc_fcw_int;
+	uint8_t     PllRange;
+	uint8_t     SSc_En;
+	uint16_t    Sclk_slew_rate;
+	uint16_t    Pcc_up_slew_rate;
+	uint16_t    Pcc_down_slew_rate;
+	uint16_t    Fcw1_int;
+	uint16_t    Fcw1_frac;
+	uint16_t    Sclk_ss_slew_rate;
+};
+typedef struct SMU_SclkSetting SMU_SclkSetting;
+
+struct SMU75_Discrete_GraphicsLevel {
+	SMU_VoltageLevel MinVoltage;
+
+	uint8_t     pcieDpmLevel;
+	uint8_t     DeepSleepDivId;
+	uint16_t    ActivityLevel;
+
+	uint32_t    CgSpllFuncCntl3;
+	uint32_t    CgSpllFuncCntl4;
+	uint32_t    CcPwrDynRm;
+	uint32_t    CcPwrDynRm1;
+
+	uint8_t     SclkDid;
+	uint8_t     padding;
+	uint8_t     EnabledForActivity;
+	uint8_t     EnabledForThrottle;
+	uint8_t     UpHyst;
+	uint8_t     DownHyst;
+	uint8_t     VoltageDownHyst;
+	uint8_t     PowerThrottle;
+
+	SMU_SclkSetting SclkSetting;
+
+	uint8_t  ScksStretchThreshVid[NUM_SCKS_STATE_TYPES];
+	uint16_t Padding;
+};
+
+typedef struct SMU75_Discrete_GraphicsLevel SMU75_Discrete_GraphicsLevel;
+
+struct SMU75_Discrete_ACPILevel {
+	uint32_t    Flags;
+	SMU_VoltageLevel MinVoltage;
+	uint32_t    SclkFrequency;
+	uint8_t     SclkDid;
+	uint8_t     DisplayWatermark;
+	uint8_t     DeepSleepDivId;
+	uint8_t     padding;
+	uint32_t    CcPwrDynRm;
+	uint32_t    CcPwrDynRm1;
+
+	SMU_SclkSetting SclkSetting;
+};
+
+typedef struct SMU75_Discrete_ACPILevel SMU75_Discrete_ACPILevel;
+
+struct SMU75_Discrete_Ulv {
+	uint32_t    CcPwrDynRm;
+	uint32_t    CcPwrDynRm1;
+	uint16_t    VddcOffset;
+	uint8_t     VddcOffsetVid;
+	uint8_t     VddcPhase;
+	uint16_t    BifSclkDfs;
+	uint16_t    Reserved;
+};
+
+typedef struct SMU75_Discrete_Ulv SMU75_Discrete_Ulv;
+
+struct SMU75_Discrete_MemoryLevel {
+	SMU_VoltageLevel MinVoltage;
+	uint32_t    MinMvdd;
+
+	uint32_t    MclkFrequency;
+
+	uint8_t     StutterEnable;
+	uint8_t     EnabledForThrottle;
+	uint8_t     EnabledForActivity;
+	uint8_t     padding_0;
+
+	uint8_t     UpHyst;
+	uint8_t     DownHyst;
+	uint8_t     VoltageDownHyst;
+	uint8_t     padding_1;
+
+	uint16_t    ActivityLevel;
+	uint8_t     DisplayWatermark;
+	uint8_t     padding_2;
+
+	uint16_t    Fcw_int;
+	uint16_t    Fcw_frac;
+	uint8_t     Postdiv;
+	uint8_t     padding_3[3];
+};
+
+typedef struct SMU75_Discrete_MemoryLevel SMU75_Discrete_MemoryLevel;
+
+struct SMU75_Discrete_LinkLevel {
+	uint8_t     PcieGenSpeed;
+	uint8_t     PcieLaneCount;
+	uint8_t     EnabledForActivity;
+	uint8_t     SPC;
+	uint32_t    DownThreshold;
+	uint32_t    UpThreshold;
+	uint16_t    BifSclkDfs;
+	uint16_t    Reserved;
+};
+
+typedef struct SMU75_Discrete_LinkLevel SMU75_Discrete_LinkLevel;
+
+
+/* MC ARB DRAM Timing registers. */
+struct SMU75_Discrete_MCArbDramTimingTableEntry {
+	uint32_t McArbDramTiming;
+	uint32_t McArbDramTiming2;
+	uint32_t McArbBurstTime;
+	uint32_t McArbRfshRate;
+	uint32_t McArbMisc3;
+};
+
+typedef struct SMU75_Discrete_MCArbDramTimingTableEntry SMU75_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU75_Discrete_MCArbDramTimingTable {
+	SMU75_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU75_Discrete_MCArbDramTimingTable SMU75_Discrete_MCArbDramTimingTable;
+
+/* UVD VCLK/DCLK state (level) definition. */
+struct SMU75_Discrete_UvdLevel {
+	uint32_t VclkFrequency;
+	uint32_t DclkFrequency;
+	SMU_VoltageLevel MinVoltage;
+	uint8_t  VclkDivider;
+	uint8_t  DclkDivider;
+	uint8_t  padding[2];
+};
+
+typedef struct SMU75_Discrete_UvdLevel SMU75_Discrete_UvdLevel;
+
+/* Clocks for other external blocks (VCE, ACP, SAMU). */
+struct SMU75_Discrete_ExtClkLevel {
+	uint32_t Frequency;
+	SMU_VoltageLevel MinVoltage;
+	uint8_t  Divider;
+	uint8_t  padding[3];
+};
+
+typedef struct SMU75_Discrete_ExtClkLevel SMU75_Discrete_ExtClkLevel;
+
+struct SMU75_Discrete_StateInfo {
+	uint32_t SclkFrequency;
+	uint32_t MclkFrequency;
+	uint32_t VclkFrequency;
+	uint32_t DclkFrequency;
+	uint32_t SamclkFrequency;
+	uint32_t AclkFrequency;
+	uint32_t EclkFrequency;
+	uint16_t MvddVoltage;
+	uint16_t padding16;
+	uint8_t  DisplayWatermark;
+	uint8_t  McArbIndex;
+	uint8_t  McRegIndex;
+	uint8_t  SeqIndex;
+	uint8_t  SclkDid;
+	int8_t   SclkIndex;
+	int8_t   MclkIndex;
+	uint8_t  PCIeGen;
+};
+
+typedef struct SMU75_Discrete_StateInfo SMU75_Discrete_StateInfo;
+
+struct SMU75_Discrete_DpmTable {
+	SMU75_PIDController                  GraphicsPIDController;
+	SMU75_PIDController                  MemoryPIDController;
+	SMU75_PIDController                  LinkPIDController;
+
+	uint32_t                            SystemFlags;
+
+	uint32_t                            VRConfig;
+	uint32_t                            SmioMask1;
+	uint32_t                            SmioMask2;
+	SMIO_Table                          SmioTable1;
+	SMIO_Table                          SmioTable2;
+
+	uint32_t                            MvddLevelCount;
+
+	uint8_t                             BapmVddcVidHiSidd        [SMU75_MAX_LEVELS_VDDC];
+	uint8_t                             BapmVddcVidLoSidd        [SMU75_MAX_LEVELS_VDDC];
+	uint8_t                             BapmVddcVidHiSidd2       [SMU75_MAX_LEVELS_VDDC];
+
+	uint8_t                             GraphicsDpmLevelCount;
+	uint8_t                             MemoryDpmLevelCount;
+	uint8_t                             LinkLevelCount;
+	uint8_t                             MasterDeepSleepControl;
+
+	uint8_t                             UvdLevelCount;
+	uint8_t                             VceLevelCount;
+	uint8_t                             AcpLevelCount;
+	uint8_t                             SamuLevelCount;
+
+	uint8_t                             ThermOutGpio;
+	uint8_t                             ThermOutPolarity;
+	uint8_t                             ThermOutMode;
+	uint8_t                             BootPhases;
+
+	uint8_t                             VRHotLevel;
+	uint8_t                             LdoRefSel;
+
+	uint8_t                             Reserved1[2];
+
+	uint16_t                            FanStartTemperature;
+	uint16_t                            FanStopTemperature;
+
+	uint16_t                            MaxVoltage;
+	uint16_t                            Reserved2;
+	uint32_t                            Reserved;
+
+	SMU75_Discrete_GraphicsLevel        GraphicsLevel           [SMU75_MAX_LEVELS_GRAPHICS];
+	SMU75_Discrete_MemoryLevel          MemoryACPILevel;
+	SMU75_Discrete_MemoryLevel          MemoryLevel             [SMU75_MAX_LEVELS_MEMORY];
+	SMU75_Discrete_LinkLevel            LinkLevel               [SMU75_MAX_LEVELS_LINK];
+	SMU75_Discrete_ACPILevel            ACPILevel;
+	SMU75_Discrete_UvdLevel             UvdLevel                [SMU75_MAX_LEVELS_UVD];
+	SMU75_Discrete_ExtClkLevel          VceLevel                [SMU75_MAX_LEVELS_VCE];
+	SMU75_Discrete_ExtClkLevel          AcpLevel                [SMU75_MAX_LEVELS_ACP];
+	SMU75_Discrete_ExtClkLevel          SamuLevel               [SMU75_MAX_LEVELS_SAMU];
+	SMU75_Discrete_Ulv                  Ulv;
+
+	uint8_t                             DisplayWatermark        [SMU75_MAX_LEVELS_MEMORY][SMU75_MAX_LEVELS_GRAPHICS];
+
+	uint32_t                            SclkStepSize;
+	uint32_t                            Smio                    [SMU75_MAX_ENTRIES_SMIO];
+
+	uint8_t                             UvdBootLevel;
+	uint8_t                             VceBootLevel;
+	uint8_t                             AcpBootLevel;
+	uint8_t                             SamuBootLevel;
+
+	uint8_t                             GraphicsBootLevel;
+	uint8_t                             GraphicsVoltageChangeEnable;
+	uint8_t                             GraphicsThermThrottleEnable;
+	uint8_t                             GraphicsInterval;
+
+	uint8_t                             VoltageInterval;
+	uint8_t                             ThermalInterval;
+	uint16_t                            TemperatureLimitHigh;
+
+	uint16_t                            TemperatureLimitLow;
+	uint8_t                             MemoryBootLevel;
+	uint8_t                             MemoryVoltageChangeEnable;
+
+	uint16_t                            BootMVdd;
+	uint8_t                             MemoryInterval;
+	uint8_t                             MemoryThermThrottleEnable;
+
+	uint16_t                            VoltageResponseTime;
+	uint16_t                            PhaseResponseTime;
+
+	uint8_t                             PCIeBootLinkLevel;
+	uint8_t                             PCIeGenInterval;
+	uint8_t                             DTEInterval;
+	uint8_t                             DTEMode;
+
+	uint8_t                             SVI2Enable;
+	uint8_t                             VRHotGpio;
+	uint8_t                             AcDcGpio;
+	uint8_t                             ThermGpio;
+
+	uint16_t                            PPM_PkgPwrLimit;
+	uint16_t                            PPM_TemperatureLimit;
+
+	uint16_t                            DefaultTdp;
+	uint16_t                            TargetTdp;
+
+	uint16_t                            FpsHighThreshold;
+	uint16_t                            FpsLowThreshold;
+
+	uint16_t                            BAPMTI_R  [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS];
+	uint16_t                            BAPMTI_RC [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS];
+
+	uint16_t                            TemperatureLimitEdge;
+	uint16_t                            TemperatureLimitHotspot;
+
+	uint16_t                            BootVddc;
+	uint16_t                            BootVddci;
+
+	uint16_t                            FanGainEdge;
+	uint16_t                            FanGainHotspot;
+
+	uint32_t                            LowSclkInterruptThreshold;
+	uint32_t                            VddGfxReChkWait;
+
+	uint8_t                             ClockStretcherAmount;
+	uint8_t                             Sclk_CKS_masterEn0_7;
+	uint8_t                             Sclk_CKS_masterEn8_15;
+	uint8_t                             DPMFreezeAndForced;
+
+	uint8_t                             Sclk_voltageOffset[8];
+
+	SMU_ClockStretcherDataTable         ClockStretcherDataTable;
+	SMU_CKS_LOOKUPTable                 CKS_LOOKUPTable;
+
+	uint32_t                            CurrSclkPllRange;
+	sclkFcwRange_t                      SclkFcwRangeTable[NUM_SCLK_RANGE];
+
+	GB_VDROOP_TABLE_t                   BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
+	SMU_QuadraticCoeffs                 AVFSGB_FUSE_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
+};
+
+typedef struct SMU75_Discrete_DpmTable SMU75_Discrete_DpmTable;
+
+struct SMU75_Discrete_FanTable {
+	uint16_t FdoMode;
+	int16_t  TempMin;
+	int16_t  TempMed;
+	int16_t  TempMax;
+	int16_t  Slope1;
+	int16_t  Slope2;
+	int16_t  FdoMin;
+	int16_t  HystUp;
+	int16_t  HystDown;
+	int16_t  HystSlope;
+	int16_t  TempRespLim;
+	int16_t  TempCurr;
+	int16_t  SlopeCurr;
+	int16_t  PwmCurr;
+	uint32_t RefreshPeriod;
+	int16_t  FdoMax;
+	uint8_t  TempSrc;
+	int8_t   Padding;
+};
+
+typedef struct SMU75_Discrete_FanTable SMU75_Discrete_FanTable;
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG             4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT         (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+
+
+struct SMU7_MclkDpmScoreboard {
+	uint32_t PercentageBusy;
+
+	int32_t  PIDError;
+	int32_t  PIDIntegral;
+	int32_t  PIDOutput;
+
+	uint32_t SigmaDeltaAccum;
+	uint32_t SigmaDeltaOutput;
+	uint32_t SigmaDeltaLevel;
+
+	uint32_t UtilizationSetpoint;
+
+	uint8_t  TdpClampMode;
+	uint8_t  TdcClampMode;
+	uint8_t  ThermClampMode;
+	uint8_t  VoltageBusy;
+
+	int8_t   CurrLevel;
+	int8_t   TargLevel;
+	uint8_t  LevelChangeInProgress;
+	uint8_t  UpHyst;
+
+	uint8_t  DownHyst;
+	uint8_t  VoltageDownHyst;
+	uint8_t  DpmEnable;
+	uint8_t  DpmRunning;
+
+	uint8_t  DpmForce;
+	uint8_t  DpmForceLevel;
+	uint8_t  padding2;
+	uint8_t  McArbIndex;
+
+	uint32_t MinimumPerfMclk;
+
+	uint8_t  AcpiReq;
+	uint8_t  AcpiAck;
+	uint8_t  MclkSwitchInProgress;
+	uint8_t  MclkSwitchCritical;
+
+	uint8_t  IgnoreVBlank;
+	uint8_t  TargetMclkIndex;
+	uint8_t  TargetMvddIndex;
+	uint8_t  MclkSwitchResult;
+
+	uint16_t VbiFailureCount;
+	uint8_t  VbiWaitCounter;
+	uint8_t  EnabledLevelsChange;
+
+	uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_MEMORY];
+	uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_MEMORY];
+
+	void     (*TargetStateCalculator)(uint8_t);
+	void     (*SavedTargetStateCalculator)(uint8_t);
+
+	uint16_t AutoDpmInterval;
+	uint16_t AutoDpmRange;
+
+	uint16_t VbiTimeoutCount;
+	uint16_t MclkSwitchingTime;
+
+	uint8_t  fastSwitch;
+	uint8_t  Save_PIC_VDDGFX_EXIT;
+	uint8_t  Save_PIC_VDDGFX_ENTER;
+	uint8_t  VbiTimeout;
+
+	uint32_t HbmTempRegBackup;
+};
+
+typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
+
+struct SMU7_UlvScoreboard {
+	uint8_t     EnterUlv;
+	uint8_t     ExitUlv;
+	uint8_t     UlvActive;
+	uint8_t     WaitingForUlv;
+	uint8_t     UlvEnable;
+	uint8_t     UlvRunning;
+	uint8_t     UlvMasterEnable;
+	uint8_t     padding;
+	uint32_t    UlvAbortedCount;
+	uint32_t    UlvTimeStamp;
+};
+
+typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;
+
+struct VddgfxSavedRegisters {
+	uint32_t GPU_DBG[3];
+	uint32_t MEC_BaseAddress_Hi;
+	uint32_t MEC_BaseAddress_Lo;
+	uint32_t THM_TMON0_CTRL2__RDIR_PRESENT;
+	uint32_t THM_TMON1_CTRL2__RDIR_PRESENT;
+	uint32_t CP_INT_CNTL;
+};
+
+typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
+
+struct SMU7_VddGfxScoreboard {
+	uint8_t     VddGfxEnable;
+	uint8_t     VddGfxActive;
+	uint8_t     VPUResetOccured;
+	uint8_t     padding;
+
+	uint32_t    VddGfxEnteredCount;
+	uint32_t    VddGfxAbortedCount;
+
+	uint32_t    VddGfxVid;
+
+	VddgfxSavedRegisters SavedRegisters;
+};
+
+typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard;
+
+struct SMU7_TdcLimitScoreboard {
+	uint8_t  Enable;
+	uint8_t  Running;
+	uint16_t Alpha;
+	uint32_t FilteredIddc;
+	uint32_t IddcLimit;
+	uint32_t IddcHyst;
+	SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard;
+
+struct SMU7_PkgPwrLimitScoreboard {
+	uint8_t  Enable;
+	uint8_t  Running;
+	uint16_t Alpha;
+	uint32_t FilteredPkgPwr;
+	uint32_t Limit;
+	uint32_t Hyst;
+	uint32_t LimitFromDriver;
+	uint8_t PowerSharingEnabled;
+	uint8_t PowerSharingCounter;
+	uint8_t PowerSharingINTEnabled;
+	uint8_t GFXActivityCounterEnabled;
+	uint32_t EnergyCount;
+	uint32_t PSACTCount;
+	uint8_t RollOverRequired;
+	uint8_t RollOverCount;
+	uint8_t padding[2];
+	SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard;
+
+struct SMU7_BapmScoreboard {
+	uint32_t source_powers[SMU75_DTE_SOURCES];
+	uint32_t source_powers_last[SMU75_DTE_SOURCES];
+	int32_t entity_temperatures[SMU75_NUM_GPU_TES];
+	int32_t initial_entity_temperatures[SMU75_NUM_GPU_TES];
+	int32_t Limit;
+	int32_t Hyst;
+	int32_t therm_influence_coeff_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS * 2];
+	int32_t therm_node_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
+	uint16_t ConfigTDPPowerScalar;
+	uint16_t FanSpeedPowerScalar;
+	uint16_t OverDrivePowerScalar;
+	uint16_t OverDriveLimitScalar;
+	uint16_t FinalPowerScalar;
+	uint8_t VariantID;
+	uint8_t spare997;
+
+	SMU7_HystController_Data HystControllerData;
+
+	int32_t temperature_gradient_slope;
+	int32_t temperature_gradient;
+	uint32_t measured_temperature;
+};
+
+
+typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard;
+
+struct SMU7_AcpiScoreboard {
+	uint32_t SavedInterruptMask[2];
+	uint8_t LastACPIRequest;
+	uint8_t CgBifResp;
+	uint8_t RequestType;
+	uint8_t Padding;
+	SMU75_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
+
+struct SMU75_Discrete_PmFuses {
+	uint8_t BapmVddCVidHiSidd[8];
+
+	uint8_t BapmVddCVidLoSidd[8];
+
+	uint8_t VddCVid[8];
+
+	uint8_t SviLoadLineEn;
+	uint8_t SviLoadLineVddC;
+	uint8_t SviLoadLineTrimVddC;
+	uint8_t SviLoadLineOffsetVddC;
+
+	uint16_t TDC_VDDC_PkgLimit;
+	uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+	uint8_t TDC_MAWt;
+
+	uint8_t TdcWaterfallCtl;
+	uint8_t LPMLTemperatureMin;
+	uint8_t LPMLTemperatureMax;
+	uint8_t Reserved;
+
+	uint8_t LPMLTemperatureScaler[16];
+
+	int16_t FuzzyFan_ErrorSetDelta;
+	int16_t FuzzyFan_ErrorRateSetDelta;
+	int16_t FuzzyFan_PwmSetDelta;
+	uint16_t Reserved6;
+
+	uint8_t GnbLPML[16];
+
+	uint8_t GnbLPMLMaxVid;
+	uint8_t GnbLPMLMinVid;
+	uint8_t Reserved1[2];
+
+	uint16_t BapmVddCBaseLeakageHiSidd;
+	uint16_t BapmVddCBaseLeakageLoSidd;
+
+	uint16_t  VFT_Temp[3];
+	uint8_t   Version;
+	uint8_t   padding;
+
+	SMU_QuadraticCoeffs VFT_ATE[3];
+
+	SMU_QuadraticCoeffs AVFS_GB;
+	SMU_QuadraticCoeffs ATE_ACBTC_GB;
+
+	SMU_QuadraticCoeffs P2V;
+
+	uint32_t PsmCharzFreq;
+
+	uint16_t InversionVoltage;
+	uint16_t PsmCharzTemp;
+
+	uint32_t EnabledAvfsModules;
+
+	SMU_QuadraticCoeffs BtcGbv_CksOff;
+};
+
+typedef struct SMU75_Discrete_PmFuses SMU75_Discrete_PmFuses;
+
+struct SMU7_Discrete_Log_Header_Table {
+	uint32_t    version;
+	uint32_t    asic_id;
+	uint16_t    flags;
+	uint16_t    entry_size;
+	uint32_t    total_size;
+	uint32_t    num_of_entries;
+	uint8_t     type;
+	uint8_t     mode;
+	uint8_t     filler_0[2];
+	uint32_t    filler_1[2];
+};
+
+typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table;
+
+struct SMU7_Discrete_Log_Cntl {
+	uint8_t             Enabled;
+	uint8_t             Type;
+	uint8_t             padding[2];
+	uint32_t            BufferSize;
+	uint32_t            SamplesLogged;
+	uint32_t            SampleSize;
+	uint32_t            AddrL;
+	uint32_t            AddrH;
+};
+
+typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+#define CAC_ACC_NW_NUM_OF_SIGNALS 87
+#endif
+
+
+struct SMU7_Discrete_Cac_Collection_Table {
+	uint32_t temperature;
+	uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+};
+
+typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table;
+
+struct SMU7_Discrete_Cac_Verification_Table {
+	uint32_t VddcTotalPower;
+	uint32_t VddcLeakagePower;
+	uint32_t VddcConstantPower;
+	uint32_t VddcGfxDynamicPower;
+	uint32_t VddcUvdDynamicPower;
+	uint32_t VddcVceDynamicPower;
+	uint32_t VddcAcpDynamicPower;
+	uint32_t VddcPcieDynamicPower;
+	uint32_t VddcDceDynamicPower;
+	uint32_t VddcCurrent;
+	uint32_t VddcVoltage;
+	uint32_t VddciTotalPower;
+	uint32_t VddciLeakagePower;
+	uint32_t VddciConstantPower;
+	/* The table is already swapped, so in order to use the value from it,
+	uint32_t Vddr1TotalPower;
+	uint32_t Vddr1LeakagePower;
+	uint32_t Vddr1ConstantPower;
+	uint32_t Vddr1DynamicPower;
+	uint32_t spare[4];
+	uint32_t temperature;
+};
+
+typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table;
+
+struct SMU7_Discrete_Pm_Status_Table {
+	int32_t T_meas_max[SMU75_THERMAL_INPUT_LOOP_COUNT];
+	int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT];
+
+	uint32_t I_calc_max;
+	uint32_t I_calc_acc;
+	uint32_t P_meas_acc;
+	uint32_t V_meas_load_acc;
+	uint32_t I_meas_acc;
+	uint32_t P_meas_acc_vddci;
+	uint32_t V_meas_load_acc_vddci;
+	uint32_t I_meas_acc_vddci;
+
+	uint16_t Sclk_dpm_residency[8];
+	uint16_t Uvd_dpm_residency[8];
+	uint16_t Vce_dpm_residency[8];
+	uint16_t Mclk_dpm_residency[4];
+
+	uint32_t P_roc_acc;
+	uint32_t PkgPwr_max;
+	uint32_t PkgPwr_acc;
+	uint32_t MclkSwitchingTime_max;
+	uint32_t MclkSwitchingTime_acc;
+	uint32_t FanPwm_acc;
+	uint32_t FanRpm_acc;
+	uint32_t Gfx_busy_acc;
+	uint32_t Mc_busy_acc;
+	uint32_t Fps_acc;
+
+	uint32_t AccCnt;
+};
+
+typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table;
+
+struct SMU7_Discrete_AutoWattMan_Status_Table {
+	int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT];
+	uint16_t Sclk_dpm_residency[8];
+	uint16_t Mclk_dpm_residency[4];
+	uint32_t TgpPwr_acc;
+	uint32_t Gfx_busy_acc;
+	uint32_t Mc_busy_acc;
+	uint32_t AccCnt;
+};
+
+typedef struct SMU7_Discrete_AutoWattMan_Status_Table SMU7_Discrete_AutoWattMan_Status_Table;
+
+#define SMU7_MAX_GFX_CU_COUNT 24
+#define SMU7_MIN_GFX_CU_COUNT  8
+#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT 0
+#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_MASK  (0xFFFF << SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT)
+#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT 16
+#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_MASK  (0xFFFF << SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT)
+
+struct SMU7_GfxCuPgScoreboard {
+	uint8_t Enabled;
+	uint8_t WaterfallUp;
+	uint8_t WaterfallDown;
+	uint8_t WaterfallLimit;
+	uint8_t CurrMaxCu;
+	uint8_t TargMaxCu;
+	uint8_t ClampMode;
+	uint8_t Active;
+	uint8_t MaxSupportedCu;
+	uint8_t MinSupportedCu;
+	uint8_t PendingGfxCuHostInterrupt;
+	uint8_t LastFilteredMaxCuInteger;
+	uint16_t FilteredMaxCu;
+	uint16_t FilteredMaxCuAlpha;
+	uint16_t FilterResetCount;
+	uint16_t FilterResetCountLimit;
+	uint8_t ForceCu;
+	uint8_t ForceCuCount;
+	uint8_t AcModeMaxCu;
+	uint8_t DcModeMaxCu;
+};
+
+typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
+
+#define SMU7_SCLK_CAC 0x561
+#define SMU7_MCLK_CAC 0xF9
+#define SMU7_VCLK_CAC 0x2DE
+#define SMU7_DCLK_CAC 0x2DE
+#define SMU7_ECLK_CAC 0x25E
+#define SMU7_ACLK_CAC 0x25E
+#define SMU7_SAMCLK_CAC 0x25E
+#define SMU7_DISPCLK_CAC 0x100
+#define SMU7_CAC_CONSTANT 0x2EE3430
+#define SMU7_CAC_CONSTANT_SHIFT 18
+
+#define SMU7_VDDCI_MCLK_CONST        1765
+#define SMU7_VDDCI_MCLK_CONST_SHIFT  16
+#define SMU7_VDDCI_VDDCI_CONST       50958
+#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14
+#define SMU7_VDDCI_CONST             11781
+#define SMU7_VDDCI_STROBE_PWR        1331
+
+#define SMU7_VDDR1_CONST            693
+#define SMU7_VDDR1_CAC_WEIGHT       20
+#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19
+#define SMU7_VDDR1_STROBE_PWR       512
+
+#define SMU7_AREA_COEFF_UVD 0xA78
+#define SMU7_AREA_COEFF_VCE 0x190A
+#define SMU7_AREA_COEFF_ACP 0x22D1
+#define SMU7_AREA_COEFF_SAMU 0x534
+
+#define SMU7_THERM_OUT_MODE_DISABLE       0x0
+#define SMU7_THERM_OUT_MODE_THERM_ONLY    0x1
+#define SMU7_THERM_OUT_MODE_THERM_VRHOT   0x2
+
+#define SQ_Enable_MASK 0x1
+#define SQ_IR_MASK 0x2
+#define SQ_PCC_MASK 0x4
+#define SQ_EDC_MASK 0x8
+
+#define TCP_Enable_MASK 0x100
+#define TCP_IR_MASK 0x200
+#define TCP_PCC_MASK 0x400
+#define TCP_EDC_MASK 0x800
+
+#define TD_Enable_MASK 0x10000
+#define TD_IR_MASK 0x20000
+#define TD_PCC_MASK 0x40000
+#define TD_EDC_MASK 0x80000
+
+#define DB_Enable_MASK 0x1000000
+#define DB_IR_MASK 0x2000000
+#define DB_PCC_MASK 0x4000000
+#define DB_EDC_MASK 0x8000000
+
+#define SQ_Enable_SHIFT 0
+#define SQ_IR_SHIFT 1
+#define SQ_PCC_SHIFT 2
+#define SQ_EDC_SHIFT 3
+
+#define TCP_Enable_SHIFT 8
+#define TCP_IR_SHIFT 9
+#define TCP_PCC_SHIFT 10
+#define TCP_EDC_SHIFT 11
+
+#define TD_Enable_SHIFT 16
+#define TD_IR_SHIFT 17
+#define TD_PCC_SHIFT 18
+#define TD_EDC_SHIFT 19
+
+#define DB_Enable_SHIFT 24
+#define DB_IR_SHIFT 25
+#define DB_PCC_SHIFT 26
+#define DB_EDC_SHIFT 27
+
+#define PMFUSES_AVFSSIZE 104
+
+#define BTCGB0_Vdroop_Enable_MASK  0x1
+#define BTCGB1_Vdroop_Enable_MASK  0x2
+#define AVFSGB0_Vdroop_Enable_MASK 0x4
+#define AVFSGB1_Vdroop_Enable_MASK 0x8
+
+#define BTCGB0_Vdroop_Enable_SHIFT  0
+#define BTCGB1_Vdroop_Enable_SHIFT  1
+#define AVFSGB0_Vdroop_Enable_SHIFT 2
+#define AVFSGB1_Vdroop_Enable_SHIFT 3
+
+#pragma pack(pop)
+
+
+#endif
+

From ee38fbf3bfb58c427dc3670a70cc6df4ebe1c785 Mon Sep 17 00:00:00 2001
From: Eric Huang <JinHuiEric.Huang@amd.com>
Date: Fri, 17 Nov 2017 11:17:48 -0500
Subject: [PATCH 0880/1286] drm/amd: add a new struct in atombios.h

Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/include/atombios.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index f696bbb643ef..7931502fa54f 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -632,6 +632,13 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2
   ULONG ulReserved;
 }COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
 
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3
+{
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock;
+  USHORT  usMclk_fcw_frac;                  //fractional divider of fcw = usMclk_fcw_frac/65536
+  USHORT  usMclk_fcw_int;                   //integer divider of fcw
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3;
+
 //Input parameter of DynamicMemorySettingsTable
 //when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM
 typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER

From 4eeed17e713b9e6494a08ab37623283723596b5a Mon Sep 17 00:00:00 2001
From: Eric Huang <JinHuiEric.Huang@amd.com>
Date: Fri, 17 Nov 2017 11:21:02 -0500
Subject: [PATCH 0881/1286] drm/amd/powerplay: update ppatomctrl.c (v2)

Add atomctrl_get_memory_pll_dividers_ai(), used for calculating memory
clocks in powerplay.

v2: handle endian swapping of atom data (Alex)
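
For illustration only (a hedged sketch, not part of the patch): a hwmgr-side
caller could consume the new helper roughly like this, where "clock" and
"mem_level" are hypothetical locals for the target MCLK and the
SMU75_Discrete_MemoryLevel being populated:

	pp_atomctrl_memory_clock_param_ai mpll_param;
	int ret;

	ret = atomctrl_get_memory_pll_dividers_ai(hwmgr, clock, &mpll_param);
	if (ret)
		return ret;

	/* the helper already converts the atom output to CPU endianness */
	mem_level->Fcw_int  = (uint16_t)mpll_param.ulMclk_fcw_int;
	mem_level->Fcw_frac = (uint16_t)mpll_param.ulMclk_fcw_frac;
	mem_level->Postdiv  = (uint8_t)mpll_param.ulPostDiv;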

Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c  | 31 +++++++++++++++++++
 .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h  | 10 ++++++
 2 files changed, 41 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index d58be7eb8256..cf99c5eaf080 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -23,6 +23,7 @@
 #include "pp_debug.h"
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include "atom.h"
 #include "ppatomctrl.h"
 #include "atombios.h"
@@ -314,6 +315,36 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
+int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
+					uint32_t clock_value,
+					pp_atomctrl_memory_clock_param_ai *mpll_param)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {0};
+	int result;
+
+	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
+
+	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
+			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
+			(uint32_t *)&mpll_parameters);
+
+	/* VEGAM's mpll takes some time to finish computing */
+	udelay(10);
+
+	if (!result) {
+		mpll_param->ulMclk_fcw_int =
+			le16_to_cpu(mpll_parameters.usMclk_fcw_int);
+		mpll_param->ulMclk_fcw_frac =
+			le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
+		mpll_param->ulClock =
+			le32_to_cpu(mpll_parameters.ulClock.ulClock);
+		mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;
+	}
+
+	return result;
+}
+
 int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
 					  uint32_t clock_value,
 					  pp_atomctrl_clock_dividers_kong *dividers)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index e1b5d6b0b548..3ee54f182943 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -146,6 +146,14 @@ struct pp_atomctrl_memory_clock_param {
 };
 typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param;
 
+struct pp_atomctrl_memory_clock_param_ai {
+	uint32_t ulClock;
+	uint32_t ulPostDiv;
+	uint16_t ulMclk_fcw_frac;
+	uint16_t ulMclk_fcw_int;
+};
+typedef struct pp_atomctrl_memory_clock_param_ai pp_atomctrl_memory_clock_param_ai;
+
 struct pp_atomctrl_internal_ss_info {
 	uint32_t speed_spectrum_percentage;                      /* in 1/100 percentage */
 	uint32_t speed_spectrum_rate;                            /* in KHz */
@@ -295,6 +303,8 @@ extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, ui
 extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table);
 extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
 		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param);
+extern int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
+		uint32_t clock_value, pp_atomctrl_memory_clock_param_ai *mpll_param);
 extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
 						 uint32_t clock_value,
 						 pp_atomctrl_clock_dividers_kong *dividers);

From 4dc1a2d9288dbba903696d2dd5d83b5311f2d026 Mon Sep 17 00:00:00 2001
From: Eric Huang <JinHuiEric.Huang@amd.com>
Date: Fri, 17 Nov 2017 11:31:09 -0500
Subject: [PATCH 0882/1286] drm/amd/powerplay: update process pptables

Add functionality to fetch gpio table from vbios.
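
Illustrative sketch (an assumption about how the parsed table is consumed,
not something added by this patch): once init_clock_voltage_dependency() has
filled in pp_table_information->gpio_table, a hwmgr could look up the VRHot
trigger level like this, with "sclk_index" as a hypothetical local:

	struct phm_ppt_v1_information *info =
		(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (info->gpio_table)
		sclk_index = info->gpio_table->vrhot_triggered_sclk_dpm_index;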

Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../powerplay/hwmgr/process_pptables_v1_0.c   | 37 +++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 8516516eb6cc..f0d48b183d22 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -728,6 +728,32 @@ static int get_mm_clock_voltage_table(
 	return 0;
 }
 
+static int get_gpio_table(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_gpio_table **pp_tonga_gpio_table,
+		const ATOM_Tonga_GPIO_Table *atom_gpio_table)
+{
+	uint32_t table_size;
+	struct phm_ppt_v1_gpio_table *pp_gpio_table;
+	struct phm_ppt_v1_information *pp_table_information =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	table_size = sizeof(struct phm_ppt_v1_gpio_table);
+	pp_gpio_table = kzalloc(table_size, GFP_KERNEL);
+	if (!pp_gpio_table)
+		return -ENOMEM;
+
+	if (pp_table_information->vdd_dep_on_sclk->count <
+			atom_gpio_table->ucVRHotTriggeredSclkDpmIndex)
+		PP_ASSERT_WITH_CODE(false,
+				"SCLK DPM index for VRHot cannot exceed the total sclk level count!",);
+	else
+		pp_gpio_table->vrhot_triggered_sclk_dpm_index =
+				atom_gpio_table->ucVRHotTriggeredSclkDpmIndex;
+
+	*pp_tonga_gpio_table = pp_gpio_table;
+
+	return 0;
+}
 /**
  * Private Function used during initialization.
  * Initialize clock voltage dependency
@@ -761,11 +787,15 @@ static int init_clock_voltage_dependency(
 	const PPTable_Generic_SubTable_Header *pcie_table =
 		(const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
 		le16_to_cpu(powerplay_table->usPCIETableOffset));
+	const ATOM_Tonga_GPIO_Table *gpio_table =
+		(const ATOM_Tonga_GPIO_Table *)(((unsigned long) powerplay_table) +
+		le16_to_cpu(powerplay_table->usGPIOTableOffset));
 
 	pp_table_information->vdd_dep_on_sclk = NULL;
 	pp_table_information->vdd_dep_on_mclk = NULL;
 	pp_table_information->mm_dep_table = NULL;
 	pp_table_information->pcie_table = NULL;
+	pp_table_information->gpio_table = NULL;
 
 	if (powerplay_table->usMMDependencyTableOffset != 0)
 		result = get_mm_clock_voltage_table(hwmgr,
@@ -810,6 +840,10 @@ static int init_clock_voltage_dependency(
 		result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
 		pp_table_information->vdd_dep_on_sclk);
 
+	if (!result && gpio_table)
+		result = get_gpio_table(hwmgr, &pp_table_information->gpio_table,
+				gpio_table);
+
 	return result;
 }
 
@@ -1116,6 +1150,9 @@ static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
 	kfree(pp_table_information->pcie_table);
 	pp_table_information->pcie_table = NULL;
 
+	kfree(pp_table_information->gpio_table);
+	pp_table_information->gpio_table = NULL;
+
 	kfree(hwmgr->pptable);
 	hwmgr->pptable = NULL;
 

From ac7822b0026fbc33f82023b155542426b1bd211b Mon Sep 17 00:00:00 2001
From: Eric Huang <JinHuiEric.Huang@amd.com>
Date: Wed, 11 Apr 2018 15:32:58 -0500
Subject: [PATCH 0883/1286] drm/amd/powerplay: add smumgr support for VEGAM
 (v2)

The smumgr handles communication between the driver
and the SMU for power management.

v2: fix typo (Alex)
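
As context (a hedged sketch, not part of the patch; the chip_id and
start_smu member names are assumptions): powerplay picks the per-ASIC smumgr
once at early init and then dispatches every SMU operation through that ops
table, so the VEGAM hookup amounts to roughly:

	/* selection in hwmgr_early_init(), as extended by this patch */
	if (hwmgr->chip_id == CHIP_VEGAM) {
		hwmgr->smumgr_funcs = &vegam_smu_funcs;
		polaris_set_asic_special_caps(hwmgr);
		hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
	}

	/* generic code then calls through the ops table; start_smu mirrors
	 * the vegam_start_smu() callback implemented below
	 */
	hwmgr->smumgr_funcs->start_smu(hwmgr);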

Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c   |    6 +
 drivers/gpu/drm/amd/powerplay/smumgr/Makefile |    2 +-
 .../drm/amd/powerplay/smumgr/vegam_smumgr.c   | 2382 +++++++++++++++++
 .../drm/amd/powerplay/smumgr/vegam_smumgr.h   |   75 +
 4 files changed, 2464 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
 create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index d1052b5e0ca8..eecb11824412 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -40,6 +40,7 @@ extern const struct pp_smumgr_func iceland_smu_funcs;
 extern const struct pp_smumgr_func tonga_smu_funcs;
 extern const struct pp_smumgr_func fiji_smu_funcs;
 extern const struct pp_smumgr_func polaris10_smu_funcs;
+extern const struct pp_smumgr_func vegam_smu_funcs;
 extern const struct pp_smumgr_func vega10_smu_funcs;
 extern const struct pp_smumgr_func vega12_smu_funcs;
 extern const struct pp_smumgr_func smu10_smu_funcs;
@@ -136,6 +137,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 			polaris_set_asic_special_caps(hwmgr);
 			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
 			break;
+		case CHIP_VEGAM:
+			hwmgr->smumgr_funcs = &vegam_smu_funcs;
+			polaris_set_asic_special_caps(hwmgr);
+			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
+			break;
 		default:
 			return -EINVAL;
 		}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 958755075421..0a200406a1ec 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -26,7 +26,7 @@
 SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
 	  polaris10_smumgr.o iceland_smumgr.o \
 	  smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
-	  vega12_smumgr.o
+	  vega12_smumgr.o vegam_smumgr.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
new file mode 100644
index 000000000000..c9a563399330
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -0,0 +1,2382 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "pp_debug.h"
+#include "smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "vegam_smumgr.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "ppatomctrl.h"
+#include "cgs_common.h"
+#include "smu7_ppsmc.h"
+
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+#define PPVEGAM_TARGETACTIVITY_DFLT                     50
+
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VDDC_VDDCI_DELTA            200
+#define MC_CG_ARB_FREQ_F1           0x0b
+
+#define STRAP_ASIC_RO_LSB    2168
+#define STRAP_ASIC_RO_MSB    2175
+
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage      ((uint16_t) 0x415)
+#define PPSMC_MSG_EnableModeSwitchRLCNotification  ((uint16_t) 0x305)
+
+static const struct vegam_pt_defaults
+vegam_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+	/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+	 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+	{ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+			{VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
+			{VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+			{VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
+			{VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
+			{VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
+			{VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
+			{VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
+			{VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
+
+static int vegam_smu_init(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data;
+
+	smu_data = kzalloc(sizeof(struct vegam_smumgr), GFP_KERNEL);
+	if (smu_data == NULL)
+		return -ENOMEM;
+
+	hwmgr->smu_backend = smu_data;
+
+	if (smu7_init(hwmgr)) {
+		kfree(smu_data);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vegam_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+
+	/* Wait for smc boot up */
+	/* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+
+	/* Assert reset */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+	result = smu7_upload_smu_firmware_image(hwmgr);
+	if (result != 0)
+		return result;
+
+	/* Clear status */
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+	/* De-assert reset */
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+
+	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+
+
+	/* Call Test SMU message with 0x20000 offset to trigger SMU start */
+	smu7_send_msg_to_smc_offset(hwmgr);
+
+	/* Wait done bit to be set */
+	/* Check pass/failed indicator */
+
+	PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+
+	if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+						SMU_STATUS, SMU_PASS))
+		PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
+
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	/* Wait for firmware to initialize */
+	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+	return result;
+}
+
+static int vegam_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+
+	/* wait for smc boot up */
+	PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+
+	/* Clear firmware interrupt enable flag */
+	/* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+				ixFIRMWARE_FLAGS, 0);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL,
+					rst_reg, 1);
+
+	result = smu7_upload_smu_firmware_image(hwmgr);
+	if (result != 0)
+		return result;
+
+	/* Set smc instruction start point at 0x0 */
+	smu7_program_jump_on_start(hwmgr);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+					SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+	/* Wait for firmware to initialize */
+
+	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
+					FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+	return result;
+}
+
+static int vegam_start_smu(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	/* Only start SMC if SMC RAM is not running */
+	if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
+		smu_data->protected_mode = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+				CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+		smu_data->smu7_data.security_hard_key = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(
+				hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+
+		/* Check if SMU is running in protected mode */
+		if (smu_data->protected_mode == 0)
+			result = vegam_start_smu_in_non_protection_mode(hwmgr);
+		else
+			result = vegam_start_smu_in_protection_mode(hwmgr);
+
+		if (result != 0)
+			PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
+	}
+
+	/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
+	smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU75_Firmware_Header, SoftRegisters),
+			&(smu_data->smu7_data.soft_regs_start),
+			0x40000);
+
+	result = smu7_request_smu_load_fw(hwmgr);
+
+	return result;
+}
+
+static int vegam_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t tmp;
+	int result;
+	bool error = false;
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, DpmTable),
+			&tmp, SMC_RAM_END);
+
+	if (0 == result)
+		smu_data->smu7_data.dpm_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, SoftRegisters),
+			&tmp, SMC_RAM_END);
+
+	if (!result) {
+		data->soft_regs_start = tmp;
+		smu_data->smu7_data.soft_regs_start = tmp;
+	}
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, mcRegisterTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.mc_reg_table_start = tmp;
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, FanTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.fan_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, mcArbDramTimingTable),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.arb_table_start = tmp;
+
+	error |= (0 != result);
+
+	result = smu7_read_smc_sram_dword(hwmgr,
+			SMU7_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU75_Firmware_Header, Version),
+			&tmp, SMC_RAM_END);
+
+	if (!result)
+		hwmgr->microcode_version_info.SMC = tmp;
+
+	error |= (0 != result);
+
+	return error ? -1 : 0;
+}
+
+static bool vegam_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+			? true : false;
+}
+
+static uint32_t vegam_get_mac_definition(uint32_t value)
+{
+	switch (value) {
+	case SMU_MAX_LEVELS_GRAPHICS:
+		return SMU75_MAX_LEVELS_GRAPHICS;
+	case SMU_MAX_LEVELS_MEMORY:
+		return SMU75_MAX_LEVELS_MEMORY;
+	case SMU_MAX_LEVELS_LINK:
+		return SMU75_MAX_LEVELS_LINK;
+	case SMU_MAX_ENTRIES_SMIO:
+		return SMU75_MAX_ENTRIES_SMIO;
+	case SMU_MAX_LEVELS_VDDC:
+		return SMU75_MAX_LEVELS_VDDC;
+	case SMU_MAX_LEVELS_VDDGFX:
+		return SMU75_MAX_LEVELS_VDDGFX;
+	case SMU_MAX_LEVELS_VDDCI:
+		return SMU75_MAX_LEVELS_VDDCI;
+	case SMU_MAX_LEVELS_MVDD:
+		return SMU75_MAX_LEVELS_MVDD;
+	case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+		return SMU7_UVD_MCLK_HANDSHAKE_DISABLE |
+				SMU7_VCE_MCLK_HANDSHAKE_DISABLE;
+	}
+
+	pr_warn("can't get the mac of %x\n", value);
+	return 0;
+}
+
+static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	smu_data->smc_state_table.UvdBootLevel = 0;
+	if (table_info->mm_dep_table->count > 0)
+		smu_data->smc_state_table.UvdBootLevel =
+				(uint8_t) (table_info->mm_dep_table->count - 1);
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU75_Discrete_DpmTable,
+						UvdBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0x00FFFFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UVDDPM) ||
+		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_UVDDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+	return 0;
+}
+
+static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_StablePState))
+		smu_data->smc_state_table.VceBootLevel =
+			(uint8_t) (table_info->mm_dep_table->count - 1);
+	else
+		smu_data->smc_state_table.VceBootLevel = 0;
+
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+					offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0xFF00FFFF;
+	mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_VCEDPM_SetEnabledMask,
+				(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+	return 0;
+}
+
+static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+	smu_data->smc_state_table.SamuBootLevel = 0;
+	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
+
+	mm_boot_level_offset /= 4;
+	mm_boot_level_offset *= 4;
+	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset);
+	mm_boot_level_value &= 0xFFFFFF00;
+	mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+	cgs_write_ind_register(hwmgr->device,
+			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StablePState))
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SAMUDPM_SetEnabledMask,
+				(uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+	return 0;
+}
+
+
+static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	int max_entry, i;
+
+	max_entry = (SMU75_MAX_LEVELS_LINK < pcie_table->count) ?
+						SMU75_MAX_LEVELS_LINK :
+						pcie_table->count;
+	/* Setup BIF_SCLK levels */
+	for (i = 0; i < max_entry; i++)
+		smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+	return 0;
+}
+
+static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+	switch (type) {
+	case SMU_UVD_TABLE:
+		vegam_update_uvd_smc_table(hwmgr);
+		break;
+	case SMU_VCE_TABLE:
+		vegam_update_vce_smc_table(hwmgr);
+		break;
+	case SMU_SAMU_TABLE:
+		vegam_update_samu_smc_table(hwmgr);
+		break;
+	case SMU_BIF_TABLE:
+		vegam_update_bif_smc_table(hwmgr);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void vegam_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct  phm_ppt_v1_information *table_info =
+			(struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+	if (table_info &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+			table_info->cac_dtp_table->usPowerTuneDataSetID)
+		smu_data->power_tune_defaults =
+				&vegam_power_tune_data_set_array
+				[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+	else
+		smu_data->power_tune_defaults = &vegam_power_tune_data_set_array[0];
+
+}
+
+static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+			SMU75_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t count, level;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		count = data->mvdd_voltage_table.count;
+		if (count > SMU_MAX_SMIO_LEVELS)
+			count = SMU_MAX_SMIO_LEVELS;
+		for (level = 0; level < count; level++) {
+			table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
+					data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+			/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+			table->SmioTable2.Pattern[level].Smio =
+				(uint8_t) level;
+			table->Smio[level] |=
+				data->mvdd_voltage_table.entries[level].smio_low;
+		}
+		table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+		table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+	}
+
+	return 0;
+}
+
+static int vegam_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+					struct SMU75_Discrete_DpmTable *table)
+{
+	uint32_t count, level;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	count = data->vddci_voltage_table.count;
+
+	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		if (count > SMU_MAX_SMIO_LEVELS)
+			count = SMU_MAX_SMIO_LEVELS;
+		for (level = 0; level < count; ++level) {
+			table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
+					data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+			table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+			table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+		}
+	}
+
+	table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+	return 0;
+}
+
+static int vegam_populate_cac_table(struct pp_hwmgr *hwmgr,
+		struct SMU75_Discrete_DpmTable *table)
+{
+	uint32_t count;
+	uint8_t index;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+			table_info->vddc_lookup_table;
+	/* The table is already swapped, so in order to use a value from it
+	 * we need to swap it back.
+	 * We populate the vddc CAC data into the BapmVddc table
+	 * for both split and merged mode.
+	 */
+	for (count = 0; count < lookup_table->count; count++) {
+		index = phm_get_voltage_index(lookup_table,
+				data->vddc_voltage_table.entries[count].value);
+		table->BapmVddcVidLoSidd[count] =
+				convert_to_vid(lookup_table->entries[index].us_cac_low);
+		table->BapmVddcVidHiSidd[count] =
+				convert_to_vid(lookup_table->entries[index].us_cac_mid);
+		table->BapmVddcVidHiSidd2[count] =
+				convert_to_vid(lookup_table->entries[index].us_cac_high);
+	}
+
+	return 0;
+}
+
+static int vegam_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+		struct SMU75_Discrete_DpmTable *table)
+{
+	vegam_populate_smc_vddci_table(hwmgr, table);
+	vegam_populate_smc_mvdd_table(hwmgr, table);
+	vegam_populate_cac_table(hwmgr, table);
+
+	return 0;
+}
+
+static int vegam_populate_ulv_level(struct pp_hwmgr *hwmgr,
+		struct SMU75_Discrete_Ulv *state)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+	state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+	return 0;
+}
+
+static int vegam_populate_ulv_state(struct pp_hwmgr *hwmgr,
+		struct SMU75_Discrete_DpmTable *table)
+{
+	return vegam_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int vegam_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+		struct SMU75_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data =
+			(struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	int i;
+
+	/* Index (dpm_table->pcie_speed_table.count)
+	 * is reserved for PCIE boot level. */
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed  =
+				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+				dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity = 1;
+		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+	}
+
+	smu_data->smc_state_table.LinkLevelCount =
+			(uint8_t)dpm_table->pcie_speed_table.count;
+
+/* TODO: move to hwmgr */
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
+static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+		uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+	uint32_t i;
+	uint16_t vddci;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	*voltage = *mvdd = 0;
+
+	/* clock - voltage dependency table is empty table */
+	if (dep_table->count == 0)
+		return -EINVAL;
+
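+	/* The returned voltage packs VDDC in the low field and VDDCI above it
+	 * (VDDC_SHIFT/VDDCI_SHIFT), plus a single phase bit (PHASES_SHIFT). */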
+	for (i = 0; i < dep_table->count; i++) {
+		/* find the first sclk greater than or equal to the requested clock */
+		if (dep_table->entries[i].clk >= clock) {
+			*voltage |= (dep_table->entries[i].vddc *
+					VOLTAGE_SCALE) << VDDC_SHIFT;
+			if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+				*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else if (dep_table->entries[i].vddci)
+				*voltage |= (dep_table->entries[i].vddci *
+						VOLTAGE_SCALE) << VDDCI_SHIFT;
+			else {
+				vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+						(dep_table->entries[i].vddc -
+								(uint16_t)VDDC_VDDCI_DELTA));
+				*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+			}
+
+			if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+				*mvdd = data->vbios_boot_state.mvdd_bootup_value *
+					VOLTAGE_SCALE;
+			else if (dep_table->entries[i].mvdd)
+				*mvdd = (uint32_t) dep_table->entries[i].mvdd *
+					VOLTAGE_SCALE;
+
+			*voltage |= 1 << PHASES_SHIFT;
+			return 0;
+		}
+	}
+
+	/* sclk is bigger than the max sclk in the dependency table */
+	*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+	vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+			(dep_table->entries[i - 1].vddc -
+					(uint16_t)VDDC_VDDCI_DELTA));
+
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+		*voltage |= (data->vbios_boot_state.vddci_bootup_value *
+				VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else if (dep_table->entries[i - 1].vddci)
+		*voltage |= (dep_table->entries[i - 1].vddci *
+				VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else
+		*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+		*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i - 1].mvdd)
+		*mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+	return 0;
+}
+
+static void vegam_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+				   SMU75_Discrete_DpmTable  *table)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	uint32_t i, ref_clk;
+
+	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+	ref_clk = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
+
+	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+		for (i = 0; i < NUM_SCLK_RANGE; i++) {
+			table->SclkFcwRangeTable[i].vco_setting =
+					range_table_from_vbios.entry[i].ucVco_setting;
+			table->SclkFcwRangeTable[i].postdiv =
+					range_table_from_vbios.entry[i].ucPostdiv;
+			table->SclkFcwRangeTable[i].fcw_pcc =
+					range_table_from_vbios.entry[i].usFcw_pcc;
+
+			table->SclkFcwRangeTable[i].fcw_trans_upper =
+					range_table_from_vbios.entry[i].usFcw_trans_upper;
+			table->SclkFcwRangeTable[i].fcw_trans_lower =
+					range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+		}
+		return;
+	}
+
+	for (i = 0; i < NUM_SCLK_RANGE; i++) {
+		smu_data->range_table[i].trans_lower_frequency =
+				(ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+		smu_data->range_table[i].trans_upper_frequency =
+				(ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+	}
+}
+
+static int vegam_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	const SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+	struct pp_atomctrl_clock_dividers_ai dividers;
+	uint32_t ref_clock;
+	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+	uint8_t i;
+	int result;
+	uint64_t temp;
+
+	sclk_setting->SclkFrequency = clock;
+	/* get the engine clock dividers for this clock value */
+	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
+	if (result == 0) {
+		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+		sclk_setting->PllRange = dividers.ucSclkPllRange;
+		sclk_setting->Sclk_slew_rate = 0x400;
+		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+		sclk_setting->Pcc_down_slew_rate = 0xffff;
+		sclk_setting->SSc_En = dividers.ucSscEnable;
+		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+		return result;
+	}
+
+	ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
+
+	for (i = 0; i < NUM_SCLK_RANGE; i++) {
+		if (clock > smu_data->range_table[i].trans_lower_frequency
+		&& clock <= smu_data->range_table[i].trans_upper_frequency) {
+			sclk_setting->PllRange = i;
+			break;
+		}
+	}
+
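+	/* Fallback: derive the feedback clock word manually,
+	 * fcw = (clock << postdiv) / ref_clock, with a 16-bit (.16 fixed-point) fraction. */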
+	sclk_setting->Fcw_int = (uint16_t)
+			((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
+					ref_clock);
+	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+	temp <<= 0x10;
+	do_div(temp, ref_clock);
+	sclk_setting->Fcw_frac = temp & 0xffff;
+
+	pcc_target_percent = 10; /*  Hardcode 10% for now. */
+	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+	sclk_setting->Pcc_fcw_int = (uint16_t)
+			((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
+					ref_clock);
+
+	ss_target_percent = 2; /*  Hardcode 2% for now. */
+	sclk_setting->SSc_En = 0;
+	if (ss_target_percent) {
+		sclk_setting->SSc_En = 1;
+		ss_target_freq = clock - (clock * ss_target_percent / 100);
+		sclk_setting->Fcw1_int = (uint16_t)
+				((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
+						ref_clock);
+		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+		temp <<= 0x10;
+		do_div(temp, ref_clock);
+		sclk_setting->Fcw1_frac = temp & 0xffff;
+	}
+
+	return 0;
+}
+
+static uint8_t vegam_get_sleep_divider_id_from_clock(uint32_t clock,
+		uint32_t clock_insr)
+{
+	uint8_t i;
+	uint32_t temp;
+	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
+
+	PP_ASSERT_WITH_CODE((clock >= min),
+			"Engine clock can't satisfy stutter requirement!",
+			return 0);
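+	/* Pick the largest divider (i + 1) for which clock / (i + 1) still meets the minimum. */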
+	for (i = 31;  ; i--) {
+		temp = clock / (i + 1);
+
+		if (temp >= min || i == 0)
+			break;
+	}
+	return i;
+}
+
+static int vegam_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU75_Discrete_GraphicsLevel *level)
+{
+	int result;
+	/* PP_Clocks minClocks; */
+	uint32_t mvdd;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	SMU_SclkSetting curr_sclk_setting = { 0 };
+
+	result = vegam_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+	/* populate graphics levels */
+	result = vegam_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_sclk, clock,
+			&level->MinVoltage, &mvdd);
+
+	PP_ASSERT_WITH_CODE((0 == result),
+			"can not find VDDC voltage value for "
+			"VDDC engine clock dependency table",
+			return result);
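+	/* SclkDPMTuning_VEGAM packs the activity target and hysteresis values;
+	 * extract the activity field via its shift. */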
+	level->ActivityLevel = (uint16_t)(SclkDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
+
+	level->CcPwrDynRm = 0;
+	level->CcPwrDynRm1 = 0;
+	level->EnabledForActivity = 0;
+	level->EnabledForThrottle = 1;
+	level->VoltageDownHyst = 0;
+	level->PowerThrottle = 0;
+	data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+		level->DeepSleepDivId = vegam_get_sleep_divider_id_from_clock(clock,
+								hwmgr->display_config->min_core_set_clock_in_sr);
+
+	level->SclkSetting = curr_sclk_setting;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+	return 0;
+}
+
+static int vegam_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+	uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+	int result = 0;
+	uint32_t array = smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU75_Discrete_DpmTable, GraphicsLevel);
+	uint32_t array_size = sizeof(struct SMU75_Discrete_GraphicsLevel) *
+			SMU75_MAX_LEVELS_GRAPHICS;
+	struct SMU75_Discrete_GraphicsLevel *levels =
+			smu_data->smc_state_table.GraphicsLevel;
+	uint32_t i, max_entry;
+	uint8_t hightest_pcie_level_enabled = 0,
+		lowest_pcie_level_enabled = 0,
+		mid_pcie_level_enabled = 0,
+		count = 0;
+
+	vegam_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+		result = vegam_populate_single_graphic_level(hwmgr,
+				dpm_table->sclk_table.dpm_levels[i].value,
+				&(smu_data->smc_state_table.GraphicsLevel[i]));
+		if (result)
+			return result;
+
+		levels[i].UpHyst = (uint8_t)
+				(SclkDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
+		levels[i].DownHyst = (uint8_t)
+				(SclkDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
+		/* Make sure only DPM levels 0 and 1 have a Deep Sleep Divider Id populated. */
+		if (i > 1)
+			levels[i].DeepSleepDivId = 0;
+	}
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SPLLShutdownSupport))
+		smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+	smu_data->smc_state_table.GraphicsDpmLevelCount =
+			(uint8_t)dpm_table->sclk_table.count;
+	hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++)
+		levels[i].EnabledForActivity =
+				(hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask >> i) & 0x1;
+
+	if (pcie_table != NULL) {
+		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+				"There must be 1 or more PCIE levels defined in PPTable.",
+				return -EINVAL);
+		max_entry = pcie_entry_cnt - 1;
+		for (i = 0; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel =
+					(uint8_t) ((i < max_entry) ? i : max_entry);
+	} else {
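+		/* No PCIe table: scan the PCIe DPM enable mask for the highest,
+		 * lowest and an intermediate enabled level. */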
+		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (hightest_pcie_level_enabled + 1))) != 0))
+			hightest_pcie_level_enabled++;
+
+		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << lowest_pcie_level_enabled)) == 0))
+			lowest_pcie_level_enabled++;
+
+		while ((count < hightest_pcie_level_enabled) &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+			count++;
+
+		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+				hightest_pcie_level_enabled ?
+						(lowest_pcie_level_enabled + 1 + count) :
+						hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to hightest_pcie_level_enabled */
+		for (i = 2; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to lowest_pcie_level_enabled */
+		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to mid_pcie_level_enabled */
+		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+	}
+	/* The level count is sent to the SMC once, when the SMC table is initialized, and never changes. */
+	result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, SMC_RAM_END);
+
+	return result;
+}
+
+static int vegam_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
+{
+	struct pp_atomctrl_memory_clock_param_ai mpll_param;
+
+	PP_ASSERT_WITH_CODE(!atomctrl_get_memory_pll_dividers_ai(hwmgr,
+			clock, &mpll_param),
+			"Failed to retrieve memory pll parameter.",
+			return -EINVAL);
+
+	mem_level->MclkFrequency = (uint32_t)mpll_param.ulClock;
+	mem_level->Fcw_int = (uint16_t)mpll_param.ulMclk_fcw_int;
+	mem_level->Fcw_frac = (uint16_t)mpll_param.ulMclk_fcw_frac;
+	mem_level->Postdiv = (uint8_t)mpll_param.ulPostDiv;
+
+	return 0;
+}
+
+static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	int result = 0;
+	uint32_t mclk_stutter_mode_threshold = 60000;
+
+
+	if (table_info->vdd_dep_on_mclk) {
+		result = vegam_get_dependency_volt_by_clk(hwmgr,
+				table_info->vdd_dep_on_mclk, clock,
+				&mem_level->MinVoltage, &mem_level->MinMvdd);
+		PP_ASSERT_WITH_CODE(!result,
+				"can not find MinVddc voltage value from memory "
+				"VDDC voltage dependency table", return result);
+	}
+
+	result = vegam_calculate_mclk_params(hwmgr, clock, mem_level);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to calculate mclk params.",
+			return -EINVAL);
+
+	mem_level->EnabledForThrottle = 1;
+	mem_level->EnabledForActivity = 0;
+	mem_level->VoltageDownHyst = 0;
+	mem_level->ActivityLevel = (uint16_t)
+			(MemoryDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
+	mem_level->StutterEnable = false;
+	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
+
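+	/* Enable memory self-refresh (stutter) only below the threshold (60000,
+	 * i.e. 600 MHz if clocks are in 10 kHz units) and when the display engine
+	 * already has stutter enabled. */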
+	if (mclk_stutter_mode_threshold &&
+		(clock <= mclk_stutter_mode_threshold) &&
+		(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+				STUTTER_ENABLE) & 0x1))
+		mem_level->StutterEnable = true;
+
+	if (!result) {
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_int);
+		CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_frac);
+		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+	}
+
+	return result;
+}
+
+static int vegam_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+	int result;
+	/* populate MCLK dpm table to SMU7 */
+	uint32_t array = smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU75_Discrete_DpmTable, MemoryLevel);
+	uint32_t array_size = sizeof(SMU75_Discrete_MemoryLevel) *
+			SMU75_MAX_LEVELS_MEMORY;
+	struct SMU75_Discrete_MemoryLevel *levels =
+			smu_data->smc_state_table.MemoryLevel;
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+				"can not populate memory level as memory clock is zero",
+				return -EINVAL);
+		result = vegam_populate_single_memory_level(hwmgr,
+				dpm_table->mclk_table.dpm_levels[i].value,
+				&levels[i]);
+
+		if (result)
+			return result;
+
+		levels[i].UpHyst = (uint8_t)
+				(MemoryDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
+		levels[i].DownHyst = (uint8_t)
+				(MemoryDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
+	}
+
+	smu_data->smc_state_table.MemoryDpmLevelCount =
+			(uint8_t)dpm_table->mclk_table.count;
+	hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+			phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++)
+		levels[i].EnabledForActivity =
+				(hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask >> i) & 0x1;
+
+	levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+			PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* The level count is sent to the SMC once, when the SMC table is initialized, and never changes. */
+	result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+			(uint32_t)array_size, SMC_RAM_END);
+
+	return result;
+}
+
+static int vegam_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+		uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint32_t i = 0;
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the first mvdd entry whose clock is greater than or equal to the requested mclk */
+		for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+			if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+				smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+				"MVDD Voltage is outside the supported range.",
+				return -EINVAL);
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+		SMU75_Discrete_DpmTable *table)
+{
+	int result = 0;
+	uint32_t sclk_frequency;
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	SMIO_Pattern vol_level;
+	uint32_t mvdd;
+	uint16_t us_mvdd;
+
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	/* Get the MinVoltage and Frequency for the ACPI level from the VBIOS
+	 * boot sclk and the sclk voltage dependency table. */
+	sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+	result = vegam_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_sclk,
+			sclk_frequency,
+			&table->ACPILevel.MinVoltage, &mvdd);
+	PP_ASSERT_WITH_CODE(!result,
+			"Cannot find ACPI VDDC voltage value "
+			"in Clock Dependency Table",
+			);
+
+	result = vegam_calculate_sclk_params(hwmgr, sclk_frequency,
+			&(table->ACPILevel.SclkSetting));
+	PP_ASSERT_WITH_CODE(!result,
+			"Error retrieving Engine Clock dividers from VBIOS.",
+			return result);
+
+	table->ACPILevel.DeepSleepDivId = 0;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+	CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+	/* Get the MinVoltage and Frequency for the memory ACPI level from the
+	 * VBIOS boot mclk and the mclk voltage dependency table. */
+	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+	result = vegam_get_dependency_volt_by_clk(hwmgr,
+			table_info->vdd_dep_on_mclk,
+			table->MemoryACPILevel.MclkFrequency,
+			&table->MemoryACPILevel.MinVoltage, &mvdd);
+	PP_ASSERT_WITH_CODE((0 == result),
+			"Cannot find ACPI VDDCI voltage value "
+			"in Clock Dependency Table",
+			);
+
+	us_mvdd = 0;
+	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+			(data->mclk_dpm_key_disabled))
+		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+	else {
+		if (!vegam_populate_mvdd_value(hwmgr,
+				data->dpm_table.mclk_table.dpm_levels[0].value,
+				&vol_level))
+			us_mvdd = vol_level.Voltage;
+	}
+
+	if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level))
+		table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+	else
+		table->MemoryACPILevel.MinMvdd = 0;
+
+	table->MemoryACPILevel.StutterEnable = false;
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpHyst = 0;
+	table->MemoryACPILevel.DownHyst = 100;
+	table->MemoryACPILevel.VoltageDownHyst = 0;
+	table->MemoryACPILevel.ActivityLevel =
+		PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+	return result;
+}
+
+static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+		SMU75_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
+
+	table->VceLevelCount = (uint8_t)(mm_table->count);
+	table->VceBootLevel = 0;
+
+	for (count = 0; count < table->VceLevelCount; count++) {
+		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+		table->VceLevel[count].MinVoltage = 0;
+		table->VceLevel[count].MinVoltage |=
+				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = data->vbios_boot_state.vddci_bootup_value;
+
+
+		table->VceLevel[count].MinVoltage |=
+				(vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->VceLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for VCE engine clock",
+				return result);
+
+		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+		SMU75_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
+
+	table->SamuBootLevel = 0;
+	table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+	for (count = 0; count < table->SamuLevelCount; count++) {
+		/* not sure whether we need evclk or not */
+		table->SamuLevel[count].MinVoltage = 0;
+		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+				VOLTAGE_SCALE) << VDDC_SHIFT;
+
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = data->vbios_boot_state.vddci_bootup_value;
+
+		table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->SamuLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for samu clock", return result);
+
+		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+	}
+	return result;
+}
+
+static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+		int32_t eng_clock, int32_t mem_clock,
+		SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+	uint32_t dram_timing;
+	uint32_t dram_timing2;
+	uint32_t burst_time;
+	uint32_t rfsh_rate;
+	uint32_t misc3;
+
+	int result;
+
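+	/* Let the VBIOS program the MC arbiter for this sclk/mclk pair, then
+	 * snapshot the resulting registers into the SMC arbitration table entry. */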
+	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+			eng_clock, mem_clock);
+	PP_ASSERT_WITH_CODE(result == 0,
+			"Error calling VBIOS to set DRAM_TIMING.",
+			return result);
+
+	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+	burst_time = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+	rfsh_rate = cgs_read_register(hwmgr->device, mmMC_ARB_RFSH_RATE);
+	misc3 = cgs_read_register(hwmgr->device, mmMC_ARB_MISC3);
+
+	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
+	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+	arb_regs->McArbBurstTime   = PP_HOST_TO_SMC_UL(burst_time);
+	arb_regs->McArbRfshRate = PP_HOST_TO_SMC_UL(rfsh_rate);
+	arb_regs->McArbMisc3 = PP_HOST_TO_SMC_UL(misc3);
+
+	return 0;
+}
+
+static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct SMU75_Discrete_MCArbDramTimingTable arb_regs = {0};
+	uint32_t i, j;
+	int result = 0;
+
+	for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+			result = vegam_populate_memory_timing_parameters(hwmgr,
+					hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+					hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+					&arb_regs.entries[i][j]);
+			if (result)
+				return result;
+		}
+	}
+
+	result = smu7_copy_bytes_to_smc(
+			hwmgr,
+			smu_data->smu7_data.arb_table_start,
+			(uint8_t *)&arb_regs,
+			sizeof(SMU75_Discrete_MCArbDramTimingTable),
+			SMC_RAM_END);
+	return result;
+}
+
+static int vegam_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+		struct SMU75_Discrete_DpmTable *table)
+{
+	int result = -EINVAL;
+	uint8_t count;
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+			table_info->mm_dep_table;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t vddci;
+
+	table->UvdLevelCount = (uint8_t)(mm_table->count);
+	table->UvdBootLevel = 0;
+
+	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].MinVoltage = 0;
+		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+		table->UvdLevel[count].MinVoltage |=
+				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+						mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+		else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+			vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+		else
+			vddci = data->vbios_boot_state.vddci_bootup_value;
+
+		table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+		table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->UvdLevel[count].VclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for Vclk clock", return result);
+
+		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				table->UvdLevel[count].DclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"can not find divide id for Dclk clock", return result);
+
+		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+	}
+
+	return result;
+}
+
+static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+		struct SMU75_Discrete_DpmTable *table)
+{
+	int result = 0;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	table->GraphicsBootLevel = 0;
+	table->MemoryBootLevel = 0;
+
+	/* find boot level from dpm table */
+	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+			data->vbios_boot_state.sclk_bootup_value,
+			(uint32_t *)&(table->GraphicsBootLevel));
+
+	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+			data->vbios_boot_state.mclk_bootup_value,
+			(uint32_t *)&(table->MemoryBootLevel));
+
+	table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
+			VOLTAGE_SCALE;
+	table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+			VOLTAGE_SCALE;
+	table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
+			VOLTAGE_SCALE;
+
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+	return 0;
+}
+
+static int vegam_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint8_t count, level;
+
+	count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+	for (level = 0; level < count; level++) {
+		if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+				hw_data->vbios_boot_state.sclk_bootup_value) {
+			smu_data->smc_state_table.GraphicsBootLevel = level;
+			break;
+		}
+	}
+
+	count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+	for (level = 0; level < count; level++) {
+		if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+				hw_data->vbios_boot_state.mclk_bootup_value) {
+			smu_data->smc_state_table.MemoryBootLevel = level;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+	uint32_t tmp;
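+	/* Convert a percentage (0..100) into a 4.12 fixed-point fraction. */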
+	tmp = raw_setting * 4096 / 100;
+	return (uint16_t)tmp;
+}
+
+static int vegam_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
+	SMU75_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+	struct pp_advance_fan_control_parameters *fan_table =
+			&hwmgr->thermal_controller.advanceFanControlParameters;
+	int i, j, k;
+	const uint16_t *pdef1;
+	const uint16_t *pdef2;
+
+	table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+	table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+				"Target Operating Temp is out of Range!",
+				);
+
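+	/* The SMC expects temperatures in 8.8 fixed point (degrees C * 256). */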
+	table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTargetOperatingTemp * 256);
+	table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+			cac_dtp_table->usTemperatureLimitHotspot * 256);
+	table->FanGainEdge = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainEdge));
+	table->FanGainHotspot = PP_HOST_TO_SMC_US(
+			scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+	pdef1 = defaults->BAPMTI_R;
+	pdef2 = defaults->BAPMTI_RC;
+
+	for (i = 0; i < SMU75_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU75_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU75_DTE_SINKS; k++) {
+				table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+				table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+				pdef1++;
+				pdef2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+	struct vegam_smumgr *smu_data =
+			(struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+	uint32_t mask = (1 << ((STRAP_ASIC_RO_MSB - STRAP_ASIC_RO_LSB) + 1)) - 1;
+
+	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+	atomctrl_read_efuse(hwmgr, STRAP_ASIC_RO_LSB, STRAP_ASIC_RO_MSB,
+			mask, &efuse);
+
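+	/* Map the fused RO value (0..255) linearly onto the 1200..2500 range. */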
+	min = 1200;
+	max = 2500;
+
+	ro = efuse * (max - min) / 255 + min;
+
+	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+	for (i = 0; i < sclk_table->count; i++) {
+		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+				sclk_table->entries[i].cks_enable << i;
+		volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) *
+				136418 - (ro - 70) * 1000000) /
+				(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+		volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 *
+				3232 - (ro - 65) * 1000000) /
+				(2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+
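+		/* The offset is the voltage delta (plus the fused per-level cks_voffset)
+		 * scaled by 100/625 and rounded up, i.e. presumably 6.25 mV steps. */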
+		if (volt_without_cks >= volt_with_cks)
+			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+					sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+	}
+
+	smu_data->smc_state_table.LdoRefSel =
+			(table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ?
+			table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5;
+	/* Populate CKS Lookup Table */
+	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+		stretch_amount2 = 0;
+	else if (stretch_amount == 3 || stretch_amount == 4)
+		stretch_amount2 = 1;
+	else {
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ClockStretcher);
+		PP_ASSERT_WITH_CODE(false,
+				"Stretch Amount in PPTable not supported\n",
+				return -EINVAL);
+	}
+
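+	/* Clear the low bit of PWR_CKS_CNTL (presumably the CKS enable). */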
+	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+	value &= 0xFFFFFFFE;
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+	return 0;
+}
+
+static bool vegam_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
+{
+	uint32_t efuse;
+
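+	/* AVFS availability is fused: check bit 0 of SMU_EFUSE_0 dword 49. */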
+	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			ixSMU_EFUSE_0 + (49 * 4));
+	efuse &= 0x00000001;
+
+	if (efuse)
+		return true;
+
+	return false;
+}
+
+static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	SMU75_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+	int result = 0;
+	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+	AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+	uint32_t tmp, i;
+
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+
+	if (!hwmgr->avfs_supported)
+		return 0;
+
+	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+	if (0 == result) {
+		table->BTCGB_VDROOP_TABLE[0].a0 =
+				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+		table->BTCGB_VDROOP_TABLE[0].a1 =
+				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+		table->BTCGB_VDROOP_TABLE[0].a2 =
+				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+		table->BTCGB_VDROOP_TABLE[1].a0 =
+				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+		table->BTCGB_VDROOP_TABLE[1].a1 =
+				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+		table->BTCGB_VDROOP_TABLE[1].a2 =
+				PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+		table->AVFSGB_FUSE_TABLE[0].m1 =
+				PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+		table->AVFSGB_FUSE_TABLE[0].m2 =
+				PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+		table->AVFSGB_FUSE_TABLE[0].b =
+				PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+		table->AVFSGB_FUSE_TABLE[0].m1_shift = 24;
+		table->AVFSGB_FUSE_TABLE[0].m2_shift = 12;
+		table->AVFSGB_FUSE_TABLE[1].m1 =
+				PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+		table->AVFSGB_FUSE_TABLE[1].m2 =
+				PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+		table->AVFSGB_FUSE_TABLE[1].b =
+				PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+		table->AVFSGB_FUSE_TABLE[1].m1_shift = 24;
+		table->AVFSGB_FUSE_TABLE[1].m2_shift = 12;
+		table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+		AVFS_meanNsigma.Aconstant[0] =
+				PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+		AVFS_meanNsigma.Aconstant[1] =
+				PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+		AVFS_meanNsigma.Aconstant[2] =
+				PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+		AVFS_meanNsigma.DC_tol_sigma =
+				PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+		AVFS_meanNsigma.Platform_mean =
+				PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+		AVFS_meanNsigma.PSM_Age_CompFactor =
+				PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+		AVFS_meanNsigma.Platform_sigma =
+				PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+		for (i = 0; i < sclk_table->count; i++) {
+			AVFS_meanNsigma.Static_Voltage_Offset[i] =
+					(uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+			AVFS_SclkOffset.Sclk_Offset[i] =
+					PP_HOST_TO_SMC_US((uint16_t)
+							(sclk_table->entries[i].sclk_offset) / 100);
+		}
+
+		result = smu7_read_smc_sram_dword(hwmgr,
+				SMU7_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU75_Firmware_Header, AvfsMeanNSigma),
+				&tmp, SMC_RAM_END);
+		smu7_copy_bytes_to_smc(hwmgr,
+					tmp,
+					(uint8_t *)&AVFS_meanNsigma,
+					sizeof(AVFS_meanNsigma_t),
+					SMC_RAM_END);
+
+		result = smu7_read_smc_sram_dword(hwmgr,
+				SMU7_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU75_Firmware_Header, AvfsSclkOffsetTable),
+				&tmp, SMC_RAM_END);
+		smu7_copy_bytes_to_smc(hwmgr,
+					tmp,
+					(uint8_t *)&AVFS_SclkOffset,
+					sizeof(AVFS_Sclk_Offset_t),
+					SMC_RAM_END);
+
+		data->avfs_vdroop_override_setting =
+				(avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+				(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+				(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+				(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+		data->apply_avfs_cks_off_voltage =
+				(avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+	}
+	return result;
+}
+
+static int vegam_populate_vr_config(struct pp_hwmgr *hwmgr,
+		struct SMU75_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data =
+			(struct vegam_smumgr *)(hwmgr->smu_backend);
+	uint16_t config;
+
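+	/* VRConfig packs one regulator-type field per rail (VDDGFX, VDDC, VDDCI,
+	 * MVDD) at the VRCONF_*_SHIFT positions. */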
+	config = VR_MERGED_WITH_VDDC;
+	table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+	/* Set Vddc Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+		config = VR_SVI2_PLANE_1;
+		table->VRConfig |= config;
+	} else {
+		PP_ASSERT_WITH_CODE(false,
+				"VDDC should be on SVI2 control in merged mode!",
+				);
+	}
+	/* Set Vddci Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+		config = VR_SVI2_PLANE_2;  /* only in merged mode */
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+		config = VR_SMIO_PATTERN_1;
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	} else {
+		config = VR_STATIC_VOLTAGE;
+		table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+	}
+	/* Set Mvdd Voltage Controller */
+	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+		if (config != VR_SVI2_PLANE_2) {
+			config = VR_SVI2_PLANE_2;
+			table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+			cgs_write_ind_register(hwmgr->device,
+					CGS_IND_REG__SMC,
+					smu_data->smu7_data.soft_regs_start +
+					offsetof(SMU75_SoftRegisters, AllowMvddSwitch),
+					0x1);
+		} else {
+			PP_ASSERT_WITH_CODE(false,
+					"SVI2 Plane 2 is already taken, set MVDD as Static",);
+			config = VR_STATIC_VOLTAGE;
+			table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+		}
+	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+		config = VR_SMIO_PATTERN_2;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+		cgs_write_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC,
+				smu_data->smu7_data.soft_regs_start +
+				offsetof(SMU75_SoftRegisters, AllowMvddSwitch),
+				0x1);
+	} else {
+		config = VR_STATIC_VOLTAGE;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+	}
+
+	return 0;
+}
+
+static int vegam_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+	smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int vegam_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+	smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+	return 0;
+}
+
+static int vegam_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
+	uint32_t temp;
+
+	if (smu7_read_smc_sram_dword(hwmgr,
+			fuse_table_offset +
+			offsetof(SMU75_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, SMC_RAM_END))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+				return -EINVAL);
+	else {
+		smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+		smu_data->power_tune_table.LPMLTemperatureMin =
+				(uint8_t)((temp >> 16) & 0xff);
+		smu_data->power_tune_table.LPMLTemperatureMax =
+				(uint8_t)((temp >> 8) & 0xff);
+		smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+	}
+	return 0;
+}
+
+static int vegam_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+	return 0;
+}
+
+static int vegam_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+/* TODO: move to hwmgr */
+	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+		hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+			hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+				hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+	return 0;
+}
+
+static int vegam_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	/* Currently not used. Set all to zero. */
+	for (i = 0; i < 16; i++)
+		smu_data->power_tune_table.GnbLPML[i] = 0;
+
+	return 0;
+}
+
+static int vegam_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+	uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+	struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+	hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+	return 0;
+}
+
+static int vegam_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+	uint32_t pm_fuse_table_offset;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (smu7_read_smc_sram_dword(hwmgr,
+				SMU7_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU75_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to get pm_fuse_table_offset Failed!",
+					return -EINVAL);
+
+		if (vegam_populate_svi_load_line(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate SviLoadLine Failed!",
+					return -EINVAL);
+
+		if (vegam_populate_tdc_limit(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+		if (vegam_populate_dw8(hwmgr, pm_fuse_table_offset))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate TdcWaterfallCtl, "
+					"LPMLTemperature Min and Max Failed!",
+					return -EINVAL);
+
+		if (0 != vegam_populate_temperature_scaler(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate LPMLTemperatureScaler Failed!",
+					return -EINVAL);
+
+		if (vegam_populate_fuzzy_fan(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate Fuzzy Fan Control parameters Failed!",
+					return -EINVAL);
+
+		if (vegam_populate_gnb_lpml(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate GnbLPML Failed!",
+					return -EINVAL);
+
+		if (vegam_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+					"Sidd Failed!", return -EINVAL);
+
+		if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+				(uint8_t *)&smu_data->power_tune_table,
+				(sizeof(struct SMU75_Discrete_PmFuses) - PMFUSES_AVFSSIZE),
+				SMC_RAM_END))
+			PP_ASSERT_WITH_CODE(false,
+					"Attempt to download PmFuseTable Failed!",
+					return -EINVAL);
+	}
+	return 0;
+}
+
+static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	smum_send_msg_to_smc_with_parameter(hwmgr,
+					    PPSMC_MSG_EnableModeSwitchRLCNotification,
+					    adev->gfx.cu_info.number);
+
+	return 0;
+}
+
+static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+	uint8_t i;
+	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+	struct phm_ppt_v1_gpio_table *gpio_table =
+			(struct phm_ppt_v1_gpio_table *)table_info->gpio_table;
+	pp_atomctrl_clock_dividers_vi dividers;
+
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition);
+
+	vegam_initialize_power_tune_defaults(hwmgr);
+
+	if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+		vegam_populate_smc_voltage_tables(hwmgr, table);
+
+	table->SystemFlags = 0;
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_AutomaticDCTransition))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_StepVddc))
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (hw_data->is_memory_gddr5)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+		result = vegam_populate_ulv_state(hwmgr, table);
+		PP_ASSERT_WITH_CODE(!result,
+				"Failed to initialize ULV state!", return result);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+				ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+	}
+
+	result = vegam_populate_smc_link_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize Link Level!", return result);
+
+	result = vegam_populate_all_graphic_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize Graphics Level!", return result);
+
+	result = vegam_populate_all_memory_levels(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize Memory Level!", return result);
+
+	result = vegam_populate_smc_acpi_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize ACPI Level!", return result);
+
+	result = vegam_populate_smc_vce_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize VCE Level!", return result);
+
+	result = vegam_populate_smc_samu_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize SAMU Level!", return result);
+
+	/* Since only the initial state is completely set up at this point
+	 * (the other states are just copies of the boot state) we only
+	 * need to populate the ARB settings for the initial state.
+	 */
+	result = vegam_program_memory_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to Write ARB settings for the initial state.", return result);
+
+	result = vegam_populate_smc_uvd_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize UVD Level!", return result);
+
+	result = vegam_populate_smc_boot_level(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize Boot Level!", return result);
+
+	result = vegam_populate_smc_initial_state(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to initialize Boot State!", return result);
+
+	result = vegam_populate_bapm_parameters_in_dpm_table(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to populate BAPM Parameters!", return result);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_ClockStretcher)) {
+		result = vegam_populate_clock_stretcher_data_table(hwmgr);
+		PP_ASSERT_WITH_CODE(!result,
+				"Failed to populate Clock Stretcher Data Table!",
+				return result);
+	}
+
+	result = vegam_populate_avfs_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to populate AVFS Parameters!", return result);
+
+	table->CurrSclkPllRange = 0xff;
+	table->GraphicsVoltageChangeEnable  = 1;
+	table->GraphicsThermThrottleEnable  = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval  = 1;
+	table->ThermalInterval  = 1;
+	table->TemperatureLimitHigh =
+			table_info->cac_dtp_table->usTargetOperatingTemp *
+			SMU7_Q88_FORMAT_CONVERSION_UNIT;
+	table->TemperatureLimitLow  =
+			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+			SMU7_Q88_FORMAT_CONVERSION_UNIT;
+	table->MemoryVoltageChangeEnable = 1;
+	table->MemoryInterval = 1;
+	table->VoltageResponseTime = 0;
+	table->PhaseResponseTime = 0;
+	table->MemoryThermThrottleEnable = 1;
+
+	PP_ASSERT_WITH_CODE(hw_data->dpm_table.pcie_speed_table.count >= 1,
+			"There must be 1 or more PCIE levels defined in PPTable.",
+			return -EINVAL);
+	table->PCIeBootLinkLevel =
+			hw_data->dpm_table.pcie_speed_table.count;
+	table->PCIeGenInterval = 1;
+	table->VRConfig = 0;
+
+	result = vegam_populate_vr_config(hwmgr, table);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to populate VRConfig setting!", return result);
+
+	table->ThermGpio = 17;
+	table->SclkStepSize = 0x4000;
+
+	if (atomctrl_get_pp_assign_pin(hwmgr,
+			VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+		if (gpio_table)
+			table->VRHotLevel =
+					table_info->gpio_table->vrhot_triggered_sclk_dpm_index;
+	} else {
+		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_RegulatorHot);
+	}
+
+	if (atomctrl_get_pp_assign_pin(hwmgr,
+			PP_AC_DC_SWITCH_GPIO_PINID,	&gpio_pin)) {
+		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_AutomaticDCTransition) &&
+				!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
+	} else {
+		table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_AutomaticDCTransition);
+	}
+
+	/* Thermal Output GPIO */
+	if (atomctrl_get_pp_assign_pin(hwmgr,
+			THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin)) {
+		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For polarity, read GPIOPAD_A with the assigned GPIO pin
+		 * since VBIOS will program this register to set 'inactive state',
+		 * driver can then determine 'active state' from this and
+		 * program SMU with correct polarity
+		 */
+		table->ThermOutPolarity =
+				(0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+				(1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+		/* if required, combine VRHot/PCC with thermal out GPIO */
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_RegulatorHot) &&
+			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_CombinePCCWithThermalSignal))
+			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+	} else {
+		table->ThermOutGpio = 17;
+		table->ThermOutPolarity = 1;
+		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+	}
+
+	/* Populate BIF_SCLK levels into SMC DPM table */
+	for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+				smu_data->bif_sclk_table[i], &dividers);
+		PP_ASSERT_WITH_CODE(!result,
+				"Cannot find DFS divider id for Sclk",
+				return result);
+
+		if (i == 0)
+			table->Ulv.BifSclkDfs =
+					PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider));
+		else
+			table->LinkLevel[i - 1].BifSclkDfs =
+					PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider));
+	}
+
+	for (i = 0; i < SMU75_MAX_ENTRIES_SMIO; i++)
+		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+	CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+	result = smu7_copy_bytes_to_smc(hwmgr,
+			smu_data->smu7_data.dpm_table_start +
+			offsetof(SMU75_Discrete_DpmTable, SystemFlags),
+			(uint8_t *)&(table->SystemFlags),
+			sizeof(SMU75_Discrete_DpmTable) - 3 * sizeof(SMU75_PIDController),
+			SMC_RAM_END);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to upload dpm data to SMC memory!", return result);
+
+	result = vegam_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to populate PM fuses to SMC memory!", return result);
+
+	result = vegam_enable_reconfig_cus(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to enable reconfigurable CUs!", return result);
+
+	return 0;
+}
+
+static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
+{
+	switch (type) {
+	case SMU_SoftRegisters:
+		switch (member) {
+		case HandshakeDisables:
+			return offsetof(SMU75_SoftRegisters, HandshakeDisables);
+		case VoltageChangeTimeout:
+			return offsetof(SMU75_SoftRegisters, VoltageChangeTimeout);
+		case AverageGraphicsActivity:
+			return offsetof(SMU75_SoftRegisters, AverageGraphicsActivity);
+		case PreVBlankGap:
+			return offsetof(SMU75_SoftRegisters, PreVBlankGap);
+		case VBlankTimeout:
+			return offsetof(SMU75_SoftRegisters, VBlankTimeout);
+		case UcodeLoadStatus:
+			return offsetof(SMU75_SoftRegisters, UcodeLoadStatus);
+		case DRAM_LOG_ADDR_H:
+			return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_H);
+		case DRAM_LOG_ADDR_L:
+			return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_L);
+		case DRAM_LOG_PHY_ADDR_H:
+			return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+		case DRAM_LOG_PHY_ADDR_L:
+			return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+		case DRAM_LOG_BUFF_SIZE:
+			return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+		}
+	case SMU_Discrete_DpmTable:
+		switch (member) {
+		case UvdBootLevel:
+			return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
+		case VceBootLevel:
+			return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
+		case SamuBootLevel:
+			return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
+		case LowSclkInterruptThreshold:
+			return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
+		}
+	}
+	pr_warn("can't get the offset of type %x member %x\n", type, member);
+	return 0;
+}
+
+static int vegam_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK +
+		DPMTABLE_UPDATE_SCLK +
+		DPMTABLE_UPDATE_MCLK))
+		return vegam_program_memory_timing_parameters(hwmgr);
+
+	return 0;
+}
+
+static int vegam_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct vegam_smumgr *smu_data =
+			(struct vegam_smumgr *)(hwmgr->smu_backend);
+	int result = 0;
+	uint32_t low_sclk_interrupt_threshold = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkThrottleLowNotification)
+	    && (data->low_sclk_interrupt_threshold != 0)) {
+		low_sclk_interrupt_threshold =
+				data->low_sclk_interrupt_threshold;
+
+		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+		result = smu7_copy_bytes_to_smc(
+				hwmgr,
+				smu_data->smu7_data.dpm_table_start +
+				offsetof(SMU75_Discrete_DpmTable,
+					LowSclkInterruptThreshold),
+				(uint8_t *)&low_sclk_interrupt_threshold,
+				sizeof(uint32_t),
+				SMC_RAM_END);
+	}
+	PP_ASSERT_WITH_CODE((result == 0),
+			"Failed to update SCLK threshold!", return result);
+
+	result = vegam_program_mem_timing_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE((result == 0),
+			"Failed to program memory timing parameters!",
+			);
+
+	return result;
+}
+
+int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	int ret;
+
+	if (!hwmgr->avfs_supported)
+		return 0;
+
+	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+	if (!ret) {
+		if (data->apply_avfs_cks_off_voltage)
+			ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+	}
+
+	return ret;
+}
+
+static int vegam_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+	PP_ASSERT_WITH_CODE(hwmgr->thermal_controller.fanInfo.bNoFan,
+			"VBIOS fan info is not correct!",
+			);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_MicrocodeFanControl);
+	return 0;
+}
+
+const struct pp_smumgr_func vegam_smu_funcs = {
+	.smu_init = vegam_smu_init,
+	.smu_fini = smu7_smu_fini,
+	.start_smu = vegam_start_smu,
+	.check_fw_load_finish = smu7_check_fw_load_finish,
+	.request_smu_load_fw = smu7_reload_firmware,
+	.request_smu_load_specific_fw = NULL,
+	.send_msg_to_smc = smu7_send_msg_to_smc,
+	.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+	.process_firmware_header = vegam_process_firmware_header,
+	.is_dpm_running = vegam_is_dpm_running,
+	.get_mac_definition = vegam_get_mac_definition,
+	.update_smc_table = vegam_update_smc_table,
+	.init_smc_table = vegam_init_smc_table,
+	.get_offsetof = vegam_get_offsetof,
+	.populate_all_graphic_levels = vegam_populate_all_graphic_levels,
+	.populate_all_memory_levels = vegam_populate_all_memory_levels,
+	.update_sclk_threshold = vegam_update_sclk_threshold,
+	.is_hw_avfs_present = vegam_is_hw_avfs_present,
+	.thermal_avfs_enable = vegam_thermal_avfs_enable,
+	.thermal_setup_fan_table = vegam_thermal_setup_fan_table,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
new file mode 100644
index 000000000000..2b6558238500
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _VEGAM_SMUMANAGER_H
+#define _VEGAM_SMUMANAGER_H
+
+
+#include <pp_endian.h>
+#include "smu75_discrete.h"
+#include "smu7_smumgr.h"
+
+#define SMC_RAM_END 0x40000
+
+#define DPMTuning_Uphyst_Shift    0
+#define DPMTuning_Downhyst_Shift  8
+#define DPMTuning_Activity_Shift  16
+
+#define GraphicsDPMTuning_VEGAM    0x001e6400
+#define MemoryDPMTuning_VEGAM      0x000f3c0a
+#define SclkDPMTuning_VEGAM        0x002d000a
+#define MclkDPMTuning_VEGAM        0x001f100a
+
+
+struct vegam_pt_defaults {
+	uint8_t   SviLoadLineEn;
+	uint8_t   SviLoadLineVddC;
+	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+	uint8_t   TDC_MAWt;
+	uint8_t   TdcWaterfallCtl;
+	uint8_t   DTEAmbientTempBase;
+
+	uint32_t  DisplayCac;
+	uint32_t  BAPM_TEMP_GRADIENT;
+	uint16_t  BAPMTI_R[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
+	uint16_t  BAPMTI_RC[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
+};
+
+struct vegam_range_table {
+	uint32_t trans_lower_frequency; /* in 10khz */
+	uint32_t trans_upper_frequency;
+};
+
+struct vegam_smumgr {
+	struct smu7_smumgr smu7_data;
+	uint8_t protected_mode;
+	SMU75_Discrete_DpmTable              smc_state_table;
+	struct SMU75_Discrete_Ulv            ulv_setting;
+	struct SMU75_Discrete_PmFuses  power_tune_table;
+	struct vegam_range_table                range_table[NUM_SCLK_RANGE];
+	const struct vegam_pt_defaults       *power_tune_defaults;
+	uint32_t               bif_sclk_table[SMU75_MAX_LEVELS_LINK];
+};
+
+
+#endif

From 0c24e7ef233b528699798a3db3ab57ee0317f2f0 Mon Sep 17 00:00:00 2001
From: Eric Huang <JinHuiEric.Huang@amd.com>
Date: Wed, 11 Apr 2018 15:38:11 -0500
Subject: [PATCH 0884/1286] drm/amd/powerplay: add specific changes for VEGAM
 in smu7_hwmgr.c

VEGAM-specific changes for smu7:
1. add AVFS control.
2. add an SMC message ID that differs from the generic smu7 one
   (see the sketch after this list).
3. don't switch the MC ARB memory timing.
4. update the LCAC_MC0/1_CNTL values.
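
As a rough illustration of item 2, the sketch below shows the chip-gated
selection of the VBI timeout message. Only PPSMC_MSG_SetVBITimeout_VEGAM
(0x310) is taken from this series; the other message value, the chip enum
and the send helper are illustrative stand-ins, not the powerplay API.

#include <stdio.h>
#include <stdint.h>

enum chip_id { CHIP_POLARIS10, CHIP_VEGAM };

#define PPSMC_MSG_SetVBITimeout        0x30f   /* placeholder value */
#define PPSMC_MSG_SetVBITimeout_VEGAM  0x310   /* value used by this series */

/* Stand-in for smum_send_msg_to_smc_with_parameter(). */
static void send_msg_to_smc(uint16_t msg, uint32_t param)
{
	printf("SMC msg 0x%03x, param %u\n", msg, (unsigned int)param);
}

/* VEGAM gets its own message ID; every other chip keeps the old one. */
static void notify_smc_display(enum chip_id chip, uint32_t frame_time_x2)
{
	if (chip == CHIP_VEGAM)
		send_msg_to_smc(PPSMC_MSG_SetVBITimeout_VEGAM, frame_time_x2);
	else
		send_msg_to_smc(PPSMC_MSG_SetVBITimeout, frame_time_x2);
}

int main(void)
{
	notify_smc_display(CHIP_VEGAM, 1000);
	notify_smc_display(CHIP_POLARIS10, 1000);
	return 0;
}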

Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  | 71 ++++++++++++++++---
 1 file changed, 61 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c9dd0bec1e24..4c94e7a057e9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -83,6 +83,14 @@ static const struct profile_mode_setting smu7_profiling[5] =
 					 {1, 0, 5, 30, 0, 0, 0, 0},
 					};
 
+#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
+
+#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
+#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
+#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
+#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
+#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006
+
 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
 enum DPM_EVENT_SRC {
 	DPM_EVENT_SRC_ANALOG = 0,
@@ -164,6 +172,13 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 */
 static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
 {
+	if (hwmgr->chip_id == CHIP_VEGAM) {
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
+		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
+	}
+
 	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
 
@@ -964,6 +979,22 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t soft_register_value = 0;
+	uint32_t handshake_disables_offset = data->soft_regs_start
+				+ smum_get_offsetof(hwmgr,
+					SMU_SoftRegisters, HandshakeDisables);
+
+	soft_register_value = cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC, handshake_disables_offset);
+	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+			handshake_disables_offset, soft_register_value);
+	return 0;
+}
+
 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -987,6 +1018,9 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 
 	/* enable SCLK dpm */
 	if (!data->sclk_dpm_key_disabled)
+		if (hwmgr->chip_id == CHIP_VEGAM)
+			smu7_disable_sclk_vce_handshake(hwmgr);
+
 		PP_ASSERT_WITH_CODE(
 		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
 		"Failed to enable SCLK DPM during DPM Start Function!",
@@ -996,13 +1030,15 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 	if (0 == data->mclk_dpm_key_disabled) {
 		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
 			smu7_disable_handshake_uvd(hwmgr);
+
 		PP_ASSERT_WITH_CODE(
 				(0 == smum_send_msg_to_smc(hwmgr,
 						PPSMC_MSG_MCLKDPM_Enable)),
 				"Failed to enable MCLK DPM during DPM Start Function!",
 				return -EINVAL);
 
-		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+		if (hwmgr->chip_family != CHIP_VEGAM)
+			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
 
 		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
@@ -1018,8 +1054,13 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
 			udelay(10);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
-			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+			if (hwmgr->chip_id == CHIP_VEGAM) {
+				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
+				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
+			} else {
+				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+			}
 			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
 		}
 	}
@@ -1260,10 +1301,12 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to process firmware header!", result = tmp_result);
 
-	tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
-	PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to initialize switch from ArbF0 to F1!",
-			result = tmp_result);
+	if (hwmgr->chip_id != CHIP_VEGAM) {
+		tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+		PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to initialize switch from ArbF0 to F1!",
+				result = tmp_result);
+	}
 
 	result = smu7_setup_default_dpm_tables(hwmgr);
 	PP_ASSERT_WITH_CODE(0 == result,
@@ -2753,6 +2796,9 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
 	case CHIP_POLARIS12:
 		switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
 		break;
+	case CHIP_VEGAM:
+		switch_limit_us = 30;
+		break;
 	default:
 		switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
 		break;
@@ -3801,9 +3847,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
-		smum_send_msg_to_smc_with_parameter(hwmgr,
-			(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
+		if (hwmgr->chip_id == CHIP_VEGAM)
+			smum_send_msg_to_smc_with_parameter(hwmgr,
+					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
+		else
+			smum_send_msg_to_smc_with_parameter(hwmgr,
+					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+	}
 	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
 }
 

From eda8377d1628caa07b642fce8fcd1938010e949f Mon Sep 17 00:00:00 2001
From: Eric Huang <JinHuiEric.Huang@amd.com>
Date: Wed, 11 Apr 2018 18:23:54 -0500
Subject: [PATCH 0885/1286] drm/powerplay: Add powertune table for VEGAM

Add the powertune table for VEGAM.

Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/powerplay/hwmgr/smu7_powertune.c  | 189 ++++++++++++++++++
 1 file changed, 189 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index a55ee166ce9f..a264e0c35f45 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -623,6 +623,190 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
 	{   0xFFFFFFFF  }  /* End of list */
 };
 
+static const struct gpu_pt_config_reg GCCACConfig_VegaM[] =
+{
+// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+//      Offset                             Mask                                                Shift                                               Value       Type
+// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+    // DIDT_SQ
+    //
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00060013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00860013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01060013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01860013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02060013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02860013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03060013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x03860013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x04060013, GPU_CONFIGREG_GC_CAC_IND },
+
+    // DIDT_TD
+    //
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
+
+    // DIDT_TCP
+    //
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00100013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x00900013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01100013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x01900013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02100013, GPU_CONFIGREG_GC_CAC_IND },
+    {   ixGC_CAC_CNTL,                     0xFFFFFFFF,                                         0,                                                  0x02900013, GPU_CONFIGREG_GC_CAC_IND },
+
+    {   0xFFFFFFFF  }  // End of list
+};
+
+static const struct gpu_pt_config_reg DIDTConfig_VegaM[] =
+{
+// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+//      Offset                             Mask                                                Shift                                               Value       Type
+// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+    // DIDT_SQ
+    //
+    {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT,                  0x0073,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT,                  0x00ab,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT0_3,               DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT,                  0x005a,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0067,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT,                  0x0084,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0027,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT4_7,               DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT,                 0x00aa,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK,                   DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_WEIGHT8_11,              DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK,                  DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                      DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    // DIDT_TD
+    //
+    {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_0_MASK,                       DIDT_TD_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x000f,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_1_MASK,                       DIDT_TD_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__UNUSED_2_MASK,                       DIDT_TD_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_STALL_CTRL,              DIDT_TD_STALL_CTRL__UNUSED_0_MASK,                  DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x0dde,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0009,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0009,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__UNUSED_0_MASK,                       DIDT_TD_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    // DIDT_TCP
+    //
+    {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT,                 0x0004,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT,                 0x0037,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_WEIGHT0_3,              DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK,                   DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT,                 0x00ff,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT,                 0x0054,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_WEIGHT4_7,              DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK,                   DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                     DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                   0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                     DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                   0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__UNUSED_0_MASK,                   DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL_OCP,               DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK,              DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT,            0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,               DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,             0x3dde,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_0_MASK,                      DIDT_TCP_CTRL2__UNUSED_0__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,      DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,    0x0032,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_1_MASK,                      DIDT_TCP_CTRL2__UNUSED_1__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,      DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,    0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL2,                  DIDT_TCP_CTRL2__UNUSED_2_MASK,                      DIDT_TCP_CTRL2__UNUSED_2__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,      DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_STALL_CTRL,             DIDT_TCP_STALL_CTRL__UNUSED_0_MASK,                 DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,      DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,    0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,      DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,    0x3dde,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK,                DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT,              0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,                   DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+    {   ixDIDT_TCP_CTRL0,                   DIDT_TCP_CTRL0__UNUSED_0_MASK,                       DIDT_TCP_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+    {   0xFFFFFFFF  }  // End of list
+};
 static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
 {
 	uint32_t en = enable ? 1 : 0;
@@ -768,6 +952,11 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
 				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12);
 				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+			} else if (hwmgr->chip_id == CHIP_VEGAM) {
+				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
 			}
 		}
 		cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);

From 5dd3f9efd4199f0d9e8244322934494ebd140dfd Mon Sep 17 00:00:00 2001
From: Pixel Ding <Pixel.Ding@amd.com>
Date: Tue, 24 Apr 2018 22:52:45 -0400
Subject: [PATCH 0886/1286] drm/scheduler: don't update last scheduled fence in
 TDR

The current sequence in the scheduler thread is:
1. update last sched fence
2. job begin (adding to mirror list)
3. job finish (remove from mirror list)
4. back to 1

Since we update the last scheduled fence before the job joins the
mirror list, every job on the mirror list has already passed that
fence. TDR only re-runs the jobs on the mirror list, so it must not
update the last scheduled fence again.
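
A minimal toy model of the ordering argument (not the real drm_sched
code; the structures and sequence numbers are assumptions used purely
for illustration):

#include <stdio.h>

struct job { int seq; };

static int last_scheduled = -1;   /* stands in for entity->last_scheduled */

/* Normal scheduler-thread path: steps 1 and 2 from the list above. */
static void submit(struct job *j)
{
	last_scheduled = j->seq;       /* 1. update last sched fence */
	printf("submitted job %d, last_scheduled=%d\n", j->seq, last_scheduled);
	/* 2. the job would join the mirror list here */
}

/* Recovery (TDR) path: the job is already on the mirror list, i.e. it is
 * already behind last_scheduled, so re-running it must not touch it. */
static void tdr_rerun(struct job *j)
{
	printf("re-ran job %d, last_scheduled stays %d\n", j->seq, last_scheduled);
}

int main(void)
{
	struct job a = { .seq = 1 }, b = { .seq = 2 };

	submit(&a);
	submit(&b);
	tdr_rerun(&a);   /* updating here would move last_scheduled back to 1 */
	return 0;
}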

Signed-off-by: Pixel Ding <Pixel.Ding@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 088ff2b4e8fb..1f1dd70125a7 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -575,9 +575,6 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 		fence = sched->ops->run_job(s_job);
 		atomic_inc(&sched->hw_rq_count);
 
-		dma_fence_put(s_job->entity->last_scheduled);
-		s_job->entity->last_scheduled = dma_fence_get(&s_fence->finished);
-
 		if (fence) {
 			s_fence->parent = dma_fence_get(fence);
 			r = dma_fence_add_callback(fence, &s_fence->cb,

From abc342538cc4670a107e45037c39d1dda8174563 Mon Sep 17 00:00:00 2001
From: Emily Deng <Emily.Deng@amd.com>
Date: Thu, 26 Apr 2018 18:02:55 +0800
Subject: [PATCH 0887/1286] drm/amdgpu: For sriov reset, move IB test into
 exclusive mode

When the IB test is run outside of exclusive mode during an SR-IOV
reset, it fails randomly. Outside of exclusive mode register reads
and writes go through the KIQ, and because of world switches the
time a KIQ access takes is unpredictable; it can exceed
MAX_KIQ_REG_WAIT, making the register read or write fail, which in
turn makes the IB test fail.
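
The failure mode can be shown with a minimal sketch of a bounded-wait
register access. MAX_KIQ_REG_WAIT_US, kiq_latency_us() and the random
latency source below are assumptions for illustration only, not the
amdgpu implementation:

#include <stdio.h>
#include <stdlib.h>

#define MAX_KIQ_REG_WAIT_US 5000   /* illustrative limit */

/* Pretend KIQ completion latency; with world switches it is unpredictable. */
static unsigned int kiq_latency_us(void)
{
	return (unsigned int)(rand() % 10000);
}

static int kiq_read_reg(unsigned int reg, unsigned int *val)
{
	unsigned int latency = kiq_latency_us();

	if (latency > MAX_KIQ_REG_WAIT_US) {
		fprintf(stderr, "reg 0x%x: KIQ wait of %u us exceeded the limit\n",
			reg, latency);
		return -1;   /* the random IB-test failure mode */
	}
	*val = 0xdeadbeef;   /* dummy value for the sketch */
	return 0;
}

int main(void)
{
	unsigned int v;
	int i;

	srand(42);
	for (i = 0; i < 5; i++)
		if (kiq_read_reg(0x100 + i, &v))
			return 1;
	return 0;
}

Running the IB test while still in exclusive mode sidesteps the KIQ path
entirely, which is what this patch does by moving the test before
amdgpu_virt_release_full_gpu().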

Signed-off-by: Emily Deng <Emily.Deng@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e6657ec363b8..9fb20a53d5b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3143,20 +3143,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_device_ip_reinit_late_sriov(adev);
-	amdgpu_virt_release_full_gpu(adev, true);
 	if (r)
 		goto error;
 
 	amdgpu_irq_gpu_reset_resume_helper(adev);
 	r = amdgpu_ib_ring_tests(adev);
 
+error:
+	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		atomic_inc(&adev->vram_lost_counter);
 		r = amdgpu_device_handle_vram_lost(adev);
 	}
 
-error:
-
 	return r;
 }
 

From 6e9c2b88eb42bdda6ba1f1a39238c446782d443e Mon Sep 17 00:00:00 2001
From: Emily Deng <Emily.Deng@amd.com>
Date: Thu, 26 Apr 2018 18:02:14 +0800
Subject: [PATCH 0888/1286] drm/amdgpu/sriov: Need to set in_gpu_reset flag
 back after gpu reset

After the host OS has completed the GPU reset, the in_gpu_reset
flag needs to be set back to zero.

Signed-off-by: Emily Deng <Emily.Deng@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 493348672475..078f70faedcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -260,8 +260,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	} while (timeout > 1);
 
 flr_done:
-	if (locked)
+	if (locked) {
+		adev->in_gpu_reset = 0;
 		mutex_unlock(&adev->lock_reset);
+	}
 
 	/* Trigger recovery for world switch failure if no TDR */
 	if (amdgpu_lockup_timeout == 0)

From 40c21ed6b372c58fb214307f8186f7a0c1885bfc Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Wed, 10 Jan 2018 10:01:38 -0500
Subject: [PATCH 0889/1286] drm/amd/display: Fix deadlock when flushing irq

Lock the irq table while looking up a queued work item, unlock it
to flush the work, then take the lock again for the next lookup
until all work is flushed.
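
The pattern can be sketched with a small standalone model (build with
-pthread), where a pthread mutex and pthread_join() stand in for the irq
table lock and flush_work(); the names below are illustrative, not the
amdgpu_dm ones:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int handler_list[4];   /* stands in for irq_handler_list_low_tab */

/* The queued work also touches the table, so it takes table_lock. */
static void *worker(void *arg)
{
	long src = (long)arg;

	pthread_mutex_lock(&table_lock);
	handler_list[src] = 0;
	pthread_mutex_unlock(&table_lock);
	return NULL;
}

int main(void)
{
	pthread_t workers[4];
	long src;

	for (src = 0; src < 4; src++)
		pthread_create(&workers[src], NULL, worker, (void *)src);

	for (src = 0; src < 4; src++) {
		pthread_t t;

		pthread_mutex_lock(&table_lock);
		t = workers[src];   /* look up the work under the lock */
		pthread_mutex_unlock(&table_lock);

		/* The wait (the flush_work() analogue) runs without the lock;
		 * holding it here could deadlock against worker() above. */
		pthread_join(t, NULL);
	}
	printf("all work flushed without holding the table lock\n");
	return 0;
}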

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 490017df371d..4be21bf54749 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
 	int src;
 	struct irq_list_head *lh;
+	unsigned long irq_table_flags;
 	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
-
 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
-
+		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 		/* The handler was removed from the table,
 		 * it means it is safe to flush all the 'work'
 		 * (because no code can schedule a new one). */
 		lh = &adev->dm.irq_handler_list_low_tab[src];
+		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
 		flush_work(&lh->work);
 	}
 }

From a80aa93de1a0e69fdb83e04a9aca7c33bfb18941 Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Sat, 3 Feb 2018 14:18:07 -0500
Subject: [PATCH 0890/1286] drm/amd/display: Unify dm resume sequence into a
 single call

Merge the amdgpu_dm_display_resume function into dm_resume,
as it is no longer called anywhere else.

The resume path was originally split into two functions to work
around a cursor corruption issue. That issue is no longer visible,
so dm_resume now performs the display resume itself.

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ++++++-------------
 1 file changed, 10 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8379a3705f2d..cc105f1f93b8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -644,18 +644,6 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 static int dm_resume(void *handle)
 {
 	struct amdgpu_device *adev = handle;
-	struct amdgpu_display_manager *dm = &adev->dm;
-	int ret = 0;
-
-	/* power on hardware */
-	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
-
-	ret = amdgpu_dm_display_resume(adev);
-	return ret;
-}
-
-int amdgpu_dm_display_resume(struct amdgpu_device *adev)
-{
 	struct drm_device *ddev = adev->ddev;
 	struct amdgpu_display_manager *dm = &adev->dm;
 	struct amdgpu_dm_connector *aconnector;
@@ -666,10 +654,12 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 	struct drm_plane *plane;
 	struct drm_plane_state *new_plane_state;
 	struct dm_plane_state *dm_new_plane_state;
-
-	int ret = 0;
+	int ret;
 	int i;
 
+	/* power on hardware */
+	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
 	/* program HPD filter */
 	dc_resume(dm->dc);
 
@@ -683,8 +673,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 	amdgpu_dm_irq_resume_early(adev);
 
 	/* Do detection*/
-	list_for_each_entry(connector,
-			&ddev->mode_config.connector_list, head) {
+	list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
 		aconnector = to_amdgpu_dm_connector(connector);
 
 		/*
@@ -706,7 +695,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 	}
 
 	/* Force mode set in atomic comit */
-	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
+	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
 		new_crtc_state->active_changed = true;
 
 	/*
@@ -714,7 +703,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 	 * them here, since they were duplicated as part of the suspend
 	 * procedure.
 	 */
-	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 		if (dm_new_crtc_state->stream) {
 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
@@ -723,7 +712,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 		}
 	}
 
-	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
+	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
 		if (dm_new_plane_state->dc_state) {
 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
@@ -732,9 +721,9 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 		}
 	}
 
-	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
+	ret = drm_atomic_helper_resume(ddev, dm->cached_state);
 
-	adev->dm.cached_state = NULL;
+	dm->cached_state = NULL;
 
 	amdgpu_dm_irq_resume_late(adev);
 

From 3d777c82bda9a82cc304de5ba51122d248f7a584 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Mon, 16 Apr 2018 17:28:11 -0400
Subject: [PATCH 0891/1286] drm/amd/display: Disallow enabling CRTC without
 primary plane with FB
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The below commit

    "drm/atomic: Try to preserve the crtc enabled state in drm_atomic_remove_fb, v2"

introduces a slight behavioral change to rmfb. Instead of disabling a CRTC
when its primary plane is disabled, it now keeps the CRTC enabled.

Since DC is currently not equipped to handle this, we need to fail such a
commit; otherwise we might see a corrupted screen.

This is based on Shirish's previous approach but avoids adding all
planes to the new atomic state, which would lead to a full update in DC
for any commit and is not what we intend.

Theoretically DM should be able to deal with states whose planes are
fully populated, even for simple updates such as cursor updates. This
should still be addressed in the future.
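
The check added to dm_update_crtcs_state hinges on the fact that
drm_atomic_get_new_plane_state() returns NULL for a plane that is not
part of the atomic state, so commits that never touch the primary plane
are left alone. A trimmed sketch (names as in the patch):

    new_plane_state = drm_atomic_get_new_plane_state(state,
                                    new_crtc_state->crtc->primary);

    /* CRTC stays enabled but its primary plane has no framebuffer:
     * DC cannot handle this yet, so reject the commit. */
    if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
            ret = -EINVAL;
            goto fail;
    }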

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index cc105f1f93b8..96a57be3ceb6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4628,6 +4628,7 @@ static int dm_update_crtcs_state(struct dc *dc,
 		struct amdgpu_dm_connector *aconnector = NULL;
 		struct drm_connector_state *new_con_state = NULL;
 		struct dm_connector_state *dm_conn_state = NULL;
+		struct drm_plane_state *new_plane_state = NULL;
 
 		new_stream = NULL;
 
@@ -4635,6 +4636,13 @@ static int dm_update_crtcs_state(struct dc *dc,
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 		acrtc = to_amdgpu_crtc(crtc);
 
+		new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
+
+		if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
 		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
 
 		/* TODO This hack should go away */
@@ -4831,7 +4839,7 @@ static int dm_update_planes_state(struct dc *dc,
 			if (!dm_old_crtc_state->stream)
 				continue;
 
-			DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+			DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
 					plane->base.id, old_plane_crtc->base.id);
 
 			if (!dc_remove_plane_from_context(

From 388277b17023e69fdbf6ed68cec42fa7616ce454 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Wed, 11 Apr 2018 13:19:56 -0400
Subject: [PATCH 0892/1286] drm/amd/display: fix issue where infopacket was
 not transmitted

The check in the code was incorrect, so the infopacket was only transmitted
after the update function had been called multiple times.
The purpose of the check is to see whether any infopackets are being enabled
and, if so, to enable the master control bit. Fix the code to do this.
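
In essence, the fix defers the DP_SEC_CNTL readback until after the
per-packet enable bits have been programmed, so the master enable decision
reflects the values just written (sketched from the DCN10 variant, with
the per-packet updates abbreviated):

    uint32_t value = 0;

    /* program the individual secondary-packet enables first */
    REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
    REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);

    /* only now read the register back; if any packet is enabled,
     * set the master stream-enable bit as well */
    value = REG_READ(DP_SEC_CNTL);
    if (value)
            REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);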

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/dc/dce/dce_stream_encoder.c   | 25 +++++--------------
 .../display/dc/dcn10/dcn10_stream_encoder.c   | 11 +++++---
 2 files changed, 13 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 84e26c894046..e265a0abe361 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -819,7 +819,7 @@ static void dce110_stream_encoder_update_dp_info_packets(
 	const struct encoder_info_frame *info_frame)
 {
 	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-	uint32_t value = REG_READ(DP_SEC_CNTL);
+	uint32_t value = 0;
 
 	if (info_frame->vsc.valid)
 		dce110_update_generic_info_packet(
@@ -853,6 +853,7 @@ static void dce110_stream_encoder_update_dp_info_packets(
 	* Therefore we need to enable master bit
 	* if at least on of the fields is not 0
 	*/
+	value = REG_READ(DP_SEC_CNTL);
 	if (value)
 		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 }
@@ -862,7 +863,7 @@ static void dce110_stream_encoder_stop_dp_info_packets(
 {
 	/* stop generic packets on DP */
 	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-	uint32_t value = REG_READ(DP_SEC_CNTL);
+	uint32_t value = 0;
 
 	if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
 		REG_SET_7(DP_SEC_CNTL, 0,
@@ -875,25 +876,10 @@ static void dce110_stream_encoder_stop_dp_info_packets(
 			DP_SEC_STREAM_ENABLE, 0);
 	}
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-	if (enc110->se_mask->DP_SEC_GSP7_ENABLE) {
-		REG_SET_10(DP_SEC_CNTL, 0,
-			DP_SEC_GSP0_ENABLE, 0,
-			DP_SEC_GSP1_ENABLE, 0,
-			DP_SEC_GSP2_ENABLE, 0,
-			DP_SEC_GSP3_ENABLE, 0,
-			DP_SEC_GSP4_ENABLE, 0,
-			DP_SEC_GSP5_ENABLE, 0,
-			DP_SEC_GSP6_ENABLE, 0,
-			DP_SEC_GSP7_ENABLE, 0,
-			DP_SEC_MPG_ENABLE, 0,
-			DP_SEC_STREAM_ENABLE, 0);
-	}
-#endif
 	/* this register shared with audio info frame.
 	 * therefore we need to keep master enabled
 	 * if at least one of the fields is not 0 */
-
+	value = REG_READ(DP_SEC_CNTL);
 	if (value)
 		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 
@@ -1496,7 +1482,7 @@ static void dce110_se_disable_dp_audio(
 	struct stream_encoder *enc)
 {
 	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-	uint32_t value = REG_READ(DP_SEC_CNTL);
+	uint32_t value = 0;
 
 	/* Disable Audio packets */
 	REG_UPDATE_5(DP_SEC_CNTL,
@@ -1508,6 +1494,7 @@ static void dce110_se_disable_dp_audio(
 
 	/* This register shared with encoder info frame. Therefore we need to
 	keep master enabled if at least on of the fields is not 0 */
+	value = REG_READ(DP_SEC_CNTL);
 	if (value != 0)
 		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 9ec46f8fc7cc..befd8639ad55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -686,7 +686,7 @@ void enc1_stream_encoder_update_dp_info_packets(
 	const struct encoder_info_frame *info_frame)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-	uint32_t value = REG_READ(DP_SEC_CNTL);
+	uint32_t value = 0;
 
 	if (info_frame->vsc.valid)
 		enc1_update_generic_info_packet(
@@ -713,6 +713,7 @@ void enc1_stream_encoder_update_dp_info_packets(
 	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
 	REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
 
+
 	/* This bit is the master enable bit.
 	 * When enabling secondary stream engine,
 	 * this master bit must also be set.
@@ -720,6 +721,7 @@ void enc1_stream_encoder_update_dp_info_packets(
 	 * Therefore we need to enable master bit
 	 * if at least on of the fields is not 0
 	 */
+	value = REG_READ(DP_SEC_CNTL);
 	if (value)
 		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 }
@@ -729,7 +731,7 @@ void enc1_stream_encoder_stop_dp_info_packets(
 {
 	/* stop generic packets on DP */
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-	uint32_t value = REG_READ(DP_SEC_CNTL);
+	uint32_t value = 0;
 
 	REG_SET_10(DP_SEC_CNTL, 0,
 		DP_SEC_GSP0_ENABLE, 0,
@@ -746,7 +748,7 @@ void enc1_stream_encoder_stop_dp_info_packets(
 	/* this register shared with audio info frame.
 	 * therefore we need to keep master enabled
 	 * if at least one of the fields is not 0 */
-
+	value = REG_READ(DP_SEC_CNTL);
 	if (value)
 		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 
@@ -1356,7 +1358,7 @@ static void enc1_se_disable_dp_audio(
 	struct stream_encoder *enc)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-	uint32_t value = REG_READ(DP_SEC_CNTL);
+	uint32_t value = 0;
 
 	/* Disable Audio packets */
 	REG_UPDATE_5(DP_SEC_CNTL,
@@ -1369,6 +1371,7 @@ static void enc1_se_disable_dp_audio(
 	/* This register shared with encoder info frame. Therefore we need to
 	 * keep master enabled if at least on of the fields is not 0
 	 */
+	value = REG_READ(DP_SEC_CNTL);
 	if (value != 0)
 		REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 

From 8e357610ca0cc44a875df68f608a756fa56b1797 Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Mon, 9 Apr 2018 15:47:42 -0400
Subject: [PATCH 0893/1286] drm/amd/display: Make program_output_csc HWSS
 interface function

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 5 +++--
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h         | 7 +++++++
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index e547f46d3516..7cecab0ce297 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1564,7 +1564,7 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
 	}
 }
 
-static void program_output_csc(struct dc *dc,
+static void dcn10_program_output_csc(struct dc *dc,
 		struct pipe_ctx *pipe_ctx,
 		enum dc_color_space colorspace,
 		uint16_t *matrix,
@@ -1917,7 +1917,7 @@ static void update_dchubp_dpp(
 		/*gamut remap*/
 		program_gamut_remap(pipe_ctx);
 
-		program_output_csc(dc,
+		dc->hwss.program_output_csc(dc,
 				pipe_ctx,
 				pipe_ctx->stream->output_color_space,
 				pipe_ctx->stream->csc_color_matrix.matrix,
@@ -2667,6 +2667,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.update_pending_status = dcn10_update_pending_status,
 	.set_input_transfer_func = dcn10_set_input_transfer_func,
 	.set_output_transfer_func = dcn10_set_output_transfer_func,
+	.program_output_csc = dcn10_program_output_csc,
 	.power_down = dce110_power_down,
 	.enable_accelerated_mode = dce110_enable_accelerated_mode,
 	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index f54d478ffc5c..be6cf7ee1468 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -95,6 +95,12 @@ struct hw_sequencer_funcs {
 			enum dc_color_space colorspace,
 			uint16_t *matrix);
 
+	void (*program_output_csc)(struct dc *dc,
+			struct pipe_ctx *pipe_ctx,
+			enum dc_color_space colorspace,
+			uint16_t *matrix,
+			int opp_id);
+
 	void (*update_plane_addr)(
 		const struct dc *dc,
 		struct pipe_ctx *pipe_ctx);
@@ -203,6 +209,7 @@ struct hw_sequencer_funcs {
 
 	void (*set_cursor_position)(struct pipe_ctx *pipe);
 	void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+
 };
 
 void color_space_to_black_color(

From 3158223efde597521505b586a88a6d43c8f2324f Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Mon, 9 Apr 2018 17:19:27 -0400
Subject: [PATCH 0894/1286] drm/amd/display: Refactor otg_blank sequence

Also rename otg_blank to blank_pixel_data.
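
As with program_output_csc in the previous patch, the helper becomes a
hw_sequencer_funcs hook so per-ASIC sequencers can override it; roughly
(declaration and wiring only, other members elided):

    /* inc/hw_sequencer.h */
    void (*blank_pixel_data)(struct dc *dc,
                             struct stream_resource *stream_res,
                             struct dc_stream_state *stream,
                             bool blank);

    /* dcn10_hw_sequencer.c */
    static const struct hw_sequencer_funcs dcn10_funcs = {
            /* ... */
            .blank_pixel_data = dcn10_blank_pixel_data,
            /* ... */
    };

    /* callers now go through the table instead of the static helper */
    dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
                              pipe_ctx->stream, blank);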

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../display/dc/dce110/dce110_hw_sequencer.c   |  6 ++--
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 35 ++++++++++---------
 .../gpu/drm/amd/display/dc/inc/hw_sequencer.h |  8 ++++-
 3 files changed, 28 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index e70ccb9b6afe..51c6c70a4a30 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1233,7 +1233,7 @@ static void program_scaler(const struct dc *dc,
 		&pipe_ctx->plane_res.scl_data);
 }
 
-static enum dc_status dce110_prog_pixclk_crtc_otg(
+static enum dc_status dce110_enable_stream_timing(
 		struct pipe_ctx *pipe_ctx,
 		struct dc_state *context,
 		struct dc *dc)
@@ -1299,7 +1299,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
 			pipe_ctx[pipe_ctx->pipe_idx];
 
 	/*  */
-	dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc);
+	dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
 
 	/* FPGA does not program backend */
 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
@@ -3041,7 +3041,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.get_position = get_position,
 	.set_static_screen_control = set_static_screen_control,
 	.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
-	.prog_pixclk_crtc_otg = dce110_prog_pixclk_crtc_otg,
+	.enable_stream_timing = dce110_enable_stream_timing,
 	.setup_stereo = NULL,
 	.set_avmute = dce110_set_avmute,
 	.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 7cecab0ce297..8eea38b9e32b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -593,7 +593,7 @@ static void false_optc_underflow_wa(
 		tg->funcs->clear_optc_underflow(tg);
 }
 
-static enum dc_status dcn10_prog_pixclk_crtc_otg(
+static enum dc_status dcn10_enable_stream_timing(
 		struct pipe_ctx *pipe_ctx,
 		struct dc_state *context,
 		struct dc *dc)
@@ -1950,9 +1950,9 @@ static void update_dchubp_dpp(
 		hubp->funcs->set_blank(hubp, false);
 }
 
-static void dcn10_otg_blank(
+static void dcn10_blank_pixel_data(
 		struct dc *dc,
-		struct stream_resource stream_res,
+		struct stream_resource *stream_res,
 		struct dc_stream_state *stream,
 		bool blank)
 {
@@ -1963,21 +1963,21 @@ static void dcn10_otg_blank(
 	color_space = stream->output_color_space;
 	color_space_to_black_color(dc, color_space, &black_color);
 
-	if (stream_res.tg->funcs->set_blank_color)
-		stream_res.tg->funcs->set_blank_color(
-				stream_res.tg,
+	if (stream_res->tg->funcs->set_blank_color)
+		stream_res->tg->funcs->set_blank_color(
+				stream_res->tg,
 				&black_color);
 
 	if (!blank) {
-		if (stream_res.tg->funcs->set_blank)
-			stream_res.tg->funcs->set_blank(stream_res.tg, blank);
-		if (stream_res.abm)
-			stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
+		if (stream_res->tg->funcs->set_blank)
+			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
+		if (stream_res->abm)
+			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
 	} else if (blank) {
-		if (stream_res.abm)
-			stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
-		if (stream_res.tg->funcs->set_blank)
-			stream_res.tg->funcs->set_blank(stream_res.tg, blank);
+		if (stream_res->abm)
+			stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+		if (stream_res->tg->funcs->set_blank)
+			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
 	}
 }
 
@@ -2016,7 +2016,7 @@ static void program_all_pipe_in_tree(
 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
 				pipe_ctx->stream_res.tg);
 
-		dcn10_otg_blank(dc, pipe_ctx->stream_res,
+		dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
 				pipe_ctx->stream, blank);
 	}
 
@@ -2136,7 +2136,7 @@ static void dcn10_apply_ctx_for_surface(
 
 	if (num_planes == 0) {
 		/* OTG blank before remove all front end */
-		dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
+		dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
 	}
 
 	/* Disconnect unused mpcc */
@@ -2679,10 +2679,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.blank_stream = dce110_blank_stream,
 	.enable_display_power_gating = dcn10_dummy_display_power_gating,
 	.disable_plane = dcn10_disable_plane,
+	.blank_pixel_data = dcn10_blank_pixel_data,
 	.pipe_control_lock = dcn10_pipe_control_lock,
 	.set_bandwidth = dcn10_set_bandwidth,
 	.reset_hw_ctx_wrap = reset_hw_ctx_wrap,
-	.prog_pixclk_crtc_otg = dcn10_prog_pixclk_crtc_otg,
+	.enable_stream_timing = dcn10_enable_stream_timing,
 	.set_drr = set_drr,
 	.get_position = get_position,
 	.set_static_screen_control = set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index be6cf7ee1468..29abf3ecb39c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -65,6 +65,7 @@ struct dchub_init_data;
 struct dc_static_screen_events;
 struct resource_pool;
 struct resource_context;
+struct stream_resource;
 
 struct hw_sequencer_funcs {
 
@@ -162,6 +163,11 @@ struct hw_sequencer_funcs {
 				struct dc *dc,
 				struct pipe_ctx *pipe,
 				bool lock);
+	void (*blank_pixel_data)(
+			struct dc *dc,
+			struct stream_resource *stream_res,
+			struct dc_stream_state *stream,
+			bool blank);
 
 	void (*set_bandwidth)(
 			struct dc *dc,
@@ -177,7 +183,7 @@ struct hw_sequencer_funcs {
 	void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
 			int num_pipes, const struct dc_static_screen_events *events);
 
-	enum dc_status (*prog_pixclk_crtc_otg)(
+	enum dc_status (*enable_stream_timing)(
 			struct pipe_ctx *pipe_ctx,
 			struct dc_state *context,
 			struct dc *dc);

From 50834eb488a30026de040ab5d209ca9f980ae14b Mon Sep 17 00:00:00 2001
From: Hersen Wu <hersenxs.wu@amd.com>
Date: Wed, 11 Apr 2018 15:22:10 -0400
Subject: [PATCH 0895/1286] drm/amd/display: DP link validation bug for YUV422

Remove the limit of YUV422 color depth to 24 bits, which was a
workaround for old ASICs.
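
With the workaround gone, the per-channel depth is taken straight from
the timing for every pixel encoding. A standalone sketch of the selection
(enum names match the driver; the helper name is illustrative only):

    static uint32_t bits_per_channel_from_depth(enum dc_color_depth depth)
    {
            switch (depth) {
            case COLOR_DEPTH_666:    return 6;
            case COLOR_DEPTH_888:    return 8;
            case COLOR_DEPTH_101010: return 10;
            case COLOR_DEPTH_121212: return 12;
            case COLOR_DEPTH_141414: return 14;
            case COLOR_DEPTH_161616: return 16;
            default:                 return 0; /* caller asserts non-zero */
            }
    }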

Signed-off-by: Hersen Wu <hersenxs.wu@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 47 +++++++++----------
 1 file changed, 21 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 0a190c2b6898..7d609c71394b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1379,34 +1379,29 @@ static uint32_t bandwidth_in_kbps_from_timing(
 	uint32_t bits_per_channel = 0;
 	uint32_t kbps;
 
-	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+	switch (timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+		bits_per_channel = 6;
+		break;
+	case COLOR_DEPTH_888:
+		bits_per_channel = 8;
+		break;
+	case COLOR_DEPTH_101010:
+		bits_per_channel = 10;
+		break;
+	case COLOR_DEPTH_121212:
 		bits_per_channel = 12;
-	else{
-
-		switch (timing->display_color_depth) {
-
-		case COLOR_DEPTH_666:
-			bits_per_channel = 6;
-			break;
-		case COLOR_DEPTH_888:
-			bits_per_channel = 8;
-			break;
-		case COLOR_DEPTH_101010:
-			bits_per_channel = 10;
-			break;
-		case COLOR_DEPTH_121212:
-			bits_per_channel = 12;
-			break;
-		case COLOR_DEPTH_141414:
-			bits_per_channel = 14;
-			break;
-		case COLOR_DEPTH_161616:
-			bits_per_channel = 16;
-			break;
-		default:
-			break;
-		}
+		break;
+	case COLOR_DEPTH_141414:
+		bits_per_channel = 14;
+		break;
+	case COLOR_DEPTH_161616:
+		bits_per_channel = 16;
+		break;
+	default:
+		break;
 	}
+
 	ASSERT(bits_per_channel != 0);
 
 	kbps = timing->pix_clk_khz;

From 8a79593d77de17619e99c23495ac243759704b87 Mon Sep 17 00:00:00 2001
From: Tony Cheng <tony.cheng@amd.com>
Date: Mon, 16 Apr 2018 13:30:02 -0400
Subject: [PATCH 0896/1286] drm/amd/display: dal 3.1.43

Signed-off-by: Tony Cheng <tony.cheng@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7ac8a1bee5ac..92152980b0ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.42"
+#define DC_VER "3.1.43"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6

From 55a01d4023ce7249eed361731b373c78e62b73e0 Mon Sep 17 00:00:00 2001
From: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
Date: Fri, 13 Apr 2018 16:06:24 -0400
Subject: [PATCH 0897/1286] drm/amd/display: Add user_regamma to color module

Signed-off-by: Krunoslav Kovac <Krunoslav.Kovac@amd.com>
Reviewed-by: Anthony Koo <Anthony.Koo@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/modules/color/color_gamma.c   | 314 +++++++++++++++++-
 .../amd/display/modules/color/color_gamma.h   |  48 ++-
 2 files changed, 348 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index e7e374f56864..ad0ff50305ce 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -185,14 +185,14 @@ struct dividers {
 
 static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4)
 {
-		static const int32_t numerator01[] = { 31308, 180000};
-		static const int32_t numerator02[] = { 12920, 4500};
-		static const int32_t numerator03[] = { 55, 99};
-		static const int32_t numerator04[] = { 55, 99};
-		static const int32_t numerator05[] = { 2400, 2200};
+	static const int32_t numerator01[] = { 31308, 180000};
+	static const int32_t numerator02[] = { 12920, 4500};
+	static const int32_t numerator03[] = { 55, 99};
+	static const int32_t numerator04[] = { 55, 99};
+	static const int32_t numerator05[] = { 2400, 2200};
 
-		uint32_t i = 0;
-		uint32_t index = is_2_4 == true ? 0:1;
+	uint32_t i = 0;
+	uint32_t index = is_2_4 == true ? 0:1;
 
 	do {
 		coefficients->a0[i] = dal_fixed31_32_from_fraction(
@@ -691,7 +691,7 @@ static void build_degamma(struct pwl_float_data_ex *curve,
 	}
 }
 
-static bool scale_gamma(struct pwl_float_data *pwl_rgb,
+static void scale_gamma(struct pwl_float_data *pwl_rgb,
 		const struct dc_gamma *ramp,
 		struct dividers dividers)
 {
@@ -752,11 +752,9 @@ static bool scale_gamma(struct pwl_float_data *pwl_rgb,
 			dividers.divider3);
 	rgb->b = dal_fixed31_32_mul(rgb_last->b,
 			dividers.divider3);
-
-	return true;
 }
 
-static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
+static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
 		const struct dc_gamma *ramp,
 		struct dividers dividers)
 {
@@ -818,8 +816,71 @@ static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
 				pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
 	pwl_rgb[i].b =  dal_fixed31_32_sub(dal_fixed31_32_mul_int(
 				pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
+}
 
-	return true;
+/* todo: all these scale_gamma functions are inherently the same but
+ *  take different structures as params or different format for ramp
+ *  values. We could probably implement it in a more generic fashion
+ */
+static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
+		const struct regamma_ramp *ramp,
+		struct dividers dividers)
+{
+	unsigned short max_driver = 0xFFFF;
+	unsigned short max_os = 0xFF00;
+	unsigned short scaler = max_os;
+	uint32_t i;
+	struct pwl_float_data *rgb = pwl_rgb;
+	struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1;
+
+	i = 0;
+	do {
+		if (ramp->gamma[i] > max_os ||
+				ramp->gamma[i + 256] > max_os ||
+				ramp->gamma[i + 512] > max_os) {
+			scaler = max_driver;
+			break;
+		}
+		i++;
+	} while (i != GAMMA_RGB_256_ENTRIES);
+
+	i = 0;
+	do {
+		rgb->r = dal_fixed31_32_from_fraction(
+				ramp->gamma[i], scaler);
+		rgb->g = dal_fixed31_32_from_fraction(
+				ramp->gamma[i + 256], scaler);
+		rgb->b = dal_fixed31_32_from_fraction(
+				ramp->gamma[i + 512], scaler);
+
+		++rgb;
+		++i;
+	} while (i != GAMMA_RGB_256_ENTRIES);
+
+	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+			dividers.divider1);
+	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+			dividers.divider1);
+	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+			dividers.divider1);
+
+	++rgb;
+
+	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+			dividers.divider2);
+	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+			dividers.divider2);
+	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+			dividers.divider2);
+
+	++rgb;
+
+	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+			dividers.divider3);
+	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+			dividers.divider3);
+	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+			dividers.divider3);
 }
 
 /*
@@ -949,7 +1010,7 @@ static inline void copy_rgb_regamma_to_coordinates_x(
 	uint32_t i = 0;
 	const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
 
-	while (i <= hw_points_num) {
+	while (i <= hw_points_num + 1) {
 		coords->regamma_y_red = rgb_regamma->r;
 		coords->regamma_y_green = rgb_regamma->g;
 		coords->regamma_y_blue = rgb_regamma->b;
@@ -1002,6 +1063,102 @@ static bool calculate_interpolated_hardware_curve(
 	return true;
 }
 
+/* The "old" interpolation uses a complicated scheme to build an array of
+ * coefficients while also using an array of 0-255 normalized to 0-1
+ * Then there's another loop using both of the above + new scaled user ramp
+ * and we concatenate them. It also searches for points of interpolation and
+ * uses enums for positions.
+ *
+ * This function uses a different approach:
+ * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255
+ * To find index for hwX , we notice the following:
+ * i/255 <= hwX < (i+1)/255  <=> i <= 255*hwX < i+1
+ * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT
+ *
+ * Once the index is known, combined Y is simply:
+ * user_ramp(index) + (hwX-index/255)*(user_ramp(index+1) - user_ramp(index)
+ *
+ * We should switch to this method in all cases, it's simpler and faster
+ * ToDo one day - for now this only applies to ADL regamma to avoid regression
+ * for regular use cases (sRGB and PQ)
+ */
+static void interpolate_user_regamma(uint32_t hw_points_num,
+		struct pwl_float_data *rgb_user,
+		bool apply_degamma,
+		struct dc_transfer_func_distributed_points *tf_pts)
+{
+	uint32_t i;
+	uint32_t color = 0;
+	int32_t index;
+	int32_t index_next;
+	struct fixed31_32 *tf_point;
+	struct fixed31_32 hw_x;
+	struct fixed31_32 norm_factor =
+			dal_fixed31_32_from_int_nonconst(255);
+	struct fixed31_32 norm_x;
+	struct fixed31_32 index_f;
+	struct fixed31_32 lut1;
+	struct fixed31_32 lut2;
+	struct fixed31_32 delta_lut;
+	struct fixed31_32 delta_index;
+
+	i = 0;
+	/* fixed_pt library has problems handling too small values */
+	while (i != 32) {
+		tf_pts->red[i] = dal_fixed31_32_zero;
+		tf_pts->green[i] = dal_fixed31_32_zero;
+		tf_pts->blue[i] = dal_fixed31_32_zero;
+		++i;
+	}
+	while (i <= hw_points_num + 1) {
+		for (color = 0; color < 3; color++) {
+			if (color == 0)
+				tf_point = &tf_pts->red[i];
+			else if (color == 1)
+				tf_point = &tf_pts->green[i];
+			else
+				tf_point = &tf_pts->blue[i];
+
+			if (apply_degamma) {
+				if (color == 0)
+					hw_x = coordinates_x[i].regamma_y_red;
+				else if (color == 1)
+					hw_x = coordinates_x[i].regamma_y_green;
+				else
+					hw_x = coordinates_x[i].regamma_y_blue;
+			} else
+				hw_x = coordinates_x[i].x;
+
+			norm_x = dal_fixed31_32_mul(norm_factor, hw_x);
+			index = dal_fixed31_32_floor(norm_x);
+			if (index < 0 || index > 255)
+				continue;
+
+			index_f = dal_fixed31_32_from_int_nonconst(index);
+			index_next = (index == 255) ? index : index + 1;
+
+			if (color == 0) {
+				lut1 = rgb_user[index].r;
+				lut2 = rgb_user[index_next].r;
+			} else if (color == 1) {
+				lut1 = rgb_user[index].g;
+				lut2 = rgb_user[index_next].g;
+			} else {
+				lut1 = rgb_user[index].b;
+				lut2 = rgb_user[index_next].b;
+			}
+
+			// we have everything now, so interpolate
+			delta_lut = dal_fixed31_32_sub(lut2, lut1);
+			delta_index = dal_fixed31_32_sub(norm_x, index_f);
+
+			*tf_point = dal_fixed31_32_add(lut1,
+				dal_fixed31_32_mul(delta_index, delta_lut));
+		}
+		++i;
+	}
+}
+
 static void build_new_custom_resulted_curve(
 	uint32_t hw_points_num,
 	struct dc_transfer_func_distributed_points *tf_pts)
@@ -1025,6 +1182,29 @@ static void build_new_custom_resulted_curve(
 	}
 }
 
+static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma,
+		uint32_t hw_points_num)
+{
+	uint32_t i;
+
+	struct gamma_coefficients coeff;
+	struct pwl_float_data_ex *rgb = rgb_regamma;
+	const struct hw_x_point *coord_x = coordinates_x;
+
+	build_coefficients(&coeff, true);
+
+	i = 0;
+	while (i != hw_points_num + 1) {
+		rgb->r = translate_from_linear_space_ex(
+				coord_x->x, &coeff, 0);
+		rgb->g = rgb->r;
+		rgb->b = rgb->r;
+		++coord_x;
+		++rgb;
+		++i;
+	}
+}
+
 static bool map_regamma_hw_to_x_user(
 	const struct dc_gamma *ramp,
 	struct pixel_gamma_point *coeff128,
@@ -1062,6 +1242,7 @@ static bool map_regamma_hw_to_x_user(
 		}
 	}
 
+	/* this should be named differently, all it does is clamp to 0-1 */
 	build_new_custom_resulted_curve(hw_points_num, tf_pts);
 
 	return true;
@@ -1168,6 +1349,113 @@ rgb_user_alloc_fail:
 	return ret;
 }
 
+bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
+		const struct regamma_lut *regamma)
+{
+	struct gamma_coefficients coeff;
+	const struct hw_x_point *coord_x = coordinates_x;
+	uint32_t i = 0;
+
+	do {
+		coeff.a0[i] = dal_fixed31_32_from_fraction(
+				regamma->coeff.A0[i], 10000000);
+		coeff.a1[i] = dal_fixed31_32_from_fraction(
+				regamma->coeff.A1[i], 1000);
+		coeff.a2[i] = dal_fixed31_32_from_fraction(
+				regamma->coeff.A2[i], 1000);
+		coeff.a3[i] = dal_fixed31_32_from_fraction(
+				regamma->coeff.A3[i], 1000);
+		coeff.user_gamma[i] = dal_fixed31_32_from_fraction(
+				regamma->coeff.gamma[i], 1000);
+
+		++i;
+	} while (i != 3);
+
+	i = 0;
+	/* fixed_pt library has problems handling too small values */
+	while (i != 32) {
+		output_tf->tf_pts.red[i] = dal_fixed31_32_zero;
+		output_tf->tf_pts.green[i] = dal_fixed31_32_zero;
+		output_tf->tf_pts.blue[i] = dal_fixed31_32_zero;
+		++coord_x;
+		++i;
+	}
+	while (i != MAX_HW_POINTS + 1) {
+		output_tf->tf_pts.red[i] = translate_from_linear_space_ex(
+				coord_x->x, &coeff, 0);
+		output_tf->tf_pts.green[i] = translate_from_linear_space_ex(
+				coord_x->x, &coeff, 1);
+		output_tf->tf_pts.blue[i] = translate_from_linear_space_ex(
+				coord_x->x, &coeff, 2);
+		++coord_x;
+		++i;
+	}
+
+	// this function just clamps output to 0-1
+	build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts);
+	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+	return true;
+}
+
+bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
+		const struct regamma_lut *regamma)
+{
+	struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+	struct dividers dividers;
+
+	struct pwl_float_data *rgb_user = NULL;
+	struct pwl_float_data_ex *rgb_regamma = NULL;
+	bool ret = false;
+
+	if (regamma == NULL)
+		return false;
+
+	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+	rgb_user = kzalloc(sizeof(*rgb_user) * (GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS),
+			GFP_KERNEL);
+	if (!rgb_user)
+		goto rgb_user_alloc_fail;
+
+	rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+			GFP_KERNEL);
+	if (!rgb_regamma)
+		goto rgb_regamma_alloc_fail;
+
+	dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
+	dividers.divider2 = dal_fixed31_32_from_int(2);
+	dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
+
+	scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
+
+	if (regamma->flags.bits.applyDegamma == 1) {
+		apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS);
+		copy_rgb_regamma_to_coordinates_x(coordinates_x,
+				MAX_HW_POINTS, rgb_regamma);
+	}
+
+	interpolate_user_regamma(MAX_HW_POINTS, rgb_user,
+			regamma->flags.bits.applyDegamma, tf_pts);
+
+	// no custom HDR curves!
+	tf_pts->end_exponent = 0;
+	tf_pts->x_point_at_y1_red = 1;
+	tf_pts->x_point_at_y1_green = 1;
+	tf_pts->x_point_at_y1_blue = 1;
+
+	// this function just clamps output to 0-1
+	build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts);
+
+	ret = true;
+
+	kfree(rgb_regamma);
+rgb_regamma_alloc_fail:
+	kfree(rgb_user);
+rgb_user_alloc_fail:
+	return ret;
+}
+
 bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 		const struct dc_gamma *ramp, bool mapUserRamp)
 {
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index b7f9bc27d101..b64048991a95 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -32,6 +32,47 @@ struct dc_transfer_func_distributed_points;
 struct dc_rgb_fixed;
 enum dc_transfer_func_predefined;
 
+/* For SetRegamma ADL interface support
+ * Must match escape type
+ */
+union regamma_flags {
+	unsigned int raw;
+	struct {
+		unsigned int gammaRampArray       :1;    // RegammaRamp is in use
+		unsigned int gammaFromEdid        :1;    //gamma from edid is in use
+		unsigned int gammaFromEdidEx      :1;    //gamma from edid is in use , but only for Display Id 1.2
+		unsigned int gammaFromUser        :1;    //user custom gamma is used
+		unsigned int coeffFromUser        :1;    //coeff. A0-A3 from user is in use
+		unsigned int coeffFromEdid        :1;    //coeff. A0-A3 from edid is in use
+		unsigned int applyDegamma         :1;    //flag for additional degamma correction in driver
+		unsigned int gammaPredefinedSRGB  :1;    //flag for SRGB gamma
+		unsigned int gammaPredefinedPQ    :1;    //flag for PQ gamma
+		unsigned int gammaPredefinedPQ2084Interim :1;    //flag for PQ gamma, lower max nits
+		unsigned int gammaPredefined36    :1;    //flag for 3.6 gamma
+		unsigned int gammaPredefinedReset :1;    //flag to return to previous gamma
+	} bits;
+};
+
+struct regamma_ramp {
+	unsigned short gamma[256*3];  // gamma ramp packed  in same way as OS windows ,r , g & b
+};
+
+struct regamma_coeff {
+	int    gamma[3];
+	int    A0[3];
+	int    A1[3];
+	int    A2[3];
+	int    A3[3];
+};
+
+struct regamma_lut {
+	union regamma_flags flags;
+	union {
+		struct regamma_ramp ramp;
+		struct regamma_coeff coeff;
+	};
+};
+
 void setup_x_points_distribution(void);
 void precompute_pq(void);
 void precompute_de_pq(void);
@@ -45,9 +86,14 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
 bool mod_color_calculate_curve(enum dc_transfer_func_predefined  trans,
 		struct dc_transfer_func_distributed_points *points);
 
-bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 				struct dc_transfer_func_distributed_points *points);
 
+bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
+		const struct regamma_lut *regamma);
+
+bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
+		const struct regamma_lut *regamma);
 
 
 #endif /* COLOR_MOD_COLOR_GAMMA_H_ */

From c0aceb7d6303ca138b0def39f25d432057548f43 Mon Sep 17 00:00:00 2001
From: Charlene Liu <charlene.liu@amd.com>
Date: Mon, 16 Apr 2018 15:14:15 -0400
Subject: [PATCH 0898/1286] drm/amd/display: add cursor TTU CRQ related

Signed-off-by: Charlene Liu <charlene.liu@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c |  7 +++++++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 10 +++++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 58062172cf3f..759fcd1e666a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -613,6 +613,13 @@ void hubp1_program_deadline(
 	REG_SET(DCN_SURF1_TTU_CNTL1, 0,
 		REFCYC_PER_REQ_DELIVERY_PRE,
 		ttu_attr->refcyc_per_req_delivery_pre_c);
+
+	REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
+		REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
+		QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
+		QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
+	REG_SET(DCN_CUR0_TTU_CNTL1, 0,
+		REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
 }
 
 static void hubp1_setup(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 920ae3a1b412..02045a8c30fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -93,6 +93,8 @@
 	SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
 	SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
 	SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
+	SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
+	SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
 	SRI(HUBP_CLK_CNTL, HUBP, id)
 
 /* Register address initialization macro for ASICs with VM */
@@ -203,6 +205,8 @@
 	uint32_t DCN_SURF0_TTU_CNTL1; \
 	uint32_t DCN_SURF1_TTU_CNTL0; \
 	uint32_t DCN_SURF1_TTU_CNTL1; \
+	uint32_t DCN_CUR0_TTU_CNTL0; \
+	uint32_t DCN_CUR0_TTU_CNTL1; \
 	uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \
 	uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \
 	uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \
@@ -368,7 +372,11 @@
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
-	HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh)
+	HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh)
 
 #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
 	HUBP_MASK_SH_LIST_DCN(mask_sh),\

From 7c91bd434e5765dc5dbcf155253f2b8c740fbef9 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Fri, 13 Apr 2018 09:40:21 -0400
Subject: [PATCH 0899/1286] drm/amd/display: add some DTN logs for input and
 output tf

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c  | 25 +++++++++++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  | 27 ++++++++++++
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 41 +++++++++++++++++++
 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h   | 16 ++++++++
 4 files changed, 109 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index c008a71ebc4e..8c4d9e523331 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -98,6 +98,30 @@ enum gamut_remap_select {
 	GAMUT_REMAP_COMB_COEFF
 };
 
+void dpp_read_state(struct dpp *dpp_base,
+		struct dcn_dpp_state *s)
+{
+	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+	REG_GET(CM_IGAM_CONTROL,
+			CM_IGAM_LUT_MODE, &s->igam_lut_mode);
+	REG_GET(CM_IGAM_CONTROL,
+			CM_IGAM_INPUT_FORMAT, &s->igam_input_format);
+	REG_GET(CM_DGAM_CONTROL,
+			CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
+	REG_GET(CM_RGAM_CONTROL,
+			CM_RGAM_LUT_MODE, &s->rgam_lut_mode);
+	REG_GET(CM_GAMUT_REMAP_CONTROL,
+			CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
+
+	s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
+	s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
+	s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
+	s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
+	s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
+	s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
+}
+
 /* Program gamut remap in bypass mode */
 void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
 {
@@ -450,6 +474,7 @@ void dpp1_dppclk_control(
 }
 
 static const struct dpp_funcs dcn10_dpp_funcs = {
+		.dpp_read_state = dpp_read_state,
 		.dpp_reset = dpp_reset,
 		.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
 		.dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 3fccf9959305..5944a3ba0409 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -44,6 +44,10 @@
 #define TF_REG_LIST_DCN(id) \
 	SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
 	SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
+	SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\
+	SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\
+	SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\
+	SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\
 	SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
 	SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
 	SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
@@ -177,6 +181,14 @@
 	TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
 	TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
 	TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\
+	TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\
 	TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
 	TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
 	TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
@@ -524,6 +536,14 @@
 	type CM_GAMUT_REMAP_MODE; \
 	type CM_GAMUT_REMAP_C11; \
 	type CM_GAMUT_REMAP_C12; \
+	type CM_GAMUT_REMAP_C13; \
+	type CM_GAMUT_REMAP_C14; \
+	type CM_GAMUT_REMAP_C21; \
+	type CM_GAMUT_REMAP_C22; \
+	type CM_GAMUT_REMAP_C23; \
+	type CM_GAMUT_REMAP_C24; \
+	type CM_GAMUT_REMAP_C31; \
+	type CM_GAMUT_REMAP_C32; \
 	type CM_GAMUT_REMAP_C33; \
 	type CM_GAMUT_REMAP_C34; \
 	type CM_COMA_C11; \
@@ -1095,6 +1115,10 @@ struct dcn_dpp_mask {
 	uint32_t RECOUT_SIZE; \
 	uint32_t CM_GAMUT_REMAP_CONTROL; \
 	uint32_t CM_GAMUT_REMAP_C11_C12; \
+	uint32_t CM_GAMUT_REMAP_C13_C14; \
+	uint32_t CM_GAMUT_REMAP_C21_C22; \
+	uint32_t CM_GAMUT_REMAP_C23_C24; \
+	uint32_t CM_GAMUT_REMAP_C31_C32; \
 	uint32_t CM_GAMUT_REMAP_C33_C34; \
 	uint32_t CM_COMA_C11_C12; \
 	uint32_t CM_COMA_C33_C34; \
@@ -1407,6 +1431,9 @@ bool dpp_get_optimal_number_of_taps(
 		struct scaler_data *scl_data,
 		const struct scaling_taps *in_taps);
 
+void dpp_read_state(struct dpp *dpp_base,
+		struct dcn_dpp_state *s);
+
 void dpp_reset(struct dpp *dpp_base);
 
 void dpp1_cm_program_regamma_lut(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8eea38b9e32b..9a642116f2ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -250,6 +250,47 @@ void dcn10_log_hw_state(struct dc *dc)
 	}
 	DTN_INFO("\n");
 
+	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
+			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
+			"C31 C32   C33 C34\n");
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct dpp *dpp = pool->dpps[i];
+		struct dcn_dpp_state s;
+
+		dpp->funcs->dpp_read_state(dpp, &s);
+
+		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
+				"%08xh   %08xh %08xh %08xh %08xh %08xh %08xh",
+				dpp->inst,
+				s.igam_input_format,
+				(s.igam_lut_mode == 0) ? "BypassFixed" :
+					((s.igam_lut_mode == 1) ? "BypassFloat" :
+					((s.igam_lut_mode == 2) ? "RAM" :
+					((s.igam_lut_mode == 3) ? "RAM" :
+								 "Unknown"))),
+				(s.dgam_lut_mode == 0) ? "Bypass" :
+					((s.dgam_lut_mode == 1) ? "sRGB" :
+					((s.dgam_lut_mode == 2) ? "Ycc" :
+					((s.dgam_lut_mode == 3) ? "RAM" :
+					((s.dgam_lut_mode == 4) ? "RAM" :
+								 "Unknown")))),
+				(s.rgam_lut_mode == 0) ? "Bypass" :
+					((s.rgam_lut_mode == 1) ? "sRGB" :
+					((s.rgam_lut_mode == 2) ? "Ycc" :
+					((s.rgam_lut_mode == 3) ? "RAM" :
+					((s.rgam_lut_mode == 4) ? "RAM" :
+								 "Unknown")))),
+				s.gamut_remap_mode,
+				s.gamut_remap_c11_c12,
+				s.gamut_remap_c13_c14,
+				s.gamut_remap_c21_c22,
+				s.gamut_remap_c23_c24,
+				s.gamut_remap_c31_c32,
+				s.gamut_remap_c33_c34);
+		DTN_INFO("\n");
+	}
+	DTN_INFO("\n");
+
 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
 	for (i = 0; i < pool->pipe_count; i++) {
 		struct mpcc_state s = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index bb7af1b1c7b3..582458f028f8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -44,7 +44,23 @@ struct dpp_grph_csc_adjustment {
 	enum graphics_gamut_adjust_type gamut_adjust_type;
 };
 
+struct dcn_dpp_state {
+	uint32_t igam_lut_mode;
+	uint32_t igam_input_format;
+	uint32_t dgam_lut_mode;
+	uint32_t rgam_lut_mode;
+	uint32_t gamut_remap_mode;
+	uint32_t gamut_remap_c11_c12;
+	uint32_t gamut_remap_c13_c14;
+	uint32_t gamut_remap_c21_c22;
+	uint32_t gamut_remap_c23_c24;
+	uint32_t gamut_remap_c31_c32;
+	uint32_t gamut_remap_c33_c34;
+};
+
 struct dpp_funcs {
+	void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
+
 	void (*dpp_reset)(struct dpp *dpp);
 
 	void (*dpp_set_scaler)(struct dpp *dpp,

From 7b265fd96cbfa03630a2db90b3891b8397bf2208 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Tue, 17 Apr 2018 12:12:56 -0400
Subject: [PATCH 0900/1286] drm/amd/display: update dtn logging and goldens

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 9a642116f2ef..574d37cdfa20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -260,7 +260,7 @@ void dcn10_log_hw_state(struct dc *dc)
 		dpp->funcs->dpp_read_state(dpp, &s);
 
 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
-				"%08xh   %08xh %08xh %08xh %08xh %08xh %08xh",
+				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
 				dpp->inst,
 				s.igam_input_format,
 				(s.igam_lut_mode == 0) ? "BypassFixed" :

From 34cb6b3860a4aecafaae0df8fa84b6fc784f507c Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Wed, 11 Apr 2018 11:51:32 -0400
Subject: [PATCH 0901/1286] drm/amd/display: compact the rq/dlg/ttu log

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c |   4 +-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h |  24 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 235 ++++++++----------
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  |  22 +-
 4 files changed, 128 insertions(+), 157 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 759fcd1e666a..159bebcfd521 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -763,10 +763,10 @@ void min_set_viewport(
 		  PRI_VIEWPORT_Y_START_C, viewport_c->y);
 }
 
-void hubp1_read_state(struct hubp *hubp,
-		struct dcn_hubp_state *s)
+void hubp1_read_state(struct hubp *hubp)
 {
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+	struct dcn_hubp_state *s = &hubp1->state;
 	struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
 	struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
 	struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 02045a8c30fd..fe9b8c4a91ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -619,8 +619,29 @@ struct dcn_mi_mask {
 	DCN_HUBP_REG_FIELD_LIST(uint32_t);
 };
 
+struct dcn_hubp_state {
+	struct _vcs_dpi_display_dlg_regs_st dlg_attr;
+	struct _vcs_dpi_display_ttu_regs_st ttu_attr;
+	struct _vcs_dpi_display_rq_regs_st rq_regs;
+	uint32_t pixel_format;
+	uint32_t inuse_addr_hi;
+	uint32_t viewport_width;
+	uint32_t viewport_height;
+	uint32_t rotation_angle;
+	uint32_t h_mirror_en;
+	uint32_t sw_mode;
+	uint32_t dcc_en;
+	uint32_t blank_en;
+	uint32_t underflow_status;
+	uint32_t ttu_disable;
+	uint32_t min_ttu_vblank;
+	uint32_t qos_level_low_wm;
+	uint32_t qos_level_high_wm;
+};
+
 struct dcn10_hubp {
 	struct hubp base;
+	struct dcn_hubp_state state;
 	const struct dcn_mi_registers *hubp_regs;
 	const struct dcn_mi_shift *hubp_shift;
 	const struct dcn_mi_mask *hubp_mask;
@@ -698,8 +719,7 @@ void dcn10_hubp_construct(
 	const struct dcn_mi_shift *hubp_shift,
 	const struct dcn_mi_mask *hubp_mask);
 
-void hubp1_read_state(struct hubp *hubp,
-		struct dcn_hubp_state *s);
+void hubp1_read_state(struct hubp *hubp);
 
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 574d37cdfa20..572fa601a0eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -112,102 +112,114 @@ void dcn10_log_hubbub_state(struct dc *dc)
 	DTN_INFO("\n");
 }
 
-static void print_rq_dlg_ttu_regs(struct dc_context *dc_ctx, struct dcn_hubp_state *s)
+static void dcn10_log_hubp_states(struct dc *dc)
 {
-	struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
-	struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
-	struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+	struct dc_context *dc_ctx = dc->ctx;
+	struct resource_pool *pool = dc->res_pool;
+	int i;
 
-	DTN_INFO("========Requester========\n");
-	DTN_INFO("drq_expansion_mode      = 0x%0x\n", rq_regs->drq_expansion_mode);
-	DTN_INFO("prq_expansion_mode      = 0x%0x\n", rq_regs->prq_expansion_mode);
-	DTN_INFO("mrq_expansion_mode      = 0x%0x\n", rq_regs->mrq_expansion_mode);
-	DTN_INFO("crq_expansion_mode      = 0x%0x\n", rq_regs->crq_expansion_mode);
-	DTN_INFO("plane1_base_address     = 0x%0x\n", rq_regs->plane1_base_address);
-	DTN_INFO("==<LUMA>==\n");
-	DTN_INFO("chunk_size              = 0x%0x\n", rq_regs->rq_regs_l.chunk_size);
-	DTN_INFO("min_chunk_size          = 0x%0x\n", rq_regs->rq_regs_l.min_chunk_size);
-	DTN_INFO("meta_chunk_size         = 0x%0x\n", rq_regs->rq_regs_l.meta_chunk_size);
-	DTN_INFO("min_meta_chunk_size     = 0x%0x\n", rq_regs->rq_regs_l.min_meta_chunk_size);
-	DTN_INFO("dpte_group_size         = 0x%0x\n", rq_regs->rq_regs_l.dpte_group_size);
-	DTN_INFO("mpte_group_size         = 0x%0x\n", rq_regs->rq_regs_l.mpte_group_size);
-	DTN_INFO("swath_height            = 0x%0x\n", rq_regs->rq_regs_l.swath_height);
-	DTN_INFO("pte_row_height_linear   = 0x%0x\n", rq_regs->rq_regs_l.pte_row_height_linear);
-	DTN_INFO("==<CHROMA>==\n");
-	DTN_INFO("chunk_size              = 0x%0x\n", rq_regs->rq_regs_c.chunk_size);
-	DTN_INFO("min_chunk_size          = 0x%0x\n", rq_regs->rq_regs_c.min_chunk_size);
-	DTN_INFO("meta_chunk_size         = 0x%0x\n", rq_regs->rq_regs_c.meta_chunk_size);
-	DTN_INFO("min_meta_chunk_size     = 0x%0x\n", rq_regs->rq_regs_c.min_meta_chunk_size);
-	DTN_INFO("dpte_group_size         = 0x%0x\n", rq_regs->rq_regs_c.dpte_group_size);
-	DTN_INFO("mpte_group_size         = 0x%0x\n", rq_regs->rq_regs_c.mpte_group_size);
-	DTN_INFO("swath_height            = 0x%0x\n", rq_regs->rq_regs_c.swath_height);
-	DTN_INFO("pte_row_height_linear   = 0x%0x\n", rq_regs->rq_regs_c.pte_row_height_linear);
+	DTN_INFO("HUBP:  format  addr_hi  width  height"
+			"  rot  mir  sw_mode  dcc_en  blank_en  ttu_dis  underflow"
+			"   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct hubp *hubp = pool->hubps[i];
+		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+		hubp->funcs->hubp_read_state(hubp);
+
+		DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh"
+				"  %6d  %8d  %7d  %8xh",
+				hubp->inst,
+				s->pixel_format,
+				s->inuse_addr_hi,
+				s->viewport_width,
+				s->viewport_height,
+				s->rotation_angle,
+				s->h_mirror_en,
+				s->sw_mode,
+				s->dcc_en,
+				s->blank_en,
+				s->ttu_disable,
+				s->underflow_status);
+		DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
+		DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
+		DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
+		DTN_INFO("\n");
+	}
+
+	DTN_INFO("\n=========RQ========\n");
+	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
+		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
+		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+		DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
+			i, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
+			rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
+			rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
+			rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
+			rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
+			rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
+			rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
+			rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
+			rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
+	}
 
 	DTN_INFO("========DLG========\n");
-	DTN_INFO("refcyc_h_blank_end                  = 0x%0x\n", dlg_regs->refcyc_h_blank_end);
-	DTN_INFO("dlg_vblank_end                      = 0x%0x\n", dlg_regs->dlg_vblank_end);
-	DTN_INFO("min_dst_y_next_start                = 0x%0x\n", dlg_regs->min_dst_y_next_start);
-	DTN_INFO("refcyc_per_htotal                   = 0x%0x\n", dlg_regs->refcyc_per_htotal);
-	DTN_INFO("refcyc_x_after_scaler               = 0x%0x\n", dlg_regs->refcyc_x_after_scaler);
-	DTN_INFO("dst_y_after_scaler                  = 0x%0x\n", dlg_regs->dst_y_after_scaler);
-	DTN_INFO("dst_y_prefetch                      = 0x%0x\n", dlg_regs->dst_y_prefetch);
-	DTN_INFO("dst_y_per_vm_vblank                 = 0x%0x\n", dlg_regs->dst_y_per_vm_vblank);
-	DTN_INFO("dst_y_per_row_vblank                = 0x%0x\n", dlg_regs->dst_y_per_row_vblank);
-	DTN_INFO("dst_y_per_vm_flip                   = 0x%0x\n", dlg_regs->dst_y_per_vm_flip);
-	DTN_INFO("dst_y_per_row_flip                  = 0x%0x\n", dlg_regs->dst_y_per_row_flip);
-	DTN_INFO("ref_freq_to_pix_freq                = 0x%0x\n", dlg_regs->ref_freq_to_pix_freq);
-	DTN_INFO("vratio_prefetch                     = 0x%0x\n", dlg_regs->vratio_prefetch);
-	DTN_INFO("vratio_prefetch_c                   = 0x%0x\n", dlg_regs->vratio_prefetch_c);
-	DTN_INFO("refcyc_per_pte_group_vblank_l       = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_l);
-	DTN_INFO("refcyc_per_pte_group_vblank_c       = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_c);
-	DTN_INFO("refcyc_per_meta_chunk_vblank_l      = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_l);
-	DTN_INFO("refcyc_per_meta_chunk_vblank_c      = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_c);
-	DTN_INFO("refcyc_per_pte_group_flip_l         = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_l);
-	DTN_INFO("refcyc_per_pte_group_flip_c         = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_c);
-	DTN_INFO("refcyc_per_meta_chunk_flip_l        = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_l);
-	DTN_INFO("refcyc_per_meta_chunk_flip_c        = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_c);
-	DTN_INFO("dst_y_per_pte_row_nom_l             = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_l);
-	DTN_INFO("dst_y_per_pte_row_nom_c             = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_c);
-	DTN_INFO("refcyc_per_pte_group_nom_l          = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_l);
-	DTN_INFO("refcyc_per_pte_group_nom_c          = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_c);
-	DTN_INFO("dst_y_per_meta_row_nom_l            = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_l);
-	DTN_INFO("dst_y_per_meta_row_nom_c            = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_c);
-	DTN_INFO("refcyc_per_meta_chunk_nom_l         = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_l);
-	DTN_INFO("refcyc_per_meta_chunk_nom_c         = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_c);
-	DTN_INFO("refcyc_per_line_delivery_pre_l      = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_l);
-	DTN_INFO("refcyc_per_line_delivery_pre_c      = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_c);
-	DTN_INFO("refcyc_per_line_delivery_l          = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_l);
-	DTN_INFO("refcyc_per_line_delivery_c          = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_c);
-	DTN_INFO("chunk_hdl_adjust_cur0               = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur0);
-	DTN_INFO("dst_y_offset_cur1                   = 0x%0x\n", dlg_regs->dst_y_offset_cur1);
-	DTN_INFO("chunk_hdl_adjust_cur1               = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur1);
-	DTN_INFO("vready_after_vcount0                = 0x%0x\n", dlg_regs->vready_after_vcount0);
-	DTN_INFO("dst_y_delta_drq_limit               = 0x%0x\n", dlg_regs->dst_y_delta_drq_limit);
-	DTN_INFO("xfc_reg_transfer_delay              = 0x%0x\n", dlg_regs->xfc_reg_transfer_delay);
-	DTN_INFO("xfc_reg_precharge_delay             = 0x%0x\n", dlg_regs->xfc_reg_precharge_delay);
-	DTN_INFO("xfc_reg_remote_surface_flip_latency = 0x%0x\n", dlg_regs->xfc_reg_remote_surface_flip_latency);
+	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
+			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
+			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
+			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
+			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
+			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
+			"  x_rp_dlay  x_rr_sfl\n");
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
+
+		DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
+			"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
+			"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
+			i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
+			dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
+			dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
+			dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
+			dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
+			dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
+			dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
+			dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
+			dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
+			dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
+			dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
+			dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
+			dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
+			dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
+			dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
+			dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
+			dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
+			dlg_regs->xfc_reg_remote_surface_flip_latency);
+	}
 
 	DTN_INFO("========TTU========\n");
-	DTN_INFO("qos_level_low_wm                  = 0x%0x\n", ttu_regs->qos_level_low_wm);
-	DTN_INFO("qos_level_high_wm                 = 0x%0x\n", ttu_regs->qos_level_high_wm);
-	DTN_INFO("min_ttu_vblank                    = 0x%0x\n", ttu_regs->min_ttu_vblank);
-	DTN_INFO("qos_level_flip                    = 0x%0x\n", ttu_regs->qos_level_flip);
-	DTN_INFO("refcyc_per_req_delivery_pre_l     = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_l);
-	DTN_INFO("refcyc_per_req_delivery_l         = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_l);
-	DTN_INFO("refcyc_per_req_delivery_pre_c     = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_c);
-	DTN_INFO("refcyc_per_req_delivery_c         = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_c);
-	DTN_INFO("refcyc_per_req_delivery_cur0      = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur0);
-	DTN_INFO("refcyc_per_req_delivery_pre_cur0  = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur0);
-	DTN_INFO("refcyc_per_req_delivery_cur1      = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur1);
-	DTN_INFO("refcyc_per_req_delivery_pre_cur1  = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur1);
-	DTN_INFO("qos_level_fixed_l                 = 0x%0x\n", ttu_regs->qos_level_fixed_l);
-	DTN_INFO("qos_ramp_disable_l                = 0x%0x\n", ttu_regs->qos_ramp_disable_l);
-	DTN_INFO("qos_level_fixed_c                 = 0x%0x\n", ttu_regs->qos_level_fixed_c);
-	DTN_INFO("qos_ramp_disable_c                = 0x%0x\n", ttu_regs->qos_ramp_disable_c);
-	DTN_INFO("qos_level_fixed_cur0              = 0x%0x\n", ttu_regs->qos_level_fixed_cur0);
-	DTN_INFO("qos_ramp_disable_cur0             = 0x%0x\n", ttu_regs->qos_ramp_disable_cur0);
-	DTN_INFO("qos_level_fixed_cur1              = 0x%0x\n", ttu_regs->qos_level_fixed_cur1);
-	DTN_INFO("qos_ramp_disable_cur1             = 0x%0x\n", ttu_regs->qos_ramp_disable_cur1);
+	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
+			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
+			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
+
+		DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
+			i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
+			ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
+			ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
+			ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
+			ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
+			ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
+			ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
+	}
+	DTN_INFO("\n");
 }
 
 void dcn10_log_hw_state(struct dc *dc)
@@ -220,35 +232,7 @@ void dcn10_log_hw_state(struct dc *dc)
 
 	dcn10_log_hubbub_state(dc);
 
-	DTN_INFO("HUBP:  format  addr_hi  width  height"
-			"  rot  mir  sw_mode  dcc_en  blank_en  ttu_dis  underflow"
-			"   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
-	for (i = 0; i < pool->pipe_count; i++) {
-		struct hubp *hubp = pool->hubps[i];
-		struct dcn_hubp_state s;
-
-		hubp->funcs->hubp_read_state(hubp, &s);
-
-		DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh"
-				"  %6d  %8d  %7d  %8xh",
-				hubp->inst,
-				s.pixel_format,
-				s.inuse_addr_hi,
-				s.viewport_width,
-				s.viewport_height,
-				s.rotation_angle,
-				s.h_mirror_en,
-				s.sw_mode,
-				s.dcc_en,
-				s.blank_en,
-				s.ttu_disable,
-				s.underflow_status);
-		DTN_INFO_MICRO_SEC(s.min_ttu_vblank);
-		DTN_INFO_MICRO_SEC(s.qos_level_low_wm);
-		DTN_INFO_MICRO_SEC(s.qos_level_high_wm);
-		DTN_INFO("\n");
-	}
-	DTN_INFO("\n");
+	dcn10_log_hubp_states(dc);
 
 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
@@ -340,19 +324,6 @@ void dcn10_log_hw_state(struct dc *dc)
 	}
 	DTN_INFO("\n");
 
-	for (i = 0; i < pool->pipe_count; i++) {
-		struct hubp *hubp = pool->hubps[i];
-		struct dcn_hubp_state s = {0};
-
-		if (!dc->current_state->res_ctx.pipe_ctx[i].stream)
-			continue;
-
-		hubp->funcs->hubp_read_state(hubp, &s);
-		DTN_INFO("RQ-DLG-TTU registers for HUBP%d:\n", i);
-		print_rq_dlg_ttu_regs(dc_ctx, &s);
-		DTN_INFO("\n");
-	}
-
 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
 			dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 3866147fb02a..331f8ff57ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -56,26 +56,6 @@ struct hubp {
 	bool power_gated;
 };
 
-struct dcn_hubp_state {
-	struct _vcs_dpi_display_dlg_regs_st dlg_attr;
-	struct _vcs_dpi_display_ttu_regs_st ttu_attr;
-	struct _vcs_dpi_display_rq_regs_st rq_regs;
-	uint32_t pixel_format;
-	uint32_t inuse_addr_hi;
-	uint32_t viewport_width;
-	uint32_t viewport_height;
-	uint32_t rotation_angle;
-	uint32_t h_mirror_en;
-	uint32_t sw_mode;
-	uint32_t dcc_en;
-	uint32_t blank_en;
-	uint32_t underflow_status;
-	uint32_t ttu_disable;
-	uint32_t min_ttu_vblank;
-	uint32_t qos_level_low_wm;
-	uint32_t qos_level_high_wm;
-};
-
 struct hubp_funcs {
 	void (*hubp_setup)(
 			struct hubp *hubp,
@@ -140,7 +120,7 @@ struct hubp_funcs {
 
 	void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
 	void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
-	void (*hubp_read_state)(struct hubp *hubp, struct dcn_hubp_state *s);
+	void (*hubp_read_state)(struct hubp *hubp);
 
 };
 

From 8ec06a179613f7b7379daf78844afe1cc301e2c7 Mon Sep 17 00:00:00 2001
From: Julian Parkin <jparkin@amd.com>
Date: Fri, 13 Apr 2018 13:23:02 -0400
Subject: [PATCH 0902/1286] drm/amd/display: Add assert that chroma pitch is
 non zero

Signed-off-by: Julian Parkin <jparkin@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 159bebcfd521..0cbc83edd37f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -146,6 +146,9 @@ void hubp1_program_size_and_rotation(
 	 * 444 or 420 luma
 	 */
 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+		ASSERT(plane_size->video.chroma_pitch != 0);
+		/* Chroma pitch zero can cause system hang! */
+
 		pitch = plane_size->video.luma_pitch - 1;
 		meta_pitch = dcc->video.meta_pitch_l - 1;
 		pitch_c = plane_size->video.chroma_pitch - 1;

From 7ab3fdde04218c4733e96712b651751c413d51c3 Mon Sep 17 00:00:00 2001
From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
Date: Tue, 17 Apr 2018 13:49:48 -0400
Subject: [PATCH 0903/1286] drm/amd/display: Update MST edid property every
 time

Extended fix to: "Don't read EDID in atomic_check"

Fix the connector's EDID property not being visible to GUI display
tools after a hot plug.

Call drm_mode_connector_update_edid_property every time in the
.get_modes hook, because the EDID property reported through the
usermode DRM_IOCTL_MODE_GETCONNECTOR ioctl is removed on every hot
unplug.
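
As an illustration only (simplified, and not the literal hunk below),
the intended .get_modes ordering is roughly:

    static int get_modes_sketch(struct drm_connector *connector)
    {
            struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

            /* Fetch the EDID once per plug; it is kept across calls. */
            if (!aconnector->edid)
                    aconnector->edid = drm_dp_mst_get_edid(connector,
                                    &aconnector->mst_port->mst_mgr,
                                    aconnector->port);

            /* Re-attach the property unconditionally: hot unplug has
             * already dropped the previous value. */
            drm_mode_connector_update_edid_property(&aconnector->base,
                                                    aconnector->edid);

            return drm_add_edid_modes(connector, aconnector->edid);
    }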

Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 305292a9ff80..8c1d084429dc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -253,11 +253,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 		if (aconnector->dc_sink)
 			amdgpu_dm_add_sink_to_freesync_module(
 					connector, edid);
-
-		drm_mode_connector_update_edid_property(
-						&aconnector->base, edid);
 	}
 
+	drm_mode_connector_update_edid_property(
+					&aconnector->base, aconnector->edid);
+
 	ret = drm_add_edid_modes(connector, aconnector->edid);
 
 	return ret;

From 6b622181230e7c9286f594e4e51266b6f019b031 Mon Sep 17 00:00:00 2001
From: Julian Parkin <jparkin@amd.com>
Date: Tue, 17 Apr 2018 11:49:06 -0400
Subject: [PATCH 0904/1286] drm/amd/display: reprogram infoframe during
 apply_ctx_to_hw

To ensure the infoframe gets updated during an SDR/HDR switch,
this change adds a new function to check whether the HDR static
metadata has changed and includes it in the is_timing_changed and
pipe_need_reprogram checks.

Signed-off-by: Julian Parkin <jparkin@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_resource.c  | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 447729cd29f0..4de1b443e438 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1310,6 +1310,19 @@ bool dc_add_all_planes_for_stream(
 }
 
 
+static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
+	struct dc_stream_state *new_stream)
+{
+	if (cur_stream == NULL)
+		return true;
+
+	if (memcmp(&cur_stream->hdr_static_metadata,
+			&new_stream->hdr_static_metadata,
+			sizeof(struct dc_info_packet)) != 0)
+		return true;
+
+	return false;
+}
 
 static bool is_timing_changed(struct dc_stream_state *cur_stream,
 		struct dc_stream_state *new_stream)
@@ -1345,6 +1358,9 @@ static bool are_stream_backends_same(
 	if (is_timing_changed(stream_a, stream_b))
 		return false;
 
+	if (is_hdr_static_meta_changed(stream_a, stream_b))
+		return false;
+
 	return true;
 }
 
@@ -2548,6 +2564,8 @@ bool pipe_need_reprogram(
 	if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
 		return true;
 
+	if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+		return true;
 
 	return false;
 }

From 70ee2def89e678940190b2c2f7d65fdef5647e07 Mon Sep 17 00:00:00 2001
From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
Date: Wed, 9 May 2018 14:15:16 -0500
Subject: [PATCH 0905/1286] drm/amd/display: Check dc_sink every time in MST
 hotplug

Extended fix to: "Don't read EDID in atomic_check"

Fix the issue of a missing dc_sink in .mode_valid during the hot plug
routine.

The dc_sink needs to be checked every time in the .get_modes hook after
checking the EDID, since the EDID is not removed on hot unplug but the
dc_sink is.

Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Roman Li <Roman.Li@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8c1d084429dc..ace9ad578ca0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -226,10 +226,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
 	if (!aconnector->edid) {
 		struct edid *edid;
-		struct dc_sink *dc_sink;
-		struct dc_sink_init_data init_params = {
-				.link = aconnector->dc_link,
-				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
 		if (!edid) {
@@ -240,11 +236,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 		}
 
 		aconnector->edid = edid;
+	}
 
+	if (!aconnector->dc_sink) {
+		struct dc_sink *dc_sink;
+		struct dc_sink_init_data init_params = {
+				.link = aconnector->dc_link,
+				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 		dc_sink = dc_link_add_remote_sink(
 			aconnector->dc_link,
-			(uint8_t *)edid,
-			(edid->extensions + 1) * EDID_LENGTH,
+			(uint8_t *)aconnector->edid,
+			(aconnector->edid->extensions + 1) * EDID_LENGTH,
 			&init_params);
 
 		dc_sink->priv = aconnector;
@@ -252,7 +254,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
 		if (aconnector->dc_sink)
 			amdgpu_dm_add_sink_to_freesync_module(
-					connector, edid);
+					connector, aconnector->edid);
 	}
 
 	drm_mode_connector_update_edid_property(

From 3bff0796e90960b0abfea21c7f2b50d2d6a19bd9 Mon Sep 17 00:00:00 2001
From: Martin Tsai <Martin.Tsai@amd.com>
Date: Tue, 17 Apr 2018 17:20:06 -0400
Subject: [PATCH 0906/1286] drm/amd/display: to synchronize the hubp and dpp
 programming in cursor control

Signed-off-by: Martin Tsai <Martin.Tsai@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_stream.c    | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 3b2ddbd8c054..3732a1de9d6c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -178,6 +178,7 @@ bool dc_stream_set_cursor_attributes(
 	int i;
 	struct dc  *core_dc;
 	struct resource_context *res_ctx;
+	struct pipe_ctx *pipe_to_program = NULL;
 
 	if (NULL == stream) {
 		dm_error("DC: dc_stream is NULL!\n");
@@ -205,9 +206,17 @@ bool dc_stream_set_cursor_attributes(
 		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
 			continue;
 
+		if (!pipe_to_program) {
+			pipe_to_program = pipe_ctx;
+			core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+		}
 
 		core_dc->hwss.set_cursor_attribute(pipe_ctx);
 	}
+
+	if (pipe_to_program)
+		core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+
 	return true;
 }
 
@@ -218,6 +227,7 @@ bool dc_stream_set_cursor_position(
 	int i;
 	struct dc  *core_dc;
 	struct resource_context *res_ctx;
+	struct pipe_ctx *pipe_to_program = NULL;
 
 	if (NULL == stream) {
 		dm_error("DC: dc_stream is NULL!\n");
@@ -243,9 +253,17 @@ bool dc_stream_set_cursor_position(
 				!pipe_ctx->plane_res.ipp)
 			continue;
 
+		if (!pipe_to_program) {
+			pipe_to_program = pipe_ctx;
+			core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+		}
+
 		core_dc->hwss.set_cursor_position(pipe_ctx);
 	}
 
+	if (pipe_to_program)
+		core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+
 	return true;
 }
 

From 34ff937029edf708d7db0d450b5f505969b68950 Mon Sep 17 00:00:00 2001
From: Tony Cheng <tony.cheng@amd.com>
Date: Mon, 16 Apr 2018 13:30:41 -0400
Subject: [PATCH 0907/1286] drm/amd/display: dal 3.1.44

Signed-off-by: Tony Cheng <tony.cheng@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 92152980b0ce..cd4f4341cb53 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.43"
+#define DC_VER "3.1.44"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6

From 403dc5e8aab7b3d18a625e516698ea49d2be432e Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Tue, 10 Apr 2018 16:06:34 -0400
Subject: [PATCH 0908/1286] drm/amd/display: Use int for calculating vline
 start

We cannot be sure these calculations will never produce negative
values. Use signed integers, and warn and cap at 0 if that ever
happens.
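
A minimal sketch of the warn-and-clamp idea (illustrative only; the
hunk below contains just the type change, and 'blank_end'/'offset' are
placeholders rather than fields from the driver):

    static int clamp_vline_start(int blank_end, int offset)
    {
            int vertical_line_start = blank_end - offset;

            if (vertical_line_start < 0) {
                    ASSERT(0);                      /* warn */
                    vertical_line_start = 0;        /* cap at 0 */
            }

            return vertical_line_start;
    }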

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 2c5dbece928e..c734b7fa5835 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -96,10 +96,10 @@ static void optc1_disable_stereo(struct timing_generator *optc)
 static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
 {
 	struct dc_crtc_timing patched_crtc_timing;
-	uint32_t vesa_sync_start;
-	uint32_t asic_blank_end;
-	uint32_t interlace_factor;
-	uint32_t vertical_line_start;
+	int vesa_sync_start;
+	int asic_blank_end;
+	int interlace_factor;
+	int vertical_line_start;
 
 	patched_crtc_timing = *dc_crtc_timing;
 	optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);

From ada8ce1530a7d1bf2e37560afa57bad911d36e81 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Fri, 20 Apr 2018 10:53:50 -0400
Subject: [PATCH 0909/1286] drm/amd/display: Couple formatting fixes

Things such as a mis-indented line and a stray space at the beginning
of a line.

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c       | 3 ++-
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c       | 6 +++---
 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 96a57be3ceb6..3be17e26120d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2692,6 +2692,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 	const struct dc_link *link = aconnector->dc_link;
 	struct amdgpu_device *adev = connector->dev->dev_private;
 	struct amdgpu_display_manager *dm = &adev->dm;
+
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
@@ -4649,7 +4650,7 @@ static int dm_update_crtcs_state(struct dc *dc,
 		if (aconnector && enable) {
 			// Make sure fake sink is created in plug-in scenario
 			new_con_state = drm_atomic_get_connector_state(state,
- 								    &aconnector->base);
+								    &aconnector->base);
 
 			if (IS_ERR(new_con_state)) {
 				ret = PTR_ERR_OR_ZERO(new_con_state);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 4de1b443e438..9eb731fb5251 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1798,9 +1798,9 @@ enum dc_status dc_validate_global_state(
 		return DC_ERROR_UNEXPECTED;
 
 	if (dc->res_pool->funcs->validate_global) {
-			result = dc->res_pool->funcs->validate_global(dc, new_ctx);
-			if (result != DC_OK)
-				return result;
+		result = dc->res_pool->funcs->validate_global(dc, new_ctx);
+		if (result != DC_OK)
+			return result;
 	}
 
 	for (i = 0; i < new_ctx->stream_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 0a476636c5c7..00c0a1ef15eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -430,7 +430,7 @@ static struct stream_encoder *dce112_stream_encoder_create(
 
 	if (!enc110)
 		return NULL;
-	
+
 	dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
 					&stream_enc_regs[eng_id],
 					&se_shift, &se_mask);

From 868b83511ba92b24ce5c5d852cf16b1bc07e13a7 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Fri, 20 Apr 2018 11:05:07 -0400
Subject: [PATCH 0910/1286] drm/amd/display: Add VG12 ASIC IDs

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/include/dal_asic_id.h | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 3e8e535e08f2..1b987b6a347d 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -120,9 +120,14 @@
 
 #define AI_GREENLAND_P_A0 1
 #define AI_GREENLAND_P_A1 2
+#define AI_UNKNOWN 0xFF
 
-#define ASICREV_IS_GREENLAND_M(eChipRev)  (eChipRev < AI_UNKNOWN)
-#define ASICREV_IS_GREENLAND_P(eChipRev)  (eChipRev < AI_UNKNOWN)
+#define AI_VEGA12_P_A0 20
+#define ASICREV_IS_GREENLAND_M(eChipRev)  (eChipRev < AI_VEGA12_P_A0)
+#define ASICREV_IS_GREENLAND_P(eChipRev)  (eChipRev < AI_VEGA12_P_A0)
+
+#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
+#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
 
 /* DCN1_0 */
 #define INTERNAL_REV_RAVEN_A0             0x00    /* First spin of Raven */

From 3aabfcd70077743a5943acd86e70604945e384c1 Mon Sep 17 00:00:00 2001
From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
Date: Mon, 5 Mar 2018 14:59:57 -0500
Subject: [PATCH 0911/1286] drm/amd: Add BIOS smu_info v3_3 required struct
 def.

Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/include/atomfirmware.h | 170 ++++++++++++++++++++-
 1 file changed, 168 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0f5ad54d3fd3..de177ce8ca80 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -501,6 +501,32 @@ enum atom_cooling_solution_id{
   LIQUID_COOLING = 0x01
 };
 
+struct atom_firmware_info_v3_2 {
+  struct atom_common_table_header table_header;
+  uint32_t firmware_revision;
+  uint32_t bootup_sclk_in10khz;
+  uint32_t bootup_mclk_in10khz;
+  uint32_t firmware_capability;             // enum atombios_firmware_capability
+  uint32_t main_call_parser_entry;          /* direct address of main parser call in VBIOS binary. */
+  uint32_t bios_scratch_reg_startaddr;      // 1st bios scratch register dword address
+  uint16_t bootup_vddc_mv;
+  uint16_t bootup_vddci_mv;
+  uint16_t bootup_mvddc_mv;
+  uint16_t bootup_vddgfx_mv;
+  uint8_t  mem_module_id;
+  uint8_t  coolingsolution_id;              /*0: Air cooling; 1: Liquid cooling ... */
+  uint8_t  reserved1[2];
+  uint32_t mc_baseaddr_high;
+  uint32_t mc_baseaddr_low;
+  uint8_t  board_i2c_feature_id;            // enum of atom_board_i2c_feature_id_def
+  uint8_t  board_i2c_feature_gpio_id;       // i2c id find in gpio_lut data table gpio_id
+  uint8_t  board_i2c_feature_slave_addr;
+  uint8_t  reserved3;
+  uint16_t bootup_mvddq_mv;
+  uint16_t bootup_mvpp_mv;
+  uint32_t zfbstartaddrin16mb;
+  uint32_t reserved2[3];
+};
 
 /* 
   ***************************************************************************
@@ -1169,7 +1195,29 @@ struct  atom_gfx_info_v2_2
   uint32_t rlc_gpu_timer_refclk; 
 };
 
-
+struct  atom_gfx_info_v2_3 {
+  struct  atom_common_table_header  table_header;
+  uint8_t gfxip_min_ver;
+  uint8_t gfxip_max_ver;
+  uint8_t max_shader_engines;
+  uint8_t max_tile_pipes;
+  uint8_t max_cu_per_sh;
+  uint8_t max_sh_per_se;
+  uint8_t max_backends_per_se;
+  uint8_t max_texture_channel_caches;
+  uint32_t regaddr_cp_dma_src_addr;
+  uint32_t regaddr_cp_dma_src_addr_hi;
+  uint32_t regaddr_cp_dma_dst_addr;
+  uint32_t regaddr_cp_dma_dst_addr_hi;
+  uint32_t regaddr_cp_dma_command;
+  uint32_t regaddr_cp_status;
+  uint32_t regaddr_rlc_gpu_clock_32;
+  uint32_t rlc_gpu_timer_refclk;
+  uint8_t active_cu_per_sh;
+  uint8_t active_rb_per_se;
+  uint16_t gcgoldenoffset;
+  uint32_t rm21_sram_vmin_value;
+};
 
 /* 
   ***************************************************************************
@@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1
   uint8_t  fw_ctf_polarity;         // GPIO polarity for CTF
 };
 
+struct atom_smu_info_v3_2 {
+  struct   atom_common_table_header  table_header;
+  uint8_t  smuip_min_ver;
+  uint8_t  smuip_max_ver;
+  uint8_t  smu_rsd1;
+  uint8_t  gpuclk_ss_mode;
+  uint16_t sclk_ss_percentage;
+  uint16_t sclk_ss_rate_10hz;
+  uint16_t gpuclk_ss_percentage;    // in unit of 0.001%
+  uint16_t gpuclk_ss_rate_10hz;
+  uint32_t core_refclk_10khz;
+  uint8_t  ac_dc_gpio_bit;          // GPIO bit shift in SMU_GPIOPAD_A  configured for AC/DC switching, =0xff means invalid
+  uint8_t  ac_dc_polarity;          // GPIO polarity for AC/DC switching
+  uint8_t  vr0hot_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A  configured for VR0 HOT event, =0xff means invalid
+  uint8_t  vr0hot_polarity;         // GPIO polarity for VR0 HOT event
+  uint8_t  vr1hot_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid
+  uint8_t  vr1hot_polarity;         // GPIO polarity for VR1 HOT event
+  uint8_t  fw_ctf_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
+  uint8_t  fw_ctf_polarity;         // GPIO polarity for CTF
+  uint8_t  pcc_gpio_bit;            // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
+  uint8_t  pcc_gpio_polarity;       // GPIO polarity for CTF
+  uint16_t smugoldenoffset;
+  uint32_t gpupll_vco_freq_10khz;
+  uint32_t bootup_smnclk_10khz;
+  uint32_t bootup_socclk_10khz;
+  uint32_t bootup_mp0clk_10khz;
+  uint32_t bootup_mp1clk_10khz;
+  uint32_t bootup_lclk_10khz;
+  uint32_t bootup_dcefclk_10khz;
+  uint32_t ctf_threshold_override_value;
+  uint32_t reserved[5];
+};
+
+struct atom_smu_info_v3_3 {
+  struct   atom_common_table_header  table_header;
+  uint8_t  smuip_min_ver;
+  uint8_t  smuip_max_ver;
+  uint8_t  smu_rsd1;
+  uint8_t  gpuclk_ss_mode;
+  uint16_t sclk_ss_percentage;
+  uint16_t sclk_ss_rate_10hz;
+  uint16_t gpuclk_ss_percentage;    // in unit of 0.001%
+  uint16_t gpuclk_ss_rate_10hz;
+  uint32_t core_refclk_10khz;
+  uint8_t  ac_dc_gpio_bit;          // GPIO bit shift in SMU_GPIOPAD_A  configured for AC/DC switching, =0xff means invalid
+  uint8_t  ac_dc_polarity;          // GPIO polarity for AC/DC switching
+  uint8_t  vr0hot_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A  configured for VR0 HOT event, =0xff means invalid
+  uint8_t  vr0hot_polarity;         // GPIO polarity for VR0 HOT event
+  uint8_t  vr1hot_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid
+  uint8_t  vr1hot_polarity;         // GPIO polarity for VR1 HOT event
+  uint8_t  fw_ctf_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
+  uint8_t  fw_ctf_polarity;         // GPIO polarity for CTF
+  uint8_t  pcc_gpio_bit;            // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
+  uint8_t  pcc_gpio_polarity;       // GPIO polarity for CTF
+  uint16_t smugoldenoffset;
+  uint32_t gpupll_vco_freq_10khz;
+  uint32_t bootup_smnclk_10khz;
+  uint32_t bootup_socclk_10khz;
+  uint32_t bootup_mp0clk_10khz;
+  uint32_t bootup_mp1clk_10khz;
+  uint32_t bootup_lclk_10khz;
+  uint32_t bootup_dcefclk_10khz;
+  uint32_t ctf_threshold_override_value;
+  uint32_t syspll3_0_vco_freq_10khz;
+  uint32_t syspll3_1_vco_freq_10khz;
+  uint32_t bootup_fclk_10khz;
+  uint32_t bootup_waflclk_10khz;
+  uint32_t reserved[3];
+};
+
 /*
  ***************************************************************************
    Data Table smc_dpm_info  structure
@@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1
 	uint32_t boardreserved[10];
 };
 
-
 /* 
   ***************************************************************************
     Data Table asic_profiling_info  structure
@@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id
   SMU9_SYSPLL0_DISPCLK_ID  = 11,      //	DISPCLK
 };
 
+enum atom_smu11_syspll_id {
+  SMU11_SYSPLL0_ID            = 0,
+  SMU11_SYSPLL1_0_ID          = 1,
+  SMU11_SYSPLL1_1_ID          = 2,
+  SMU11_SYSPLL1_2_ID          = 3,
+  SMU11_SYSPLL2_ID            = 4,
+  SMU11_SYSPLL3_0_ID          = 5,
+  SMU11_SYSPLL3_1_ID          = 6,
+};
+
+
+enum atom_smu11_syspll0_clock_id {
+  SMU11_SYSPLL0_SOCCLK_ID   = 0,       //	SOCCLK
+  SMU11_SYSPLL0_MP0CLK_ID   = 1,       //	MP0CLK
+  SMU11_SYSPLL0_DCLK_ID     = 2,       //	DCLK
+  SMU11_SYSPLL0_VCLK_ID     = 3,       //	VCLK
+  SMU11_SYSPLL0_ECLK_ID     = 4,       //	ECLK
+  SMU11_SYSPLL0_DCEFCLK_ID  = 5,       //	DCEFCLK
+};
+
+
+enum atom_smu11_syspll1_0_clock_id {
+  SMU11_SYSPLL1_0_UCLKA_ID   = 0,       // UCLK_a
+};
+
+enum atom_smu11_syspll1_1_clock_id {
+  SMU11_SYSPLL1_0_UCLKB_ID   = 0,       // UCLK_b
+};
+
+enum atom_smu11_syspll1_2_clock_id {
+  SMU11_SYSPLL1_0_FCLK_ID   = 0,        // FCLK
+};
+
+enum atom_smu11_syspll2_clock_id {
+  SMU11_SYSPLL2_GFXCLK_ID   = 0,        // GFXCLK
+};
+
+enum atom_smu11_syspll3_0_clock_id {
+  SMU11_SYSPLL3_0_WAFCLK_ID = 0,       //	WAFCLK
+  SMU11_SYSPLL3_0_DISPCLK_ID = 1,      //	DISPCLK
+  SMU11_SYSPLL3_0_DPREFCLK_ID = 2,     //	DPREFCLK
+};
+
+enum atom_smu11_syspll3_1_clock_id {
+  SMU11_SYSPLL3_1_MP1CLK_ID = 0,       //	MP1CLK
+  SMU11_SYSPLL3_1_SMNCLK_ID = 1,       //	SMNCLK
+  SMU11_SYSPLL3_1_LCLK_ID = 2,         //	LCLK
+};
+
 struct  atom_get_smu_clock_info_output_parameters_v3_1
 {
   union {

From d66057830c12855253a5c2d246ef26a66b43e2d5 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Fri, 20 Apr 2018 10:56:18 -0400
Subject: [PATCH 0912/1286] drm/amd/display: Add get_firmware_info_v3_2 for
 VG12

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/bios/bios_parser2.c    | 86 ++++++++++++++++++-
 1 file changed, 85 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 985fe8c22875..10a5807a7e8b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1(
 	struct bios_parser *bp,
 	struct dc_firmware_info *info);
 
+static enum bp_result get_firmware_info_v3_2(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info);
+
 static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
 		struct atom_display_object_path_v2 *object);
 
@@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info(
 		case 3:
 			switch (revision.minor) {
 			case 1:
-			case 2:
 				result = get_firmware_info_v3_1(bp, info);
 				break;
+			case 2:
+				result = get_firmware_info_v3_2(bp, info);
+				break;
 			default:
 				break;
 			}
@@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1(
 	return BP_RESULT_OK;
 }
 
+static enum bp_result get_firmware_info_v3_2(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info)
+{
+	struct atom_firmware_info_v3_2 *firmware_info;
+	struct atom_display_controller_info_v4_1 *dce_info = NULL;
+	struct atom_common_table_header *header;
+	struct atom_data_revision revision;
+	struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL;
+	struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL;
+
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2,
+			DATA_TABLES(firmwareinfo));
+
+	dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
+			DATA_TABLES(dce_info));
+
+	if (!firmware_info || !dce_info)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	header = GET_IMAGE(struct atom_common_table_header,
+					DATA_TABLES(smu_info));
+	get_atom_data_table_revision(header, &revision);
+
+	if (revision.minor == 2) {
+		/* Vega12 */
+		smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
+							DATA_TABLES(smu_info));
+
+		if (!smu_info_v3_2)
+			return BP_RESULT_BADBIOSTABLE;
+
+		info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
+	} else if (revision.minor == 3) {
+		/* Vega20 */
+		smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
+							DATA_TABLES(smu_info));
+
+		if (!smu_info_v3_3)
+			return BP_RESULT_BADBIOSTABLE;
+
+		info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
+	}
+
+	 // We need to convert from 10KHz units into KHz units.
+	info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+
+	 /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */
+	info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
+	/* Hardcode frequency if BIOS gives no DCE Ref Clk */
+	if (info->pll_info.crystal_frequency == 0) {
+		if (revision.minor == 2)
+			info->pll_info.crystal_frequency = 27000;
+		else if (revision.minor == 3)
+			info->pll_info.crystal_frequency = 100000;
+	}
+	/*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/
+	info->dp_phy_ref_clk     = dce_info->dpphy_refclk_10khz * 10;
+	info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
+
+	/* Get GPU PLL VCO Clock */
+	if (bp->cmd_tbl.get_smu_clock_info != NULL) {
+		if (revision.minor == 2)
+			info->smu_gpu_pll_output_freq =
+					bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
+		else if (revision.minor == 3)
+			info->smu_gpu_pll_output_freq =
+					bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
+	}
+
+	return BP_RESULT_OK;
+}
+
 static enum bp_result bios_parser_get_encoder_cap_info(
 	struct dc_bios *dcb,
 	struct graphics_object_id object_id,

From 8a61bc085ffab3071c59efcbeff4044c034e7490 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland@amd.com>
Date: Tue, 24 Apr 2018 10:49:20 -0400
Subject: [PATCH 0913/1286] drm/amd/display: Don't return ddc result and
 read_bytes in same return value

The two value ranges overlap, so a single return value cannot
distinguish a ddc_result status code from a byte count.
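
A standalone illustration (not driver code) of why one return value is
ambiguous, and of the out-parameter split used instead:

    #include <stdint.h>
    #include <stdio.h>

    enum fake_ddc_result { RES_UNKNOWN = 0, RES_SUCCESS = 1, RES_FAILED = 2 };

    /* Old style: a return of 2 could mean "2 bytes read" or RES_FAILED. */
    static int old_read(void) { return 2; }

    /* New style: status and byte count travel separately. */
    static enum fake_ddc_result new_read(uint32_t *read_bytes)
    {
            *read_bytes = 2;
            return RES_SUCCESS;
    }

    int main(void)
    {
            uint32_t n;
            enum fake_ddc_result res = new_read(&n);

            printf("old=%d  new: res=%d bytes=%u\n", old_read(), res, n);
            return 0;
    }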

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 20 +++++++++++--------
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10 +++++++---
 .../gpu/drm/amd/display/dc/inc/dc_link_ddc.h  |  5 +++--
 3 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ace9ad578ca0..4304d9e408b8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 	enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
 		I2C_MOT_TRUE : I2C_MOT_FALSE;
 	enum ddc_result res;
-	ssize_t read_bytes;
+	uint32_t read_bytes = msg->size;
 
 	if (WARN_ON(msg->size > 16))
 		return -E2BIG;
 
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
 	case DP_AUX_NATIVE_READ:
-		read_bytes = dal_ddc_service_read_dpcd_data(
+		res = dal_ddc_service_read_dpcd_data(
 				TO_DM_AUX(aux)->ddc_service,
 				false,
 				I2C_MOT_UNDEF,
 				msg->address,
 				msg->buffer,
-				msg->size);
-		return read_bytes;
+				msg->size,
+				&read_bytes);
+		break;
 	case DP_AUX_NATIVE_WRITE:
 		res = dal_ddc_service_write_dpcd_data(
 				TO_DM_AUX(aux)->ddc_service,
@@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 				msg->size);
 		break;
 	case DP_AUX_I2C_READ:
-		read_bytes = dal_ddc_service_read_dpcd_data(
+		res = dal_ddc_service_read_dpcd_data(
 				TO_DM_AUX(aux)->ddc_service,
 				true,
 				mot,
 				msg->address,
 				msg->buffer,
-				msg->size);
-		return read_bytes;
+				msg->size,
+				&read_bytes);
+		break;
 	case DP_AUX_I2C_WRITE:
 		res = dal_ddc_service_write_dpcd_data(
 				TO_DM_AUX(aux)->ddc_service,
@@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 		 r == DDC_RESULT_SUCESSFULL);
 #endif
 
-	return msg->size;
+	if (res != DDC_RESULT_SUCESSFULL)
+		return -EIO;
+	return read_bytes;
 }
 
 static enum drm_connector_status
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 49c2face1e7a..ae48d603ebd6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data(
 	return ret;
 }
 
-ssize_t dal_ddc_service_read_dpcd_data(
+enum ddc_result dal_ddc_service_read_dpcd_data(
 	struct ddc_service *ddc,
 	bool i2c,
 	enum i2c_mot_mode mot,
 	uint32_t address,
 	uint8_t *data,
-	uint32_t len)
+	uint32_t len,
+	uint32_t *read)
 {
 	struct aux_payload read_payload = {
 		.i2c_over_aux = i2c,
@@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
 		.mot = mot
 	};
 
+	*read = 0;
+
 	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
 		BREAK_TO_DEBUGGER();
 		return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
 		ddc->ctx->i2caux,
 		ddc->ddc_pin,
 		&command)) {
-		return (ssize_t)command.payloads->length;
+		*read = command.payloads->length;
+		return DDC_RESULT_SUCESSFULL;
 	}
 
 	return DDC_RESULT_FAILED_OPERATION;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 090b7a8dd67b..30b3a08b91be 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data(
 		uint8_t *read_buf,
 		uint32_t read_size);
 
-ssize_t dal_ddc_service_read_dpcd_data(
+enum ddc_result dal_ddc_service_read_dpcd_data(
 		struct ddc_service *ddc,
 		bool i2c,
 		enum i2c_mot_mode mot,
 		uint32_t address,
 		uint8_t *data,
-		uint32_t len);
+		uint32_t len,
+		uint32_t *read);
 
 enum ddc_result dal_ddc_service_write_dpcd_data(
 		struct ddc_service *ddc,

From f3336254263c3b6a7734379084f4873a335f1577 Mon Sep 17 00:00:00 2001
From: Xiaojie Yuan <Xiaojie.Yuan@amd.com>
Date: Thu, 26 Apr 2018 18:50:26 +0800
Subject: [PATCH 0914/1286] drm/amdgpu/uvd7: add emit_reg_write_reg_wait ring
 callback

Fix the NULL pointer dereference while running amdgpu_test:

[   54.972246] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
[   54.972265] IP:           (null)
[   54.972273] PGD 0 P4D 0
[   54.972280] Oops: 0010 [#1] SMP PTI
[   54.972288] Modules linked in: amdkfd amd_iommu_v2 amdgpu(OE) chash gpu_sched ttm drm_kms_helper drm i2c_algo_bit fb_sys_fops syscopyarea sysfillrect sysimgblt snd_hda_codec_realtek snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_codec snd_hda_core snd_hwdep intel_rapl snd_pcm snd_seq_midi snd_seq_midi_event snd_rawmidi x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel snd_seq snd_seq_device kvm irqbypass snd_timer crct10dif_pclmul crc32_pclmul ghash_clmulni_intel pcbc snd soundcore joydev input_leds aesni_intel aes_x86_64 crypto_simd glue_helper cryptd idma64 virt_dma mei_me intel_lpss_pci serio_raw intel_cstate intel_rapl_perf shpchp intel_pch_thermal mei mac_hid intel_lpss acpi_pad parport_pc ppdev nfsd lp auth_rpcgss nfs_acl lockd grace sunrpc parport autofs4 hid_generic
[   54.972434]  usbhid mxm_wmi e1000e psmouse ahci hid libahci wmi pinctrl_sunrisepoint video pinctrl_intel
[   54.972457] CPU: 6 PID: 1393 Comm: uvd Tainted: G           OE    4.16.0-rc7-27fb84fda777 #1
[   54.972473] Hardware name: MSI MS-7984/Z170 KRAIT GAMING (MS-7984), BIOS B.80 05/11/2016
[   54.972489] RIP: 0010:          (null)
[   54.972497] RSP: 0018:ffffaea002c8bcc0 EFLAGS: 00010202
[   54.972508] RAX: 0000000000000000 RBX: ffff9d30d3c56f60 RCX: 00000000007c0002
[   54.972522] RDX: 000000000001a6fb RSI: 000000000001a6e9 RDI: ffff9d30d3c56f60
[   54.972536] RBP: ffffaea002c8bd10 R08: 0000000000000002 R09: ffffffffc06977d0
[   54.972550] R10: 0000000000000040 R11: 0000000000000000 R12: 0000000000000002
[   54.972564] R13: ffff9d30d3c5001c R14: ffff9d30d3c50000 R15: 0000000000000006
[   54.972579] FS:  0000000000000000(0000) GS:ffff9d30eed80000(0000) knlGS:0000000000000000
[   54.972594] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   54.972606] CR2: 0000000000000000 CR3: 00000002dbc0a001 CR4: 00000000003606e0
[   54.972620] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[   54.972634] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[   54.972648] Call Trace:
[   54.972685]  ? gmc_v9_0_emit_flush_gpu_tlb+0x111/0x140 [amdgpu]
[   54.972721]  uvd_v7_0_ring_emit_vm_flush+0x31/0x70 [amdgpu]
[   54.972751]  amdgpu_vm_flush+0x5dc/0x6c0 [amdgpu]
[   54.972787]  ? pp_dpm_powergate_uvd+0x50/0x80 [amdgpu]
[   54.972816]  amdgpu_ib_schedule+0x120/0x4e0 [amdgpu]
[   54.972850]  amdgpu_job_run+0x17b/0x1c0 [amdgpu]
[   54.972861]  drm_sched_main+0x2cc/0x490 [gpu_sched]
[   54.972873]  ? wait_woken+0x80/0x80
[   54.972882]  kthread+0x121/0x140
[   54.972891]  ? drm_sched_job_finish+0xf0/0xf0 [gpu_sched]
[   54.972902]  ? kthread_create_worker_on_cpu+0x70/0x70
[   54.972914]  ret_from_fork+0x35/0x40
[   54.972922] Code:  Bad RIP value.
[   54.972932] RIP:           (null) RSP: ffffaea002c8bcc0
[   54.972943] CR2: 0000000000000000
[   54.972951] ---[ end trace 5feb349263bbf633 ]---
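
The underlying pattern is an unpopulated callback in the ring-funcs
table being invoked through a generic path; a standalone illustration
(not amdgpu code):

    #include <stdio.h>

    struct ring_funcs {
            void (*emit_wreg)(void);
            void (*emit_reg_write_reg_wait)(void);  /* was left NULL */
    };

    static void wreg_impl(void)      { puts("wreg"); }
    static void generic_helper(void) { puts("write reg, then wait"); }

    int main(void)
    {
            struct ring_funcs funcs = { .emit_wreg = wreg_impl };

            /* Without this assignment the call below jumps to NULL,
             * which is the oops shown above. */
            funcs.emit_reg_write_reg_wait = generic_helper;
            funcs.emit_reg_write_reg_wait();
            return 0;
    }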

Signed-off-by: Xiaojie Yuan <Xiaojie.Yuan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 280c0826e183..2251db4048f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1671,6 +1671,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.end_use = amdgpu_uvd_ring_end_use,
 	.emit_wreg = uvd_v7_0_ring_emit_wreg,
 	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
 static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {

From f7dbe9186d85dd63df7868d408fea6859281c446 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
Date: Tue, 17 Apr 2018 12:25:22 +0200
Subject: [PATCH 0915/1286] drm/amd/display: Use kvzalloc for potentially large
 allocations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allocating up to 32 physically contiguous pages can easily fail (and has
failed for me), and isn't necessary anyway.
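
A kernel-style sketch of the pattern this moves to (illustrative only;
'struct big_table' stands in for the DC structures touched below). The
key point is that kvzalloc() may fall back to vmalloc(), so the buffer
must be released with kvfree() rather than kfree():

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct big_table { u32 entries[8192]; };

    static struct big_table *big_table_create(void)
    {
            /* No need for physically contiguous pages here. */
            return kvzalloc(sizeof(struct big_table), GFP_KERNEL);
    }

    static void big_table_destroy(struct big_table *t)
    {
            kvfree(t);      /* not kfree(): may be vmalloc-backed */
    }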

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_surface.c  | 14 ++--
 .../amd/display/modules/color/color_gamma.c   | 72 ++++++++++---------
 2 files changed, 45 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 959387705965..68a71adeb12e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -72,8 +72,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
 {
 	struct dc *core_dc = dc;
 
-	struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
-						     GFP_KERNEL);
+	struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
+						      GFP_KERNEL);
 
 	if (NULL == plane_state)
 		return NULL;
@@ -126,7 +126,7 @@ static void dc_plane_state_free(struct kref *kref)
 {
 	struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
 	destruct(plane_state);
-	kfree(plane_state);
+	kvfree(plane_state);
 }
 
 void dc_plane_state_release(struct dc_plane_state *plane_state)
@@ -142,7 +142,7 @@ void dc_gamma_retain(struct dc_gamma *gamma)
 static void dc_gamma_free(struct kref *kref)
 {
 	struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
-	kfree(gamma);
+	kvfree(gamma);
 }
 
 void dc_gamma_release(struct dc_gamma **gamma)
@@ -153,7 +153,7 @@ void dc_gamma_release(struct dc_gamma **gamma)
 
 struct dc_gamma *dc_create_gamma(void)
 {
-	struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
+	struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);
 
 	if (gamma == NULL)
 		goto alloc_fail;
@@ -173,7 +173,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf)
 static void dc_transfer_func_free(struct kref *kref)
 {
 	struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
-	kfree(tf);
+	kvfree(tf);
 }
 
 void dc_transfer_func_release(struct dc_transfer_func *tf)
@@ -183,7 +183,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
 
 struct dc_transfer_func *dc_create_transfer_func()
 {
-	struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
+	struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
 
 	if (tf == NULL)
 		goto alloc_fail;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index ad0ff50305ce..15e5b72e6e00 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1274,19 +1274,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 
 	output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
 
-	rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
-			   GFP_KERNEL);
+	rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+			    GFP_KERNEL);
 	if (!rgb_user)
 		goto rgb_user_alloc_fail;
-	rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
-			GFP_KERNEL);
+	rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+			       GFP_KERNEL);
 	if (!rgb_regamma)
 		goto rgb_regamma_alloc_fail;
-	axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
-			 GFP_KERNEL);
+	axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+			  GFP_KERNEL);
 	if (!axix_x)
 		goto axix_x_alloc_fail;
-	coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+	coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
 	if (!coeff)
 		goto coeff_alloc_fail;
 
@@ -1338,13 +1338,13 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 
 	ret = true;
 
-	kfree(coeff);
+	kvfree(coeff);
 coeff_alloc_fail:
-	kfree(axix_x);
+	kvfree(axix_x);
 axix_x_alloc_fail:
-	kfree(rgb_regamma);
+	kvfree(rgb_regamma);
 rgb_regamma_alloc_fail:
-	kfree(rgb_user);
+	kvfree(rgb_user);
 rgb_user_alloc_fail:
 	return ret;
 }
@@ -1480,19 +1480,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
 	input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
 
-	rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
-			   GFP_KERNEL);
+	rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+			    GFP_KERNEL);
 	if (!rgb_user)
 		goto rgb_user_alloc_fail;
-	curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
-			GFP_KERNEL);
+	curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+			 GFP_KERNEL);
 	if (!curve)
 		goto curve_alloc_fail;
-	axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
-			 GFP_KERNEL);
+	axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+			  GFP_KERNEL);
 	if (!axix_x)
 		goto axix_x_alloc_fail;
-	coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+	coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
 	if (!coeff)
 		goto coeff_alloc_fail;
 
@@ -1534,13 +1534,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
 	ret = true;
 
-	kfree(coeff);
+	kvfree(coeff);
 coeff_alloc_fail:
-	kfree(axix_x);
+	kvfree(axix_x);
 axix_x_alloc_fail:
-	kfree(curve);
+	kvfree(curve);
 curve_alloc_fail:
-	kfree(rgb_user);
+	kvfree(rgb_user);
 rgb_user_alloc_fail:
 
 	return ret;
@@ -1569,8 +1569,9 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
 		}
 		ret = true;
 	} else if (trans == TRANSFER_FUNCTION_PQ) {
-		rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
-						_EXTRA_POINTS), GFP_KERNEL);
+		rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+				       (MAX_HW_POINTS + _EXTRA_POINTS),
+				       GFP_KERNEL);
 		if (!rgb_regamma)
 			goto rgb_regamma_alloc_fail;
 		points->end_exponent = 7;
@@ -1590,11 +1591,12 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
 		}
 		ret = true;
 
-		kfree(rgb_regamma);
+		kvfree(rgb_regamma);
 	} else if (trans == TRANSFER_FUNCTION_SRGB ||
 			  trans == TRANSFER_FUNCTION_BT709) {
-		rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
-						_EXTRA_POINTS), GFP_KERNEL);
+		rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+				       (MAX_HW_POINTS + _EXTRA_POINTS),
+				       GFP_KERNEL);
 		if (!rgb_regamma)
 			goto rgb_regamma_alloc_fail;
 		points->end_exponent = 0;
@@ -1612,7 +1614,7 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
 		}
 		ret = true;
 
-		kfree(rgb_regamma);
+		kvfree(rgb_regamma);
 	}
 rgb_regamma_alloc_fail:
 	return ret;
@@ -1636,8 +1638,9 @@ bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 		}
 		ret = true;
 	} else if (trans == TRANSFER_FUNCTION_PQ) {
-		rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
-						_EXTRA_POINTS), GFP_KERNEL);
+		rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+				       (MAX_HW_POINTS +	_EXTRA_POINTS),
+				       GFP_KERNEL);
 		if (!rgb_degamma)
 			goto rgb_degamma_alloc_fail;
 
@@ -1652,11 +1655,12 @@ bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 		}
 		ret = true;
 
-		kfree(rgb_degamma);
+		kvfree(rgb_degamma);
 	} else if (trans == TRANSFER_FUNCTION_SRGB ||
 			  trans == TRANSFER_FUNCTION_BT709) {
-		rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
-						_EXTRA_POINTS), GFP_KERNEL);
+		rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+				       (MAX_HW_POINTS + _EXTRA_POINTS),
+				       GFP_KERNEL);
 		if (!rgb_degamma)
 			goto rgb_degamma_alloc_fail;
 
@@ -1670,7 +1674,7 @@ bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
 		}
 		ret = true;
 
-		kfree(rgb_degamma);
+		kvfree(rgb_degamma);
 	}
 	points->end_exponent = 0;
 	points->x_point_at_y1_red = 1;
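
The tables converted above can be large (MAX_HW_POINTS plus the extra points
worth of entries per allocation), which is why kvzalloc() is used: it attempts
a kmalloc first and falls back to vmalloc when contiguous pages are scarce,
and kvfree() releases memory obtained either way. A minimal sketch of the
pattern, using an illustrative struct and count rather than the driver's real
ones:

	/* Sketch of the kvzalloc/kvfree pattern; my_point and NUM_POINTS
	 * are illustrative, not the driver's actual types.
	 */
	struct my_point *tbl;

	tbl = kvzalloc(sizeof(*tbl) * NUM_POINTS, GFP_KERNEL);
	if (!tbl)
		return false;	/* both kmalloc and vmalloc paths failed */

	/* ... fill and use tbl ... */

	kvfree(tbl);		/* correct for either allocation path */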

From 87ac8fb08bc7c69a39842c73f3a9d06eb73f02cc Mon Sep 17 00:00:00 2001
From: Shirish S <shirish.s@amd.com>
Date: Wed, 25 Apr 2018 14:42:28 +0530
Subject: [PATCH 0916/1286] drm/amd/display: disable FBC on underlay pipe

FBC is not applicable to the underlay pipe, so do not
enable or disable it there.

This also fixes a BUG hit due to calling sleep in
atomic context.

Signed-off-by: Shirish S <shirish.s@amd.com>
Reviewed-by: Roman Li <Roman.Li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c    | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 51c6c70a4a30..2288d0aa773b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2747,6 +2747,9 @@ static void dce110_program_front_end_for_pipe(
 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct xfm_grph_csc_adjustment adjust;
 	struct out_csc_color_matrix tbl_entry;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+	unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+#endif
 	unsigned int i;
 	DC_LOGGER_INIT();
 	memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2788,7 +2791,9 @@ static void dce110_program_front_end_for_pipe(
 	program_scaler(dc, pipe_ctx);
 
 #if defined(CONFIG_DRM_AMD_DC_FBC)
-	if (dc->fbc_compressor && old_pipe->stream) {
+	/* fbc not applicable on Underlay pipe */
+	if (dc->fbc_compressor && old_pipe->stream &&
+	    pipe_ctx->pipe_idx != underlay_idx) {
 		if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
 			dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
 		else

From 00862edba135c30d65b9af863163fdfa1f502c61 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <michel.daenzer@amd.com>
Date: Wed, 25 Apr 2018 17:32:10 +0200
Subject: [PATCH 0917/1286] drm/ttm: Use GFP_TRANSHUGE_LIGHT for allocating
 huge pages
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

GFP_TRANSHUGE tries very hard to allocate huge pages, which can result
in long delays with high memory pressure. I have observed firefox
freezing for up to around a minute due to this while restic was taking
a full system backup.

Since we don't really need huge pages, use GFP_TRANSHUGE_LIGHT |
__GFP_NORETRY instead, in order to fail quickly when there are no huge
pages available.

Set __GFP_KSWAPD_RECLAIM as well, in order for huge pages to be freed
up in the background if necessary.

With these changes, I'm no longer seeing freezes during a restic backup.

Cc: stable@vger.kernel.org
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c     | 11 ++++++++---
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |  3 ++-
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index f0481b7b60c5..06c94e3a5f15 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -910,7 +910,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 			while (npages >= HPAGE_PMD_NR) {
 				gfp_t huge_flags = gfp_flags;
 
-				huge_flags |= GFP_TRANSHUGE;
+				huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+					__GFP_KSWAPD_RECLAIM;
 				huge_flags &= ~__GFP_MOVABLE;
 				huge_flags &= ~__GFP_COMP;
 				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
@@ -1027,11 +1028,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 				  GFP_USER | GFP_DMA32, "uc dma", 0);
 
 	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
-				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
+				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+				   __GFP_KSWAPD_RECLAIM) &
+				  ~(__GFP_MOVABLE | __GFP_COMP),
 				  "wc huge", order);
 
 	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
-				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
+				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+				   __GFP_KSWAPD_RECLAIM) &
+				  ~(__GFP_MOVABLE | __GFP_COMP)
 				  , "uc huge", order);
 
 	_manager->options.max_size = max_pages;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 8a25d1974385..f63d99c302e4 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -910,7 +910,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
 		gfp_flags |= __GFP_ZERO;
 
 	if (huge) {
-		gfp_flags |= GFP_TRANSHUGE;
+		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+			__GFP_KSWAPD_RECLAIM;
 		gfp_flags &= ~__GFP_MOVABLE;
 		gfp_flags &= ~__GFP_COMP;
 	}
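
For reference, GFP_TRANSHUGE is defined as GFP_TRANSHUGE_LIGHT |
__GFP_DIRECT_RECLAIM, so switching to the _LIGHT variant plus __GFP_NORETRY
makes the huge-page attempt purely opportunistic, while __GFP_KSWAPD_RECLAIM
still lets background reclaim make huge pages available for later allocations.
A hedged sketch of how such a flag set is typically composed for a pool that
falls back to order-0 pages (illustrative, not the exact TTM code):

	/* Illustrative composition of "try a huge page, but fail fast" flags. */
	gfp_t huge_flags = GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			   __GFP_KSWAPD_RECLAIM;
	struct page *p;

	huge_flags &= ~__GFP_MOVABLE;	/* pool pages are pinned, not migratable */
	huge_flags &= ~__GFP_COMP;	/* pool manages the pages individually */

	p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
	if (!p)
		p = alloc_pages(GFP_KERNEL, 0);	/* fall back to a single page */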

From 719a39a1e9b2dfbfb86f17a8da696b714a3b885d Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Date: Mon, 30 Apr 2018 10:04:42 -0400
Subject: [PATCH 0918/1286] drm/amdgpu: Switch to interruptible wait to recover
 from ring hang.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

v2:
Use dma_fence_wait() instead of dma_fence_wait_timeout(..., MAX_SCHEDULE_TIMEOUT).
Avoid printing an error message for -ERESTARTSYS.

Originally-by: David Panariti <David.Panariti@amd.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index eb80edfb1b0a..6741a62a7d15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -421,9 +421,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
 
 	if (other) {
 		signed long r;
-		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		r = dma_fence_wait(other, true);
 		if (r < 0) {
-			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+			if (r != -ERESTARTSYS)
+				DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
 			return r;
 		}
 	}
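
dma_fence_wait(fence, true) is the interruptible form: it returns 0 once the
fence signals and -ERESTARTSYS if a signal arrives first. -ERESTARTSYS is not
a real failure; the syscall layer either restarts the ioctl after the signal
is handled or surfaces it as -EINTR, so logging it as an error only produces
noise. A hedged sketch of the resulting pattern (names are illustrative):

	/* Illustrative: wait interruptibly and stay quiet on signal delivery. */
	long r = dma_fence_wait(fence, true);

	if (r < 0) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
		return r;	/* let the core restart or fail the ioctl */
	}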

From fc5a136ddad944d2f909d3ffcde924b7afa792f4 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 27 Apr 2018 13:46:08 +0800
Subject: [PATCH 0919/1286] drm/amd/pp: Skip fan attributes if fan not present

With powerplay enabled, we also need to skip the fan
attributes if no fan is present.

Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c      | 21 ++++++++-------------
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c |  3 +++
 2 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d9802d938e33..2c821262e262 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1364,19 +1364,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	umode_t effective_mode = attr->mode;
 
-	/* handle non-powerplay limitations */
-	if (!adev->powerplay.pp_handle) {
-		/* Skip fan attributes if fan is not present */
-		if (adev->pm.no_fan &&
-		    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
-		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
-		     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
-		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
-			return 0;
-		/* requires powerplay */
-		if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
-			return 0;
-	}
+
+	/* Skip fan attributes if fan is not present */
+	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
+		return 0;
 
 	/* Skip limit attributes if DPM is not enabled */
 	if (!adev->pm.dpm_enabled &&
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index eecb11824412..71b42331f185 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -229,6 +229,9 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 	if (ret)
 		goto err;
 
+	((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
+				hwmgr->thermal_controller.fanInfo.bNoFan;
+
 	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
 	if (ret)
 		goto err1;
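
hwmon_attributes_visible() is an is_visible callback on the attribute group:
returning 0 prevents the sysfs file from being created at all, while returning
the mode exposes it. A minimal sketch of that mechanism with hypothetical
names (my_device, attr_is_fan):

	/* Illustrative is_visible callback: hide fan files when no fan exists. */
	static umode_t my_hwmon_is_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
	{
		struct device *dev = kobj_to_dev(kobj);
		struct my_device *mydev = dev_get_drvdata(dev);

		if (mydev->no_fan && attr_is_fan(attr))	/* hypothetical helper */
			return 0;		/* attribute is not created */

		return attr->mode;		/* keep the default permissions */
	}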

From 51d45cbc9196b07f3fc66df5dafd3010c04913a3 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 27 Apr 2018 14:09:30 +0800
Subject: [PATCH 0920/1286] drm/amdgpu: Fix display corruption on CI with dpm
 enabled

With dpm enabled, we need to get the active crtcs in both dc and non-dc modes.

This regression was caused by
commit ebb649667a31 ("drm/amdgpu: Set pm_display_cfg in non-dc mode").

Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 32 +++++++++++++-------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 2c821262e262..b455da487782 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1878,26 +1878,26 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 			amdgpu_fence_wait_empty(ring);
 	}
 
-	if (!amdgpu_device_has_dc_support(adev)) {
-		mutex_lock(&adev->pm.mutex);
-		amdgpu_dpm_get_active_displays(adev);
-		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
-		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
-		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
-		/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
-		if (adev->pm.pm_display_cfg.vrefresh > 120)
-			adev->pm.pm_display_cfg.min_vblank_time = 0;
-		if (adev->powerplay.pp_funcs->display_configuration_change)
-			adev->powerplay.pp_funcs->display_configuration_change(
-							adev->powerplay.pp_handle,
-							&adev->pm.pm_display_cfg);
-		mutex_unlock(&adev->pm.mutex);
-	}
-
 	if (adev->powerplay.pp_funcs->dispatch_tasks) {
+		if (!amdgpu_device_has_dc_support(adev)) {
+			mutex_lock(&adev->pm.mutex);
+			amdgpu_dpm_get_active_displays(adev);
+			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+			/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+			if (adev->pm.pm_display_cfg.vrefresh > 120)
+				adev->pm.pm_display_cfg.min_vblank_time = 0;
+			if (adev->powerplay.pp_funcs->display_configuration_change)
+				adev->powerplay.pp_funcs->display_configuration_change(
+								adev->powerplay.pp_handle,
+								&adev->pm.pm_display_cfg);
+			mutex_unlock(&adev->pm.mutex);
+		}
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
 	} else {
 		mutex_lock(&adev->pm.mutex);
+		amdgpu_dpm_get_active_displays(adev);
 		/* update battery/ac status */
 		if (power_supply_is_system_supplied() > 0)
 			adev->pm.dpm.ac_power = true;

From dfe8a0187c8dde66b3bc52882826b1e53920ad56 Mon Sep 17 00:00:00 2001
From: Tom St Denis <tom.stdenis@amd.com>
Date: Tue, 1 May 2018 10:15:16 -0400
Subject: [PATCH 0921/1286] drm/amd/amdgpu: vcn10 Add callback for
 emit_reg_write_reg_wait

The callback .emit_reg_write_reg_wait was missing for vcn decode,
which resulted in a kernel oops.

Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index d9a15338db7e..0501746b6c2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1109,6 +1109,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.end_use = amdgpu_vcn_ring_end_use,
 	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
 	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
 static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {

From 7e4237dbe494f9721463fd1f2d3b9e52ec74930e Mon Sep 17 00:00:00 2001
From: Tom St Denis <tom.stdenis@amd.com>
Date: Wed, 2 May 2018 13:01:36 -0400
Subject: [PATCH 0922/1286] drm/amd/amdgpu: Add some documentation to the
 debugfs entries

Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 193 +++++++++++++++++++-
 1 file changed, 189 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index c98e59721444..f5fb93795a69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -28,8 +28,13 @@
 #include <linux/debugfs.h>
 #include "amdgpu.h"
 
-/*
- * Debugfs
+/**
+ * amdgpu_debugfs_add_files - Add simple debugfs entries
+ *
+ * @adev:  Device to attach debugfs entries to
+ * @files:  Array of function callbacks that respond to reads
+ * @nfiles: Number of callbacks to register
+ *
  */
 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 			     const struct drm_info_list *files,
@@ -64,7 +69,33 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 
 #if defined(CONFIG_DEBUG_FS)
 
-
+/**
+ * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
+ *
+ * @read: True if reading
+ * @f: open file handle
+ * @buf: User buffer to write/read to
+ * @size: Number of bytes to write/read
+ * @pos:  Offset to seek to
+ *
+ * This debugfs entry assigns special meaning to the offset being sought.
+ * Various bits have different meanings:
+ *
+ * Bit 62:  Indicates a GRBM bank switch is needed
+ * Bit 61:  Indicates a SRBM bank switch is needed (implies bit 62 is
+ * 			zero)
+ * Bits 24..33: The SE or ME selector if needed
+ * Bits 34..43: The SH (or SA) or PIPE selector if needed
+ * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
+ *
+ * Bit 23:  Indicates that the PM power gating lock should be held
+ * 			This is necessary to read registers that might be
+ * 			unreliable during a power gating transition.
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 		char __user *buf, size_t size, loff_t *pos)
 {
@@ -164,19 +195,37 @@ end:
 	return result;
 }
 
-
+/**
+ * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
+ */
 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
 	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
 }
 
+/**
+ * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
+ */
 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
 	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
 }
 
+
+/**
+ * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -204,6 +253,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos:  Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write.  This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
@@ -232,6 +293,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -259,6 +332,18 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos:  Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write.  This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
@@ -287,6 +372,18 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_smc_read - Read from a SMC register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read.  This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -314,6 +411,18 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_regs_smc_write - Write to a SMC register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos:  Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write.  This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
@@ -342,6 +451,20 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_gca_config_read - Read from gfx config data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ *
+ * This file is used to access configuration data in a somewhat
+ * stable fashion.  The format is a series of DWORDs with the first
+ * indicating which revision it is.  New content is appended to the
+ * end so that older software can still read the data.
+ */
+
 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -418,6 +541,19 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ *
+ * The offset is treated as the BYTE address of one of the sensors
+ * enumerated in amd/include/kgd_pp_interface.h under the
+ * 'amd_pp_sensors' enumeration.  For instance to read the UVD VCLK
+ * you would use the offset 3 * 4 = 12.
+ */
 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -457,6 +593,27 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	return !r ? outsize : r;
 }
 
+/** amdgpu_debugfs_wave_read - Read WAVE STATUS data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ *
+ * The offset being sought changes which wave the status data
+ * will be returned for.  The bits are used as follows:
+ *
+ * Bits 0..6: 	Byte offset into data
+ * Bits 7..14:	SE selector
+ * Bits 15..22:	SH/SA selector
+ * Bits 23..30: CU/{WGP+SIMD} selector
+ * Bits 31..36: WAVE ID selector
+ * Bits 37..44: SIMD ID selector
+ *
+ * The returned data begins with one DWORD of version information
+ * followed by WAVE STATUS registers relevant to the GFX IP version
+ * being used.  See gfx_v8_0_read_wave_data() for an example output.
+ */
 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -507,6 +664,28 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/** amdgpu_debugfs_gpr_read - Read wave gprs
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos:  Offset to seek to
+ *
+ * The offset being sought changes which wave the status data
+ * will be returned for.  The bits are used as follows:
+ *
+ * Bits 0..11:	Byte offset into data
+ * Bits 12..19:	SE selector
+ * Bits 20..27:	SH/SA selector
+ * Bits 28..35: CU/{WGP+SIMD} selector
+ * Bits 36..43: WAVE ID selector
+ * Bits 37..44: SIMD ID selector
+ * Bits 52..59: Thread selector
+ * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
+ *
+ * The return data comes from the SGPR or VGPR register bank for
+ * the selected operational unit.
+ */
 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
@@ -637,6 +816,12 @@ static const char *debugfs_regs_names[] = {
 	"amdgpu_gpr",
 };
 
+/**
+ * amdgpu_debugfs_regs_init -	Initialize debugfs entries that provide
+ * 								register access.
+ *
+ * @adev: The device to attach the debugfs entries to
+ */
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	struct drm_minor *minor = adev->ddev->primary;
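
As a usage illustration of the amdgpu_sensors entry documented above, the byte
offset passed to pread() selects the sensor: index 3 in amd_pp_sensors times
4 bytes reads the UVD VCLK. The debugfs path and card index below are
assumptions for a single-GPU system:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path and sensor index are illustrative; see amd_pp_sensors for indices. */
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_sensors";
	int sensor_index = 3;		/* e.g. UVD VCLK per the comment above */
	uint32_t value;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &value, sizeof(value), sensor_index * 4) == sizeof(value))
		printf("sensor[%d] = %u\n", sensor_index, value);
	close(fd);
	return 0;
}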

From 3f4299bee6eda852489ce4fd307dd709a09f5d8f Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Tue, 24 Apr 2018 12:14:39 +0800
Subject: [PATCH 0923/1286] drm/amdgpu: abstract bo_base init function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 73 ++++++++++++++------------
 1 file changed, 38 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1c00f1a56e8b..71dcdefce255 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -94,6 +94,36 @@ struct amdgpu_prt_cb {
 	struct dma_fence_cb cb;
 };
 
+static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+				   struct amdgpu_vm *vm,
+				   struct amdgpu_bo *bo)
+{
+	base->vm = vm;
+	base->bo = bo;
+	INIT_LIST_HEAD(&base->bo_list);
+	INIT_LIST_HEAD(&base->vm_status);
+
+	if (!bo)
+		return;
+	list_add_tail(&base->bo_list, &bo->va);
+
+	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+		return;
+
+	if (bo->preferred_domains &
+	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+		return;
+
+	/*
+	 * we checked all the prerequisites, but it looks like this per vm bo
+	 * is currently evicted. add the bo to the evicted list to make sure it
+	 * is validated on next vm use to avoid fault.
+	 * */
+	spin_lock(&vm->status_lock);
+	list_move_tail(&base->vm_status, &vm->evicted);
+	spin_unlock(&vm->status_lock);
+}
+
 /**
  * amdgpu_vm_level_shift - return the addr shift for each level
  *
@@ -446,11 +476,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			*/
 			pt->parent = amdgpu_bo_ref(parent->base.bo);
 
-			entry->base.vm = vm;
-			entry->base.bo = pt;
-			list_add_tail(&entry->base.bo_list, &pt->va);
+			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
 			spin_lock(&vm->status_lock);
-			list_add(&entry->base.vm_status, &vm->relocated);
+			list_move(&entry->base.vm_status, &vm->relocated);
 			spin_unlock(&vm->status_lock);
 		}
 
@@ -1841,36 +1869,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	if (bo_va == NULL) {
 		return NULL;
 	}
-	bo_va->base.vm = vm;
-	bo_va->base.bo = bo;
-	INIT_LIST_HEAD(&bo_va->base.bo_list);
-	INIT_LIST_HEAD(&bo_va->base.vm_status);
+	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
 
 	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 
-	if (!bo)
-		return bo_va;
-
-	list_add_tail(&bo_va->base.bo_list, &bo->va);
-
-	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
-		return bo_va;
-
-	if (bo->preferred_domains &
-	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
-		return bo_va;
-
-	/*
-	 * We checked all the prerequisites, but it looks like this per VM BO
-	 * is currently evicted. add the BO to the evicted list to make sure it
-	 * is validated on next VM use to avoid fault.
-	 * */
-	spin_lock(&vm->status_lock);
-	list_move_tail(&bo_va->base.vm_status, &vm->evicted);
-	spin_unlock(&vm->status_lock);
-
 	return bo_va;
 }
 
@@ -2370,6 +2374,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		   int vm_context, unsigned int pasid)
 {
 	struct amdgpu_bo_param bp;
+	struct amdgpu_bo *root;
 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
 		AMDGPU_VM_PTE_COUNT(adev) * 8);
 	unsigned ring_instance;
@@ -2431,23 +2436,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	bp.flags = flags;
 	bp.type = ttm_bo_type_kernel;
 	bp.resv = NULL;
-	r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo);
+	r = amdgpu_bo_create(adev, &bp, &root);
 	if (r)
 		goto error_free_sched_entity;
 
-	r = amdgpu_bo_reserve(vm->root.base.bo, true);
+	r = amdgpu_bo_reserve(root, true);
 	if (r)
 		goto error_free_root;
 
-	r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
+	r = amdgpu_vm_clear_bo(adev, vm, root,
 			       adev->vm_manager.root_level,
 			       vm->pte_support_ats);
 	if (r)
 		goto error_unreserve;
 
-	vm->root.base.vm = vm;
-	list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
-	list_add_tail(&vm->root.base.vm_status, &vm->evicted);
+	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
 	amdgpu_bo_unreserve(vm->root.base.bo);
 
 	if (pasid) {

From 4bebcceededa794a26827d40ab52555c2ec37deb Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Tue, 24 Apr 2018 13:54:10 +0800
Subject: [PATCH 0924/1286] drm/amdgpu: invalidate parent bo when shadow bo was
 invalidated
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The shadow BO is located in GTT while its parent (PT and PD) BO may be located
in VRAM. In some cases the BO in GTT can be evicted while the parent is not.
This may cause the shadow BO not to be put on the evicted list and thus not to
be invalidated correctly.

v2: suggested by Christian

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reported-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 71dcdefce255..8e71d3984016 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2252,6 +2252,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 {
 	struct amdgpu_vm_bo_base *bo_base;
 
+	/* shadow bo doesn't have bo base, its validation needs its parent */
+	if (bo->parent && bo->parent->shadow == bo)
+		bo = bo->parent;
+
 	list_for_each_entry(bo_base, &bo->va, bo_list) {
 		struct amdgpu_vm *vm = bo_base->vm;
 

From 4aa8c41bfb772cd86a5726e374b13d2f31cd9f4c Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Mon, 7 May 2018 14:23:04 +0800
Subject: [PATCH 0925/1286] drm/amd/pp: Refine the output of
 pp_power_profile_mode on VI
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In order to keep consistent with Vega,
change the output format of pp_power_profile_mode to
<integer><mode name string><"*" for current profile>:"detail settings"
and remove the "CURRENT" mode line.

for example:
NUM        MODE_NAME     SCLK_UP_HYST   SCLK_DOWN_HYST SCLK_ACTIVE_LEVEL     MCLK_UP_HYST   MCLK_DOWN_HYST MCLK_ACTIVE_LEVEL
  0   3D_FULL_SCREEN:        0              100               30                0              100               10
  1     POWER_SAVING:       10                0               30                -                -                -
  2            VIDEO:        -                -                -               10               16               31
  3               VR:        0               11               50                0              100               10
  4          COMPUTE:        0                5               30                -                -                -
  5         CUSTOM *:        0                5               30                0              100               10
NUM        MODE_NAME     SCLK_UP_HYST   SCLK_DOWN_HYST SCLK_ACTIVE_LEVEL     MCLK_UP_HYST   MCLK_DOWN_HYST MCLK_ACTIVE_LEVEL
  0   3D_FULL_SCREEN:        0              100               30                0              100               10
  1   POWER_SAVING *:       10                0               30                0              100               10
  2            VIDEO:        -                -                -               10               16               31
  3               VR:        0               11               50                0              100               10
  4          COMPUTE:        0                5               30                -                -                -
  5           CUSTOM:        -                -                -                -                -                -

Reviewed-by: Evan Quan <evan.quan@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  | 52 ++++++++-----------
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h  |  1 -
 2 files changed, 23 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 4c94e7a057e9..39e49ce6bb5f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -75,12 +75,13 @@
 #define PCIE_BUS_CLK                10000
 #define TCLK                        (PCIE_BUS_CLK / 10)
 
-static const struct profile_mode_setting smu7_profiling[5] =
+static const struct profile_mode_setting smu7_profiling[6] =
 					{{1, 0, 100, 30, 1, 0, 100, 10},
 					 {1, 10, 0, 30, 0, 0, 0, 0},
 					 {0, 0, 0, 0, 1, 10, 16, 31},
 					 {1, 0, 11, 50, 1, 0, 100, 10},
 					 {1, 0, 5, 30, 0, 0, 0, 0},
+					 {0, 0, 0, 0, 0, 0, 0, 0},
 					};
 
 #define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
@@ -4882,6 +4883,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 	len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
 
 	for (i = 0; i < len; i++) {
+		if (i == hwmgr->power_profile_mode) {
+			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
+			i, profile_name[i], "*",
+			data->current_profile_setting.sclk_up_hyst,
+			data->current_profile_setting.sclk_down_hyst,
+			data->current_profile_setting.sclk_activity,
+			data->current_profile_setting.mclk_up_hyst,
+			data->current_profile_setting.mclk_down_hyst,
+			data->current_profile_setting.mclk_activity);
+			continue;
+		}
 		if (smu7_profiling[i].bupdate_sclk)
 			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
 			i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
@@ -4901,24 +4913,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
 			"-", "-", "-");
 	}
 
-	size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n",
-			i, profile_name[i],
-			data->custom_profile_setting.sclk_up_hyst,
-			data->custom_profile_setting.sclk_down_hyst,
-			data->custom_profile_setting.sclk_activity,
-			data->custom_profile_setting.mclk_up_hyst,
-			data->custom_profile_setting.mclk_down_hyst,
-			data->custom_profile_setting.mclk_activity);
-
-	size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n",
-			"*", "CURRENT",
-			data->current_profile_setting.sclk_up_hyst,
-			data->current_profile_setting.sclk_down_hyst,
-			data->current_profile_setting.sclk_activity,
-			data->current_profile_setting.mclk_up_hyst,
-			data->current_profile_setting.mclk_down_hyst,
-			data->current_profile_setting.mclk_activity);
-
 	return size;
 }
 
@@ -4957,16 +4951,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
 		if (size < 8)
 			return -EINVAL;
 
-		data->custom_profile_setting.bupdate_sclk = input[0];
-		data->custom_profile_setting.sclk_up_hyst = input[1];
-		data->custom_profile_setting.sclk_down_hyst = input[2];
-		data->custom_profile_setting.sclk_activity = input[3];
-		data->custom_profile_setting.bupdate_mclk = input[4];
-		data->custom_profile_setting.mclk_up_hyst = input[5];
-		data->custom_profile_setting.mclk_down_hyst = input[6];
-		data->custom_profile_setting.mclk_activity = input[7];
-		if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) {
-			memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting));
+		tmp.bupdate_sclk = input[0];
+		tmp.sclk_up_hyst = input[1];
+		tmp.sclk_down_hyst = input[2];
+		tmp.sclk_activity = input[3];
+		tmp.bupdate_mclk = input[4];
+		tmp.mclk_up_hyst = input[5];
+		tmp.mclk_down_hyst = input[6];
+		tmp.mclk_activity = input[7];
+		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
+			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
 			hwmgr->power_profile_mode = mode;
 		}
 		break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index 51a776ed5906..c91e75db6a8e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -327,7 +327,6 @@ struct smu7_hwmgr {
 	uint16_t                              mem_latency_high;
 	uint16_t                              mem_latency_low;
 	uint32_t                              vr_config;
-	struct profile_mode_setting           custom_profile_setting;
 	struct profile_mode_setting           current_profile_setting;
 };
 

From 3d3c4f1b4d37412b2e3f1dd5e95293e61efa83df Mon Sep 17 00:00:00 2001
From: Colin Ian King <colin.king@canonical.com>
Date: Sat, 28 Apr 2018 23:21:55 +0100
Subject: [PATCH 0926/1286] drm/amd/powerplay: fix spelling mistake: "contruct"
 -> "construct"

Trivial fix to spelling mistake in PP_ASSERT_WITH_CODE message text

Signed-off-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   | 2 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 39e49ce6bb5f..8eb3f5176646 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1270,7 +1270,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 
 		tmp_result = smu7_construct_voltage_tables(hwmgr);
 		PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to contruct voltage tables!",
+				"Failed to construct voltage tables!",
 				result = tmp_result);
 	}
 	smum_initialize_mc_reg_table(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 748612074d20..d156b7bb92ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2860,7 +2860,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 
 	tmp_result = vega10_construct_voltage_tables(hwmgr);
 	PP_ASSERT_WITH_CODE(!tmp_result,
-			"Failed to contruct voltage tables!",
+			"Failed to construct voltage tables!",
 			result = tmp_result);
 
 	tmp_result = vega10_init_smc_table(hwmgr);

From f4c2cc43218150da670f526aba1eeb3bcec9e3d2 Mon Sep 17 00:00:00 2001
From: Colin Ian King <colin.king@canonical.com>
Date: Wed, 2 May 2018 15:43:16 +0100
Subject: [PATCH 0927/1286] drm/amd/display: clean up assignment of amdgpu_crtc

The declaration of pointer amdgpu_crtc has a redundant assignment to
amdgpu_crtc. Clean this up by removing it.

Detected by CoverityScan, CID#1460299 ("Evaluation order violation")

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3be17e26120d..3e0f3850dc9b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3748,7 +3748,7 @@ static void remove_stream(struct amdgpu_device *adev,
 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
 			       struct dc_cursor_position *position)
 {
-	struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	int x, y;
 	int xorigin = 0, yorigin = 0;
 

From 267256b5d884b1494cf30636c66cd95eeb25f41f Mon Sep 17 00:00:00 2001
From: Junwei Zhang <Jerry.Zhang@amd.com>
Date: Wed, 9 May 2018 17:17:58 +0800
Subject: [PATCH 0928/1286] drm/amd/powerplay: add PME smu message for raven

Used for working around an audio bug on some platforms.

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 5d07b6ea0a55..a2991fa2e6f8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -82,7 +82,8 @@
 #define PPSMC_MSG_SetSoftMaxFclkByFreq          0x33
 #define PPSMC_MSG_SetSoftMaxVcn                 0x34
 #define PPSMC_MSG_PowerGateMmHub                0x35
-#define PPSMC_Message_Count                     0x36
+#define PPSMC_MSG_SetRccPfcPmeRestoreRegister   0x36
+#define PPSMC_Message_Count                     0x37
 
 
 typedef uint16_t PPSMC_Result;

From 4ccd2d931c4bbebbca5a5e233f0d28ed57482e90 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Tue, 8 May 2018 14:20:25 +0800
Subject: [PATCH 0929/1286] drm/amd/pp: Implement force_clock_level for RV

Under manual dpm mode, the user can set the gfx/mem clocks
through the sysfs files pp_dpm_sclk/mclk on RV.

Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 45 +++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index be6d6e202819..8b75f525fe49 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -766,6 +766,51 @@ static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
 static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
+	struct smu10_hwmgr *data = hwmgr->backend;
+	struct smu10_voltage_dependency_table *mclk_table =
+					data->clock_vol_info.vdd_dep_on_fclk;
+	uint32_t low, high;
+
+	low = mask ? (ffs(mask) - 1) : 0;
+	high = mask ? (fls(mask) - 1) : 0;
+
+	switch (type) {
+	case PP_SCLK:
+		if (low > 2 || high > 2) {
+			pr_info("Currently sclk only support 3 levels on RV\n");
+			return -EINVAL;
+		}
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinGfxClk,
+						low == 2 ? data->gfx_max_freq_limit/100 :
+						low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
+						data->gfx_min_freq_limit/100);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxGfxClk,
+						high == 0 ? data->gfx_min_freq_limit/100 :
+						high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
+						data->gfx_max_freq_limit/100);
+		break;
+
+	case PP_MCLK:
+		if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
+			return -EINVAL;
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetHardMinFclkByFreq,
+						mclk_table->entries[low].clk/100);
+
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_SetSoftMaxFclkByFreq,
+						mclk_table->entries[high].clk/100);
+		break;
+
+	case PP_PCIE:
+	default:
+		break;
+	}
 	return 0;
 }
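
The mask written through pp_dpm_sclk/mclk is a bitmask of DPM level indices;
ffs()/fls() reduce it to the lowest and highest selected level, which are then
programmed as the hard minimum and soft maximum clocks. A small worked example
(values are illustrative):

	/* mask 0x6 selects levels 1 and 2 (bits 1 and 2 set) */
	uint32_t mask = 0x6;
	uint32_t low  = mask ? ffs(mask) - 1 : 0;	/* lowest set bit  -> 1 */
	uint32_t high = mask ? fls(mask) - 1 : 0;	/* highest set bit -> 2 */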
 

From 9164e8b7b32edeea75bf713a61f8bd1701b9a61b Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 10 May 2018 19:51:09 +0800
Subject: [PATCH 0930/1286] drm/amd/pp: Fix performance drop on Fiji

Performance dropped if the default TDP was more than 256 Watts.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index a264e0c35f45..99b29ff45d91 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -1042,12 +1042,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	n = (n & 0xff) << 8;
-
 	if (data->power_containment_features &
 			POWERCONTAINMENT_FEATURE_PkgPwrLimit)
 		return smum_send_msg_to_smc_with_parameter(hwmgr,
-				PPSMC_MSG_PkgPwrSetLimit, n);
+				PPSMC_MSG_PkgPwrSetLimit, n<<8);
 	return 0;
 }
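
The fix matters because the old masking wrapped any limit above 255 W before
shifting; a short worked example with an illustrative 300 W limit:

	/* old: n = (n & 0xff) << 8  ->  (300 & 0xff) << 8 = 44 << 8 = 0x2c00
	 * new: parameter = n << 8   ->   300 << 8          = 0x12c00
	 */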
 

From 967c650d4973dd87355c8540a18e6fa58844e547 Mon Sep 17 00:00:00 2001
From: Junwei Zhang <Jerry.Zhang@amd.com>
Date: Fri, 11 May 2018 10:54:40 +0800
Subject: [PATCH 0931/1286] drm/ttm: remove priority hard code when
 initializing ttm bo
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This allows the priority to be set before initialization.
It requires the ttm bo to be zero-allocated (kzalloc), which we in fact always do.

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: David Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98e06f8bf23b..cba50151de18 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1175,7 +1175,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	reservation_object_init(&bo->ttm_resv);
 	atomic_inc(&bo->bdev->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
-	bo->priority = 0;
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate

From a50cb94819f81bfafb5dc5a605baba9b40ba3243 Mon Sep 17 00:00:00 2001
From: Junwei Zhang <Jerry.Zhang@amd.com>
Date: Fri, 11 May 2018 11:02:23 +0800
Subject: [PATCH 0932/1286] drm/amdgpu: set ttm bo priority before
 initialization
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: David Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e62153a86001..6a9e46ae7f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -419,6 +419,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
 	bo->tbo.bdev = &adev->mman.bdev;
 	amdgpu_ttm_placement_from_domain(bo, bp->domain);
+	if (bp->type == ttm_bo_type_kernel)
+		bo->tbo.priority = 1;
 
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
 				 &bo->placement, page_align, &ctx, acc_size,
@@ -434,9 +436,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	else
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
-	if (bp->type == ttm_bo_type_kernel)
-		bo->tbo.priority = 1;
-
 	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;

From 996cab955384122848d8132554de43dce0d3c8a2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Tue, 8 May 2018 12:26:52 +0200
Subject: [PATCH 0933/1286] drm/amdgpu: add HDP flush dummy for UVD 6/7
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The UVD firmware doesn't seem to like the HDP flush here.

This worked for years without HDP flush, so just skip it.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 16 ++++++++++++++--
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 13 ++++++++++++-
 2 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 6d3359889c0b..8041b26a7a21 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -963,6 +963,16 @@ static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
 }
 
+/**
+ * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
+ *
+ * @ring: amdgpu_ring pointer
+ */
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	/* The firmware doesn't seem to like touching registers at this point. */
+}
+
 /**
  * uvd_v6_0_ring_test_ring - register write test
  *
@@ -1528,12 +1538,13 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.set_wptr = uvd_v6_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
 	.emit_frame_size =
-		6 + 6 + /* hdp flush / invalidate */
+		6 + /* hdp invalidate */
 		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
 		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
 	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
+	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
@@ -1552,7 +1563,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
 	.emit_frame_size =
-		6 + 6 + /* hdp flush / invalidate */
+		6 + /* hdp invalidate */
 		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
 		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
 		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
@@ -1561,6 +1572,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
 	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
+	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 2251db4048f5..b0de1e04093b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1135,6 +1135,16 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
 }
 
+/**
+ * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
+ *
+ * @ring: amdgpu_ring pointer
+ */
+static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	/* The firmware doesn't seem to like touching registers at this point. */
+}
+
 /**
  * uvd_v7_0_ring_test_ring - register write test
  *
@@ -1654,7 +1664,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.get_wptr = uvd_v7_0_ring_get_wptr,
 	.set_wptr = uvd_v7_0_ring_set_wptr,
 	.emit_frame_size =
-		6 + 6 + /* hdp flush / invalidate */
+		6 + /* hdp invalidate */
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
 		8 + /* uvd_v7_0_ring_emit_vm_flush */
@@ -1663,6 +1673,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.emit_ib = uvd_v7_0_ring_emit_ib,
 	.emit_fence = uvd_v7_0_ring_emit_fence,
 	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
+	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
 	.test_ring = uvd_v7_0_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
 	.insert_nop = uvd_v7_0_ring_insert_nop,

From 323a9dbc452da5c155e5c17fe91c07093824fe27 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 10 May 2018 15:10:14 -0500
Subject: [PATCH 0934/1286] drm/amdgpu/gmc9: remove unused register defs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

These were moved to the new df module and are no longer
used in this file.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6c9f7f999532..6cccf0e0acd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -43,20 +43,6 @@
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
-#define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
-#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
-//DF_CS_AON0_DramBaseAddress0
-#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
-#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
-#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
-#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
-#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
-#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
-#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
-#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
-#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
-#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
-
 /* add these here since we already include dce12 headers and these are for DCN */
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2

From c430bc977059a73758f666da545bc16c759fc165 Mon Sep 17 00:00:00 2001
From: Junwei Zhang <Jerry.Zhang@amd.com>
Date: Fri, 11 May 2018 14:54:31 +0800
Subject: [PATCH 0935/1286] drm/amdgpu: fix null pointer for bo unmap trace
 function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix a crash in the trace function when bo_va is NULL.

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 532263ab6e16..e96e26d3f3b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -275,7 +275,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->bo = bo_va->base.bo;
+			   __entry->bo = bo_va ? bo_va->base.bo : NULL;
 			   __entry->start = mapping->start;
 			   __entry->last = mapping->last;
 			   __entry->offset = mapping->offset;
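
The crash happened because the tracepoint unconditionally dereferenced
bo_va, which can be NULL during unmap; the fix records a NULL buffer
object instead. A minimal user-space sketch of the same defensive
pattern (hypothetical struct names, for illustration only):

#include <stdio.h>
#include <stddef.h>

struct bo      { int id; };
struct bo_base { struct bo *bo; };
struct bo_va   { struct bo_base base; };

/* record NULL rather than dereferencing a missing bo_va */
static struct bo *trace_bo(const struct bo_va *bo_va)
{
	return bo_va ? bo_va->base.bo : NULL;
}

int main(void)
{
	struct bo obj = { .id = 42 };
	struct bo_va va = { .base = { .bo = &obj } };

	printf("mapped:   %p\n", (void *)trace_bo(&va));
	printf("unmapped: %p\n", (void *)trace_bo(NULL));	/* no crash */
	return 0;
}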

From a2a330ad66313084c9432b32862aa7e1255da9b4 Mon Sep 17 00:00:00 2001
From: Shirish S <shirish.s@amd.com>
Date: Fri, 27 Apr 2018 15:47:21 +0530
Subject: [PATCH 0936/1286] drm/amd/display: remove need of modeset flag for
 overlay planes (V2)

This patch continues the work started in
"843e3c7 drm/amd/display: defer modeset check in dm_update_planes_state",
where we began to eliminate the dependency on user space setting
DRM_MODE_ATOMIC_ALLOW_MODESET, which is not mandatory.

Building on that, this patch removes the dependency on the flag
for overlay planes.

This has to be done in stages, as the change is fairly complex and requires
thorough testing before primary planes can also be freed from the modeset
flag dependency.

V2: Simplified the plane type check.

Signed-off-by: Shirish S <shirish.s@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3e0f3850dc9b..f2f54a9df56f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4104,7 +4104,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		}
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
-		if (!pflip_needed) {
+		if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
 			WARN_ON(!dm_new_plane_state->dc_state);
 
 			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
@@ -4827,7 +4827,8 @@ static int dm_update_planes_state(struct dc *dc,
 
 		/* Remove any changed/removed planes */
 		if (!enable) {
-			if (pflip_needed)
+			if (pflip_needed &&
+			    plane->type != DRM_PLANE_TYPE_OVERLAY)
 				continue;
 
 			if (!old_plane_crtc)
@@ -4874,7 +4875,8 @@ static int dm_update_planes_state(struct dc *dc,
 			if (!dm_new_crtc_state->stream)
 				continue;
 
-			if (pflip_needed)
+			if (pflip_needed &&
+			    plane->type != DRM_PLANE_TYPE_OVERLAY)
 				continue;
 
 			WARN_ON(dm_new_plane_state->dc_state);
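
The effect of the new checks is that overlay planes always take the full
plane-state (re)build path, while other plane types still short-circuit
when only a page flip is needed. A condensed, self-contained sketch of
that decision (simplified types, not the driver code itself):

#include <stdbool.h>
#include <stdio.h>

enum plane_type { PLANE_PRIMARY, PLANE_CURSOR, PLANE_OVERLAY };

/* true when the plane must go through the full (re)build path */
static bool must_rebuild(bool pflip_needed, enum plane_type type)
{
	return !pflip_needed || type == PLANE_OVERLAY;
}

int main(void)
{
	printf("primary + pflip:   %d\n", must_rebuild(true, PLANE_PRIMARY));	/* 0 */
	printf("overlay + pflip:   %d\n", must_rebuild(true, PLANE_OVERLAY));	/* 1 */
	printf("primary, no pflip: %d\n", must_rebuild(false, PLANE_PRIMARY));	/* 1 */
	return 0;
}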

From 8eb77198131bab4417b711f899473f4ee6b8ad55 Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Mon, 14 May 2018 10:13:57 -0400
Subject: [PATCH 0937/1286] drm/amd/powerplay: Add notify PWE function to SMU10

Add functionality to message the SMC to enable PWE after GPU suspend.
It is used when the display resumes from S3 and needs to start the
audio driver by enabling PWE.

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 8 ++++++++
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h         | 1 +
 2 files changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 8b75f525fe49..2f69bfa478a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1113,6 +1113,13 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 	data->water_marks_exist = true;
 	return result;
 }
+
+static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
+{
+
+	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+}
+
 static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
 {
 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
@@ -1153,6 +1160,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.power_state_set = smu10_set_power_state_tasks,
 	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
 	.set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+	.smus_notify_pwe = smu10_smus_notify_pwe,
 	.gfx_off_control = smu10_gfx_off_control,
 };
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 3d9743f5bb45..3c321c7d9626 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -326,6 +326,7 @@ struct pp_hwmgr_func {
 					long *input, uint32_t size);
 	int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
 	int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+	int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
 };
 
 struct pp_table_func {
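
The new entry follows the existing pp_hwmgr_func convention: an optional
per-ASIC callback reached through the function table, so callers must
check for NULL before invoking it. A self-contained model of that
dispatch pattern (simplified types, hypothetical caller):

#include <stdio.h>

struct hwmgr;

struct hwmgr_funcs {
	/* optional: only implementations that support PWE provide it */
	int (*smus_notify_pwe)(struct hwmgr *hwmgr);
};

struct hwmgr {
	const struct hwmgr_funcs *funcs;
};

/* stands in for sending PPSMC_MSG_SetRccPfcPmeRestoreRegister to the SMC */
static int smu10_notify_pwe(struct hwmgr *hwmgr)
{
	(void)hwmgr;	/* unused in this sketch */
	printf("PWE enable message sent to SMC\n");
	return 0;
}

static int notify_pwe(struct hwmgr *hwmgr)
{
	if (!hwmgr->funcs->smus_notify_pwe)
		return -1;	/* ASIC without the callback */

	return hwmgr->funcs->smus_notify_pwe(hwmgr);
}

int main(void)
{
	const struct hwmgr_funcs smu10_funcs = { .smus_notify_pwe = smu10_notify_pwe };
	struct hwmgr hw = { .funcs = &smu10_funcs };

	return notify_pwe(&hw);
}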

From 959a2091fae0fa498c79e095a4f6cbbb202a1194 Mon Sep 17 00:00:00 2001
From: Yong Zhao <yong.zhao@amd.com>
Date: Mon, 14 May 2018 12:15:27 -0400
Subject: [PATCH 0938/1286] drm/amdgpu: Add support to change mtype for 2nd
 part of gart BOs on GFX9
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change prepares for a workaround in amdkfd for a GFX9 HW bug. It
requires the control stack memory of compute queues, which is allocated
from the second page of MQD gart BOs, to have mtype NC, rather than
the default UC.

Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 54 ++++++++++++++++++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  5 ++-
 include/uapi/drm/amdgpu_drm.h           |  4 ++
 3 files changed, 51 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dfd22db13fb1..cc3b067e1ec6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -834,6 +834,45 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	sg_free_table(ttm->sg);
 }
 
+int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+				struct ttm_buffer_object *tbo,
+				uint64_t flags)
+{
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
+	struct ttm_tt *ttm = tbo->ttm;
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	int r;
+
+	if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
+		uint64_t page_idx = 1;
+
+		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
+				ttm->pages, gtt->ttm.dma_address, flags);
+		if (r)
+			goto gart_bind_fail;
+
+		/* Patch mtype of the second part BO */
+		flags &=  ~AMDGPU_PTE_MTYPE_MASK;
+		flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);
+
+		r = amdgpu_gart_bind(adev,
+				gtt->offset + (page_idx << PAGE_SHIFT),
+				ttm->num_pages - page_idx,
+				&ttm->pages[page_idx],
+				&(gtt->ttm.dma_address[page_idx]), flags);
+	} else {
+		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+				     ttm->pages, gtt->ttm.dma_address, flags);
+	}
+
+gart_bind_fail:
+	if (r)
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  ttm->num_pages, gtt->offset);
+
+	return r;
+}
+
 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
@@ -907,8 +946,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
 	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
-	r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
-			     bo->ttm->pages, gtt->ttm.dma_address, flags);
+	r = amdgpu_ttm_gart_bind(adev, bo, flags);
 	if (unlikely(r)) {
 		ttm_bo_mem_put(bo, &tmp);
 		return r;
@@ -925,19 +963,15 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
 	uint64_t flags;
 	int r;
 
-	if (!gtt)
+	if (!tbo->ttm)
 		return 0;
 
-	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
-	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
-			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
-	if (r)
-		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-			  gtt->ttm.ttm.num_pages, gtt->offset);
+	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+	r = amdgpu_ttm_gart_bind(adev, tbo, flags);
+
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 30f080364c97..4cf678684a12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -75,11 +75,12 @@ struct amdgpu_bo_list_entry;
 /* PDE Block Fragment Size for VEGA10 */
 #define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)
 
-/* VEGA10 only */
+
+/* For GFX9 */
 #define AMDGPU_PTE_MTYPE(a)    ((uint64_t)a << 57)
 #define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)
 
-/* For Raven */
+#define AMDGPU_MTYPE_NC 0
 #define AMDGPU_MTYPE_CC 2
 
 #define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 081d25640b64..78b4dd89fcb4 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -101,6 +101,10 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
 /* Flag that BO sharing will be explicitly synchronized */
 #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
+/* Flag that indicates allocating MQD gart on GFX9, where the mtype
+ * for the second page onward should be set to NC.
+ */
+#define AMDGPU_GEM_CREATE_MQD_GFX9		(1 << 8)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */
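
The MTYPE patching itself is a simple mask-and-set on the PTE flags:
clear the two MTYPE bits, then write the NC encoding for every page from
page_idx onward. A stand-alone illustration following the definitions in
amdgpu_vm.h (the starting flags value is made up for the example):

#include <stdint.h>
#include <stdio.h>

/* following the definitions in amdgpu_vm.h */
#define AMDGPU_PTE_MTYPE(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)
#define AMDGPU_MTYPE_NC		0
#define AMDGPU_MTYPE_CC		2

int main(void)
{
	/* arbitrary example flags that currently carry MTYPE CC */
	uint64_t flags = AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC) | 0x1;

	/* what amdgpu_ttm_gart_bind() does for the second part of an MQD BO */
	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);

	printf("patched flags: 0x%016llx\n", (unsigned long long)flags);
	return 0;
}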

From c7535379f660b721998ad6ab397809b0cbeb66d7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Fri, 11 May 2018 23:13:39 +0800
Subject: [PATCH 0939/1286] drm/amdgpu: drop printing the BO offset in the gem
 debugfs (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It is meaningless anyway.

v2: remove unused variable (Alex)

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7d3dc229fa47..f79bbf81a088 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -785,7 +785,6 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	unsigned domain;
 	const char *placement;
 	unsigned pin_count;
-	uint64_t offset;
 
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	switch (domain) {
@@ -803,10 +802,6 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	seq_printf(m, "\t0x%08x: %12ld byte %s",
 		   id, amdgpu_bo_size(bo), placement);
 
-	offset = READ_ONCE(bo->tbo.mem.start);
-	if (offset != AMDGPU_BO_INVALID_OFFSET)
-		seq_printf(m, " @ 0x%010Lx", offset);
-
 	pin_count = READ_ONCE(bo->pin_count);
 	if (pin_count)
 		seq_printf(m, " pin count %d", pin_count);

From 6b155d6af03a053a7de2a72255563d7ef40c9644 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Fri, 11 May 2018 23:14:29 +0800
Subject: [PATCH 0940/1286] drm/amdgpu: print the BO flags in the gem debugfs
 entry
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Quite useful to know.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index f79bbf81a088..2c8e27370284 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -774,6 +774,12 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 }
 
 #if defined(CONFIG_DEBUG_FS)
+
+#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
+	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
+		seq_printf((m), " " #flag);		\
+	}
+
 static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 {
 	struct drm_gem_object *gobj = ptr;
@@ -814,6 +820,15 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	else if (dma_buf)
 		seq_printf(m, " exported as %p", dma_buf);
 
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
+	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);
+
 	seq_printf(m, "\n");
 
 	return 0;
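
The helper macro relies on token pasting and stringification, so each
flag only has to be named once at the call site. A compact user-space
equivalent of the same trick:

#include <stdio.h>

#define GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
#define GEM_CREATE_NO_CPU_ACCESS	(1 << 1)
#define GEM_CREATE_CPU_GTT_USWC		(1 << 2)

/* print " FLAG" when the bit is set: ## pastes the name, # stringifies it */
#define print_flag(flags, flag)					\
	do {							\
		if ((flags) & (GEM_CREATE_ ## flag))		\
			printf(" " #flag);			\
	} while (0)

int main(void)
{
	unsigned int flags = GEM_CREATE_CPU_ACCESS_REQUIRED | GEM_CREATE_CPU_GTT_USWC;

	printf("flags:");
	print_flag(flags, CPU_ACCESS_REQUIRED);
	print_flag(flags, NO_CPU_ACCESS);
	print_flag(flags, CPU_GTT_USWC);
	printf("\n");	/* prints: flags: CPU_ACCESS_REQUIRED CPU_GTT_USWC */
	return 0;
}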

From 1297bf2e916d2012995b642dd6851332a73126c2 Mon Sep 17 00:00:00 2001
From: Dirk Hohndel <dirk@hohndel.org>
Date: Wed, 2 May 2018 15:46:21 +0200
Subject: [PATCH 0941/1286] Add SPDX identifier and clarify license
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is dual licensed under GPL-2.0 or MIT.

Signed-off-by: Dirk Hohndel (VMware) <dirk@hohndel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/ttm/ttm_agp_backend.c  | 1 +
 drivers/gpu/drm/ttm/ttm_bo.c           | 1 +
 drivers/gpu/drm/ttm/ttm_bo_manager.c   | 1 +
 drivers/gpu/drm/ttm/ttm_bo_util.c      | 1 +
 drivers/gpu/drm/ttm/ttm_bo_vm.c        | 1 +
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 1 +
 drivers/gpu/drm/ttm/ttm_lock.c         | 1 +
 drivers/gpu/drm/ttm/ttm_memory.c       | 1 +
 drivers/gpu/drm/ttm/ttm_module.c       | 1 +
 drivers/gpu/drm/ttm/ttm_object.c       | 1 +
 drivers/gpu/drm/ttm/ttm_tt.c           | 1 +
 11 files changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 7c2485fe88d8..ea4d59eb8966 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cba50151de18..5d8688e522d1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index a7c232dc39cb..18d3debcc949 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f3bf545a79cf..f2c167702eef 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8eba95b3c737..c7ece7613a6a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3dca206e85f7..e73ae0d22897 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 913f4318cdc0..20694b8a01ca 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 27856c55dc84..450387c92b63 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 66fc6395eb54..6ff40c041d79 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1aa2baa83959..74f1b1eb1f8e 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7e672be987b5..a1e543972ca7 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA

From 8344c53f57057b42a5da87e9557c40fcda18fb7a Mon Sep 17 00:00:00 2001
From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Date: Thu, 29 Mar 2018 22:36:32 +0530
Subject: [PATCH 0942/1286] drm/scheduler: remove unused parameter
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch also affects the amdgpu and etnaviv drivers, which use the
function drm_sched_entity_init.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Acked-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c   | 4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    | 2 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c     | 2 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c     | 2 +-
 drivers/gpu/drm/etnaviv/etnaviv_drv.c     | 2 +-
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +--
 include/drm/gpu_scheduler.h               | 2 +-
 11 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6741a62a7d15..a8e531d604fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -91,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			continue;
 
 		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, amdgpu_sched_jobs, &ctx->guilty);
+					  rq, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index cc3b067e1ec6..5e9fd256faad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -111,7 +111,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
 		goto error_entity;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d8dd4028c2bb..de4d77af02ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -242,7 +242,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	ring = &adev->uvd.ring;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up UVD run queue.\n");
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index e2186eda3271..a86322f5164f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -186,7 +186,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	ring = &adev->vce.ring[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 58e495330b38..e5d234cf804f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -105,7 +105,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	ring = &adev->vcn.ring_dec;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCN dec run queue.\n");
 		return r;
@@ -114,7 +114,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	ring = &adev->vcn.ring_enc[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCN enc run queue.\n");
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e71d3984016..1a8f4e0dd023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2404,7 +2404,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	r = drm_sched_entity_init(&ring->sched, &vm->entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 8041b26a7a21..ca6ab56357b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -429,7 +429,7 @@ static int uvd_v6_0_sw_init(void *handle)
 		ring = &adev->uvd.ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 		r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
-					  rq, amdgpu_sched_jobs, NULL);
+					  rq, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index b0de1e04093b..0ca63d588670 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -418,7 +418,7 @@ static int uvd_v7_0_sw_init(void *handle)
 	ring = &adev->uvd.ring_enc[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 	r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 		return r;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index ab50090d066c..23e73c2a19f4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -116,7 +116,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 			drm_sched_entity_init(&gpu->sched,
 				&ctx->sched_entity[i],
 				&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				32, NULL);
+				NULL);
 			}
 	}
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 1f1dd70125a7..a364fc0b38c3 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -117,7 +117,6 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * @sched	The pointer to the scheduler
  * @entity	The pointer to a valid drm_sched_entity
  * @rq		The run queue this entity belongs
- * @jobs	The max number of jobs in the job queue
  * @guilty      atomic_t set to 1 when a job on this queue
  *              is found to be guilty causing a timeout
  *
@@ -126,7 +125,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 			  struct drm_sched_entity *entity,
 			  struct drm_sched_rq *rq,
-			  uint32_t jobs, atomic_t *guilty)
+			  atomic_t *guilty)
 {
 	if (!(sched && entity && rq))
 		return -EINVAL;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 350a62c26b29..52380067a43f 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -188,7 +188,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 			  struct drm_sched_entity *entity,
 			  struct drm_sched_rq *rq,
-			  uint32_t jobs, atomic_t *guilty);
+			  atomic_t *guilty);
 void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity);
 void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,

From 548da31da9805645b1e8043da5081b9745545248 Mon Sep 17 00:00:00 2001
From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Wed, 16 May 2018 16:43:34 +1000
Subject: [PATCH 0943/1286] drm/amdgpu: include pagemap.h for release_pages()

Fixes: 5ae0283e831a ("drm/amdgpu: Add userptr support for KFD")
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 72ab2b1ffe75..ff8fd75f7ca5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -23,6 +23,7 @@
 #define pr_fmt(fmt) "kfd2kgd: " fmt
 
 #include <linux/list.h>
+#include <linux/pagemap.h>
 #include <linux/sched/mm.h>
 #include <drm/drmP.h>
 #include "amdgpu_object.h"

From dd856d924b2471bf28e7c60df881529525a1192c Mon Sep 17 00:00:00 2001
From: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Date: Tue, 15 May 2018 18:57:36 +0300
Subject: [PATCH 0944/1286] drm: rcar-du: Fix rcar_du_of_init() stub

The rcar_du_of_init() function is supposed to be defined as a stub when
CONFIG_DRM_RCAR_LVDS is disabled as the rcar_du_of.c file isn't compiled
in that case. However, a bug in the configuration option check makes it
a stub when CONFIG_DRM_RCAR_LVDS=m as well, which prevents legacy DTs
from being fixed at boot time. Fix the configuration option check by
using IS_ENABLED.

Fixes: 81c0e3dd8292 ("drm: rcar-du: Fix legacy DT to create LVDS encoder nodes")
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Reviewed-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180515155736.3379-1-laurent.pinchart+renesas@ideasonboard.com
---
 drivers/gpu/drm/rcar-du/rcar_du_of.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of.h b/drivers/gpu/drm/rcar-du/rcar_du_of.h
index c2e65a727e91..8dd3fbe96650 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_of.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of.h
@@ -11,7 +11,7 @@
 
 struct of_device_id;
 
-#ifdef CONFIG_DRM_RCAR_LVDS
+#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
 void __init rcar_du_of_init(const struct of_device_id *of_ids);
 #else
 static inline void rcar_du_of_init(const struct of_device_id *of_ids) { }
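
A plain #ifdef CONFIG_DRM_RCAR_LVDS is only true for a built-in (=y)
configuration, because =m defines CONFIG_DRM_RCAR_LVDS_MODULE instead;
IS_ENABLED() catches both cases. A simplified user-space re-implementation
of the kconfig.h trick, assuming the driver is built as a module:

#include <stdio.h>

/* simplified version of the helpers in include/linux/kconfig.h */
#define __ARG_PLACEHOLDER_1			0,
#define __take_second_arg(__ignored, val, ...)	val
#define ____is_defined(arg1_or_junk)		__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)			____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x)				___is_defined(x)
#define IS_BUILTIN(option)			__is_defined(option)
#define IS_MODULE(option)			__is_defined(option##_MODULE)
#define IS_ENABLED(option)			(IS_BUILTIN(option) || IS_MODULE(option))

/* pretend the option was set to =m */
#define CONFIG_DRM_RCAR_LVDS_MODULE 1

int main(void)
{
#ifdef CONFIG_DRM_RCAR_LVDS
	printf("#ifdef sees the option\n");	/* not printed for =m */
#endif
	printf("IS_ENABLED() = %d\n", IS_ENABLED(CONFIG_DRM_RCAR_LVDS));	/* 1 */
	return 0;
}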

From 315852b422972e6ebb1dfddaadada09e46a2681a Mon Sep 17 00:00:00 2001
From: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Date: Tue, 15 May 2018 20:47:52 +0300
Subject: [PATCH 0945/1286] drm: rcar-du: Fix build failure

Commit 75a07f399cd4 ("drm: rcar-du: Zero-out sg_tables when duplicating
plane state") introduced a reference to the alpha field of struct
rcar_du_vsp_plane_state that got removed in commit 301a9b8d5456
("drm/rcar-du: Convert to the new generic alpha property"). The issue
stems from the merge of the two commits through separate branches and
breaks compilation of the driver. Fix it.

Fixes: 75a07f399cd4 ("drm: rcar-du: Zero-out sg_tables when duplicating plane state")
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Reviewed-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Tested-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180515174752.28954-1-laurent.pinchart+renesas@ideasonboard.com
---
 drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 27a440886b17..c59f0cfabd33 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -314,7 +314,6 @@ rcar_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
 		return NULL;
 
 	__drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
-	copy->alpha = to_rcar_vsp_plane_state(plane->state)->alpha;
 
 	return &copy->state;
 }

From 230630bd3834af0ea6ec75354ec21819de148ee1 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Fri, 4 May 2018 15:08:49 +0200
Subject: [PATCH 0946/1286] drm/tegra: gr3d: Properly clean up resources

Failure to register the Tegra DRM client would leak resources. Move the
cleanup code to error unwinding gotos to fix that and share it with the
other error paths.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/gr3d.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index ce5120683091..9303278efc1d 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -52,8 +52,9 @@ static int gr3d_init(struct host1x_client *client)
 
 	client->syncpts[0] = host1x_syncpt_request(client, flags);
 	if (!client->syncpts[0]) {
-		host1x_channel_put(gr3d->channel);
-		return -ENOMEM;
+		err = -ENOMEM;
+		dev_err(client->dev, "failed to request syncpoint: %d\n", err);
+		goto put;
 	}
 
 	if (tegra->domain) {
@@ -65,15 +66,30 @@ static int gr3d_init(struct host1x_client *client)
 				dev_err(client->dev,
 					"failed to attach to domain: %d\n",
 					err);
-				host1x_syncpt_free(client->syncpts[0]);
-				host1x_channel_put(gr3d->channel);
 				iommu_group_put(gr3d->group);
-				return err;
+				goto free;
 			}
 		}
 	}
 
-	return tegra_drm_register_client(dev->dev_private, drm);
+	err = tegra_drm_register_client(dev->dev_private, drm);
+	if (err < 0) {
+		dev_err(client->dev, "failed to register client: %d\n", err);
+		goto detach;
+	}
+
+	return 0;
+
+detach:
+	if (gr3d->group) {
+		iommu_detach_group(tegra->domain, gr3d->group);
+		iommu_group_put(gr3d->group);
+	}
+free:
+	host1x_syncpt_free(client->syncpts[0]);
+put:
+	host1x_channel_put(gr3d->channel);
+	return err;
 }
 
 static int gr3d_exit(struct host1x_client *client)
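
The unwinding labels release resources in the reverse order of
acquisition, so every failure point only undoes what has actually been
set up. A self-contained sketch of the same goto-based pattern (the
acquisition steps mirror gr3d_init: channel, syncpoint, IOMMU group,
client registration):

#include <stdio.h>

static void release(const char *what)
{
	printf("release %s\n", what);
}

/* fail_step selects which acquisition fails: 1 = syncpoint request,
 * 2 = IOMMU attach, 3 = client registration; 0 means all succeed */
static int init_example(int fail_step)
{
	int err = 0;

	printf("get channel\n");
	if (fail_step == 1) {
		err = -1;
		goto put;
	}

	printf("request syncpoint\n");
	if (fail_step == 2) {
		err = -1;
		goto free;
	}

	printf("attach IOMMU group\n");
	if (fail_step == 3) {
		err = -1;
		goto detach;
	}

	printf("register client\n");
	return 0;

detach:
	release("IOMMU group");
free:
	release("syncpoint");
put:
	release("channel");
	return err;
}

int main(void)
{
	init_example(3);	/* exercise the deepest error path */
	return 0;
}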

From 0c407de5ed1a329468122cbf4f3e727e0c1e3f36 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Fri, 4 May 2018 15:02:24 +0200
Subject: [PATCH 0947/1286] drm/tegra: Refactor IOMMU attach/detach

Attaching to and detaching from an IOMMU uses the same code sequence in
every driver, so factor it out into separate helpers.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/dc.c   | 42 ++++++--------------------------
 drivers/gpu/drm/tegra/drm.c  | 46 ++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/tegra/drm.h  |  4 ++++
 drivers/gpu/drm/tegra/gr2d.c | 32 +++++++------------------
 drivers/gpu/drm/tegra/gr3d.c | 31 ++++++------------------
 5 files changed, 72 insertions(+), 83 deletions(-)

diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c843f11043db..3e7ec3937346 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1837,21 +1837,11 @@ static int tegra_dc_init(struct host1x_client *client)
 	if (!dc->syncpt)
 		dev_warn(dc->dev, "failed to allocate syncpoint\n");
 
-	if (tegra->domain) {
-		dc->group = iommu_group_get(client->dev);
-
-		if (dc->group && dc->group != tegra->group) {
-			err = iommu_attach_group(tegra->domain, dc->group);
-			if (err < 0) {
-				dev_err(dc->dev,
-					"failed to attach to domain: %d\n",
-					err);
-				iommu_group_put(dc->group);
-				return err;
-			}
-
-			tegra->group = dc->group;
-		}
+	dc->group = host1x_client_iommu_attach(client, true);
+	if (IS_ERR(dc->group)) {
+		err = PTR_ERR(dc->group);
+		dev_err(client->dev, "failed to attach to domain: %d\n", err);
+		return err;
 	}
 
 	if (dc->soc->wgrps)
@@ -1916,15 +1906,7 @@ cleanup:
 	if (!IS_ERR(primary))
 		drm_plane_cleanup(primary);
 
-	if (dc->group) {
-		if (dc->group == tegra->group) {
-			iommu_detach_group(tegra->domain, dc->group);
-			tegra->group = NULL;
-		}
-
-		iommu_group_put(dc->group);
-	}
-
+	host1x_client_iommu_detach(client, dc->group);
 	host1x_syncpt_free(dc->syncpt);
 
 	return err;
@@ -1932,9 +1914,7 @@ cleanup:
 
 static int tegra_dc_exit(struct host1x_client *client)
 {
-	struct drm_device *drm = dev_get_drvdata(client->parent);
 	struct tegra_dc *dc = host1x_client_to_dc(client);
-	struct tegra_drm *tegra = drm->dev_private;
 	int err;
 
 	devm_free_irq(dc->dev, dc->irq, dc);
@@ -1945,15 +1925,7 @@ static int tegra_dc_exit(struct host1x_client *client)
 		return err;
 	}
 
-	if (dc->group) {
-		if (dc->group == tegra->group) {
-			iommu_detach_group(tegra->domain, dc->group);
-			tegra->group = NULL;
-		}
-
-		iommu_group_put(dc->group);
-	}
-
+	host1x_client_iommu_detach(client, dc->group);
 	host1x_syncpt_free(dc->syncpt);
 
 	return 0;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 7afe2f635f74..181e82c58a4f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1114,6 +1114,52 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
 	return 0;
 }
 
+struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
+					       bool shared)
+{
+	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct tegra_drm *tegra = drm->dev_private;
+	struct iommu_group *group = NULL;
+	int err;
+
+	if (tegra->domain) {
+		group = iommu_group_get(client->dev);
+		if (!group) {
+			dev_err(client->dev, "failed to get IOMMU group\n");
+			return ERR_PTR(-ENODEV);
+		}
+
+		if (!shared || (shared && (group != tegra->group))) {
+			err = iommu_attach_group(tegra->domain, group);
+			if (err < 0) {
+				iommu_group_put(group);
+				return ERR_PTR(err);
+			}
+
+			if (shared && !tegra->group)
+				tegra->group = group;
+		}
+	}
+
+	return group;
+}
+
+void host1x_client_iommu_detach(struct host1x_client *client,
+				struct iommu_group *group)
+{
+	struct drm_device *drm = dev_get_drvdata(client->parent);
+	struct tegra_drm *tegra = drm->dev_private;
+
+	if (group) {
+		if (group == tegra->group) {
+			iommu_detach_group(tegra->domain, group);
+			tegra->group = NULL;
+		}
+
+		iommu_group_put(group);
+	}
+}
+
 void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
 {
 	struct iova *alloc;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 4f41aaec8530..fe263cf58f34 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -110,6 +110,10 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
 			      struct tegra_drm_client *client);
 int tegra_drm_unregister_client(struct tegra_drm *tegra,
 				struct tegra_drm_client *client);
+struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
+					       bool shared);
+void host1x_client_iommu_detach(struct host1x_client *client,
+				struct iommu_group *group);
 
 int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
 int tegra_drm_exit(struct tegra_drm *tegra);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 0b42e99da8ad..2cd0f66c8aa9 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -32,7 +32,6 @@ static int gr2d_init(struct host1x_client *client)
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
 	struct drm_device *dev = dev_get_drvdata(client->parent);
 	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
-	struct tegra_drm *tegra = dev->dev_private;
 	struct gr2d *gr2d = to_gr2d(drm);
 	int err;
 
@@ -47,22 +46,14 @@ static int gr2d_init(struct host1x_client *client)
 		goto put;
 	}
 
-	if (tegra->domain) {
-		gr2d->group = iommu_group_get(client->dev);
-
-		if (gr2d->group) {
-			err = iommu_attach_group(tegra->domain, gr2d->group);
-			if (err < 0) {
-				dev_err(client->dev,
-					"failed to attach to domain: %d\n",
-					err);
-				iommu_group_put(gr2d->group);
-				goto free;
-			}
-		}
+	gr2d->group = host1x_client_iommu_attach(client, false);
+	if (IS_ERR(gr2d->group)) {
+		err = PTR_ERR(gr2d->group);
+		dev_err(client->dev, "failed to attach to domain: %d\n", err);
+		goto free;
 	}
 
-	err = tegra_drm_register_client(tegra, drm);
+	err = tegra_drm_register_client(dev->dev_private, drm);
 	if (err < 0) {
 		dev_err(client->dev, "failed to register client: %d\n", err);
 		goto detach;
@@ -71,10 +62,7 @@ static int gr2d_init(struct host1x_client *client)
 	return 0;
 
 detach:
-	if (gr2d->group) {
-		iommu_detach_group(tegra->domain, gr2d->group);
-		iommu_group_put(gr2d->group);
-	}
+	host1x_client_iommu_detach(client, gr2d->group);
 free:
 	host1x_syncpt_free(client->syncpts[0]);
 put:
@@ -94,14 +82,10 @@ static int gr2d_exit(struct host1x_client *client)
 	if (err < 0)
 		return err;
 
+	host1x_client_iommu_detach(client, gr2d->group);
 	host1x_syncpt_free(client->syncpts[0]);
 	host1x_channel_put(gr2d->channel);
 
-	if (gr2d->group) {
-		iommu_detach_group(tegra->domain, gr2d->group);
-		iommu_group_put(gr2d->group);
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 9303278efc1d..b00002f1c590 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -42,7 +42,6 @@ static int gr3d_init(struct host1x_client *client)
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
 	struct drm_device *dev = dev_get_drvdata(client->parent);
 	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
-	struct tegra_drm *tegra = dev->dev_private;
 	struct gr3d *gr3d = to_gr3d(drm);
 	int err;
 
@@ -57,19 +56,11 @@ static int gr3d_init(struct host1x_client *client)
 		goto put;
 	}
 
-	if (tegra->domain) {
-		gr3d->group = iommu_group_get(client->dev);
-
-		if (gr3d->group) {
-			err = iommu_attach_group(tegra->domain, gr3d->group);
-			if (err < 0) {
-				dev_err(client->dev,
-					"failed to attach to domain: %d\n",
-					err);
-				iommu_group_put(gr3d->group);
-				goto free;
-			}
-		}
+	gr3d->group = host1x_client_iommu_attach(client, false);
+	if (IS_ERR(gr3d->group)) {
+		err = PTR_ERR(gr3d->group);
+		dev_err(client->dev, "failed to attach to domain: %d\n", err);
+		goto free;
 	}
 
 	err = tegra_drm_register_client(dev->dev_private, drm);
@@ -81,10 +72,7 @@ static int gr3d_init(struct host1x_client *client)
 	return 0;
 
 detach:
-	if (gr3d->group) {
-		iommu_detach_group(tegra->domain, gr3d->group);
-		iommu_group_put(gr3d->group);
-	}
+	host1x_client_iommu_detach(client, gr3d->group);
 free:
 	host1x_syncpt_free(client->syncpts[0]);
 put:
@@ -96,7 +84,6 @@ static int gr3d_exit(struct host1x_client *client)
 {
 	struct tegra_drm_client *drm = host1x_to_drm_client(client);
 	struct drm_device *dev = dev_get_drvdata(client->parent);
-	struct tegra_drm *tegra = dev->dev_private;
 	struct gr3d *gr3d = to_gr3d(drm);
 	int err;
 
@@ -104,14 +91,10 @@ static int gr3d_exit(struct host1x_client *client)
 	if (err < 0)
 		return err;
 
+	host1x_client_iommu_detach(client, gr3d->group);
 	host1x_syncpt_free(client->syncpts[0]);
 	host1x_channel_put(gr3d->channel);
 
-	if (gr3d->group) {
-		iommu_detach_group(tegra->domain, gr3d->group);
-		iommu_group_put(gr3d->group);
-	}
-
 	return 0;
 }
 

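The attach helper reports failure through the returned pointer itself
(ERR_PTR/IS_ERR), while a plain NULL means there is no IOMMU domain and
nothing was attached; the detach helper accepts NULL or a valid group.
A minimal user-space model of that return convention (simplified
helpers, hypothetical attach function):

#include <stdio.h>
#include <errno.h>

/* simplified versions of the kernel's ERR_PTR helpers */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct group { int id; };

/* NULL = no IOMMU domain, ERR_PTR() = attach failed, else the group */
static struct group *attach(int have_domain, int fail)
{
	static struct group g = { .id = 1 };

	if (!have_domain)
		return NULL;
	if (fail)
		return ERR_PTR(-ENODEV);
	return &g;
}

int main(void)
{
	struct group *grp = attach(1, 1);

	if (IS_ERR(grp))
		printf("attach failed: %ld\n", PTR_ERR(grp));
	else if (!grp)
		printf("no IOMMU domain, nothing attached\n");
	else
		printf("attached, group %d\n", grp->id);
	return 0;
}
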
From acc6a3a9afdd4e0537342012656cdb5c4a3127c5 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko <digetx@gmail.com>
Date: Fri, 4 May 2018 17:39:58 +0300
Subject: [PATCH 0948/1286] drm/tegra: dc: Enable plane scaling filters

Currently a resized plane produces a "pixelated" image that doesn't look
nice, especially in the case of a video overlay. Enable the scaling
filters, which significantly improve the image quality of a scaled overlay.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/dc.c | 81 ++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/tegra/dc.h |  7 ++++
 2 files changed, 88 insertions(+)

diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3e7ec3937346..a4dd866fc8be 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -224,6 +224,39 @@ static void tegra_plane_setup_blending(struct tegra_plane *plane,
 	tegra_plane_writel(plane, value, DC_WIN_BLEND_LAYER_CONTROL);
 }
 
+static bool
+tegra_plane_use_horizontal_filtering(struct tegra_plane *plane,
+				     const struct tegra_dc_window *window)
+{
+	struct tegra_dc *dc = plane->dc;
+
+	if (window->src.w == window->dst.w)
+		return false;
+
+	if (plane->index == 0 && dc->soc->has_win_a_without_filters)
+		return false;
+
+	return true;
+}
+
+static bool
+tegra_plane_use_vertical_filtering(struct tegra_plane *plane,
+				   const struct tegra_dc_window *window)
+{
+	struct tegra_dc *dc = plane->dc;
+
+	if (window->src.h == window->dst.h)
+		return false;
+
+	if (plane->index == 0 && dc->soc->has_win_a_without_filters)
+		return false;
+
+	if (plane->index == 2 && dc->soc->has_win_c_without_vert_filter)
+		return false;
+
+	return true;
+}
+
 static void tegra_dc_setup_window(struct tegra_plane *plane,
 				  const struct tegra_dc_window *window)
 {
@@ -361,6 +394,44 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
 	if (window->bottom_up)
 		value |= V_DIRECTION;
 
+	if (tegra_plane_use_horizontal_filtering(plane, window)) {
+		/*
+		 * Enable horizontal 6-tap filter and set filtering
+		 * coefficients to the default values defined in TRM.
+		 */
+		tegra_plane_writel(plane, 0x00008000, DC_WIN_H_FILTER_P(0));
+		tegra_plane_writel(plane, 0x3e087ce1, DC_WIN_H_FILTER_P(1));
+		tegra_plane_writel(plane, 0x3b117ac1, DC_WIN_H_FILTER_P(2));
+		tegra_plane_writel(plane, 0x591b73aa, DC_WIN_H_FILTER_P(3));
+		tegra_plane_writel(plane, 0x57256d9a, DC_WIN_H_FILTER_P(4));
+		tegra_plane_writel(plane, 0x552f668b, DC_WIN_H_FILTER_P(5));
+		tegra_plane_writel(plane, 0x73385e8b, DC_WIN_H_FILTER_P(6));
+		tegra_plane_writel(plane, 0x72435583, DC_WIN_H_FILTER_P(7));
+		tegra_plane_writel(plane, 0x714c4c8b, DC_WIN_H_FILTER_P(8));
+		tegra_plane_writel(plane, 0x70554393, DC_WIN_H_FILTER_P(9));
+		tegra_plane_writel(plane, 0x715e389b, DC_WIN_H_FILTER_P(10));
+		tegra_plane_writel(plane, 0x71662faa, DC_WIN_H_FILTER_P(11));
+		tegra_plane_writel(plane, 0x536d25ba, DC_WIN_H_FILTER_P(12));
+		tegra_plane_writel(plane, 0x55731bca, DC_WIN_H_FILTER_P(13));
+		tegra_plane_writel(plane, 0x387a11d9, DC_WIN_H_FILTER_P(14));
+		tegra_plane_writel(plane, 0x3c7c08f1, DC_WIN_H_FILTER_P(15));
+
+		value |= H_FILTER;
+	}
+
+	if (tegra_plane_use_vertical_filtering(plane, window)) {
+		unsigned int i, k;
+
+		/*
+		 * Enable vertical 2-tap filter and set filtering
+		 * coefficients to the default values defined in TRM.
+		 */
+		for (i = 0, k = 128; i < 16; i++, k -= 8)
+			tegra_plane_writel(plane, k, DC_WIN_V_FILTER_P(i));
+
+		value |= V_FILTER;
+	}
+
 	tegra_plane_writel(plane, value, DC_WIN_WIN_OPTIONS);
 
 	if (dc->soc->supports_blending)
@@ -1951,6 +2022,8 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
 	.num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
 	.overlay_formats = tegra20_overlay_formats,
 	.modifiers = tegra20_modifiers,
+	.has_win_a_without_filters = true,
+	.has_win_c_without_vert_filter = true,
 };
 
 static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -1968,6 +2041,8 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
 	.num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
 	.overlay_formats = tegra20_overlay_formats,
 	.modifiers = tegra20_modifiers,
+	.has_win_a_without_filters = false,
+	.has_win_c_without_vert_filter = false,
 };
 
 static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -1985,6 +2060,8 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
 	.num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
 	.overlay_formats = tegra114_overlay_formats,
 	.modifiers = tegra20_modifiers,
+	.has_win_a_without_filters = false,
+	.has_win_c_without_vert_filter = false,
 };
 
 static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -2002,6 +2079,8 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
 	.num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats),
 	.overlay_formats = tegra124_overlay_formats,
 	.modifiers = tegra124_modifiers,
+	.has_win_a_without_filters = false,
+	.has_win_c_without_vert_filter = false,
 };
 
 static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
@@ -2019,6 +2098,8 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
 	.num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
 	.overlay_formats = tegra114_overlay_formats,
 	.modifiers = tegra124_modifiers,
+	.has_win_a_without_filters = false,
+	.has_win_c_without_vert_filter = false,
 };
 
 static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 7be786febb17..556321fde9d2 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -67,6 +67,8 @@ struct tegra_dc_soc_info {
 	const u32 *overlay_formats;
 	unsigned int num_overlay_formats;
 	const u64 *modifiers;
+	bool has_win_a_without_filters;
+	bool has_win_c_without_vert_filter;
 };
 
 struct tegra_dc {
@@ -553,6 +555,9 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
 #define  THREAD_NUM(x) (((x) & 0x1f) << 1)
 #define  THREAD_GROUP_ENABLE (1 << 0)
 
+#define DC_WIN_H_FILTER_P(p)			(0x601 + (p))
+#define DC_WIN_V_FILTER_P(p)			(0x619 + (p))
+
 #define DC_WIN_CSC_YOF				0x611
 #define DC_WIN_CSC_KYRGB			0x612
 #define DC_WIN_CSC_KUR				0x613
@@ -566,6 +571,8 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
 #define H_DIRECTION  (1 <<  0)
 #define V_DIRECTION  (1 <<  2)
 #define COLOR_EXPAND (1 <<  6)
+#define H_FILTER     (1 <<  8)
+#define V_FILTER     (1 << 10)
 #define CSC_ENABLE   (1 << 18)
 #define WIN_ENABLE   (1 << 30)
 

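The vertical 2-tap filter coefficients form a simple linear ramp: the
loop programs the 16 phase registers with 128, 120, ..., 8 (the TRM
defaults). A quick stand-alone illustration of the values it writes:

#include <stdio.h>

int main(void)
{
	unsigned int i, k;

	/* same loop as in tegra_dc_setup_window(): DC_WIN_V_FILTER_P(i) <- k */
	for (i = 0, k = 128; i < 16; i++, k -= 8)
		printf("DC_WIN_V_FILTER_P(%2u) = %3u\n", i, k);

	return 0;
}
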
From 3dae08bc076b93487ed2df50bcfa892113e89d9d Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko <digetx@gmail.com>
Date: Fri, 4 May 2018 17:39:59 +0300
Subject: [PATCH 0949/1286] drm/tegra: plane: Implement zpos plane property for
 older Tegras

Older Tegras do not support plane Z-position handling in hardware,
but the hardware provides knobs to implement it in software.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/dc.c    | 140 ++++++++++++++++-------
 drivers/gpu/drm/tegra/plane.c | 207 ++++++++++++++++++++++++----------
 drivers/gpu/drm/tegra/plane.h |  13 ++-
 3 files changed, 254 insertions(+), 106 deletions(-)

diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index a4dd866fc8be..51581d9da509 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -163,28 +163,89 @@ static void tegra_plane_setup_blending_legacy(struct tegra_plane *plane)
 			 BLEND_COLOR_KEY_NONE;
 	u32 blendnokey = BLEND_WEIGHT1(255) | BLEND_WEIGHT0(255);
 	struct tegra_plane_state *state;
+	u32 blending[2];
 	unsigned int i;
 
-	state = to_tegra_plane_state(plane->base.state);
-
-	/* alpha contribution is 1 minus sum of overlapping windows */
-	for (i = 0; i < 3; i++) {
-		if (state->dependent[i])
-			background[i] |= BLEND_CONTROL_DEPENDENT;
-	}
-
-	/* enable alpha blending if pixel format has an alpha component */
-	if (!state->opaque)
-		foreground |= BLEND_CONTROL_ALPHA;
-
-	/*
-	 * Disable blending and assume Window A is the bottom-most window,
-	 * Window C is the top-most window and Window B is in the middle.
-	 */
+	/* disable blending for non-overlapping case */
 	tegra_plane_writel(plane, blendnokey, DC_WIN_BLEND_NOKEY);
 	tegra_plane_writel(plane, foreground, DC_WIN_BLEND_1WIN);
 
-	switch (plane->index) {
+	state = to_tegra_plane_state(plane->base.state);
+
+	if (state->opaque) {
+		/*
+		 * Since custom fix-weight blending isn't utilized and weight
+		 * of top window is set to max, we can enforce dependent
+		 * blending which in this case results in transparent bottom
+		 * window if top window is opaque and if top window enables
+		 * alpha blending, then bottom window is getting alpha value
+		 * of 1 minus the sum of alpha components of the overlapping
+		 * plane.
+		 */
+		background[0] |= BLEND_CONTROL_DEPENDENT;
+		background[1] |= BLEND_CONTROL_DEPENDENT;
+
+		/*
+		 * The region where three windows overlap is the intersection
+		 * of the two regions where two windows overlap. It contributes
+		 * to the area if all of the windows on top of it have an alpha
+		 * component.
+		 */
+		switch (state->base.normalized_zpos) {
+		case 0:
+			if (state->blending[0].alpha &&
+			    state->blending[1].alpha)
+				background[2] |= BLEND_CONTROL_DEPENDENT;
+			break;
+
+		case 1:
+			background[2] |= BLEND_CONTROL_DEPENDENT;
+			break;
+		}
+	} else {
+		/*
+		 * Enable alpha blending if pixel format has an alpha
+		 * component.
+		 */
+		foreground |= BLEND_CONTROL_ALPHA;
+
+		/*
+		 * If any of the windows on top of this window is opaque, it
+		 * will completely conceal this window within that area. If
+		 * top window has an alpha component, it is blended over the
+		 * bottom window.
+		 */
+		for (i = 0; i < 2; i++) {
+			if (state->blending[i].alpha &&
+			    state->blending[i].top)
+				background[i] |= BLEND_CONTROL_DEPENDENT;
+		}
+
+		switch (state->base.normalized_zpos) {
+		case 0:
+			if (state->blending[0].alpha &&
+			    state->blending[1].alpha)
+				background[2] |= BLEND_CONTROL_DEPENDENT;
+			break;
+
+		case 1:
+			/*
+			 * When both middle and topmost windows have an alpha,
+			 * these windows a mixed together and then the result
+			 * is blended over the bottom window.
+			 */
+			if (state->blending[0].alpha &&
+			    state->blending[0].top)
+				background[2] |= BLEND_CONTROL_ALPHA;
+
+			if (state->blending[1].alpha &&
+			    state->blending[1].top)
+				background[2] |= BLEND_CONTROL_ALPHA;
+			break;
+		}
+	}
+
+	switch (state->base.normalized_zpos) {
 	case 0:
 		tegra_plane_writel(plane, background[0], DC_WIN_BLEND_2WIN_X);
 		tegra_plane_writel(plane, background[1], DC_WIN_BLEND_2WIN_Y);
@@ -192,8 +253,21 @@ static void tegra_plane_setup_blending_legacy(struct tegra_plane *plane)
 		break;
 
 	case 1:
-		tegra_plane_writel(plane, foreground, DC_WIN_BLEND_2WIN_X);
-		tegra_plane_writel(plane, background[1], DC_WIN_BLEND_2WIN_Y);
+		/*
+		 * If window B / C is topmost, then X / Y registers are
+		 * matching the order of blending[...] state indices,
+		 * otherwise a swap is required.
+		 */
+		if (!state->blending[0].top && state->blending[1].top) {
+			blending[0] = foreground;
+			blending[1] = background[1];
+		} else {
+			blending[0] = background[0];
+			blending[1] = foreground;
+		}
+
+		tegra_plane_writel(plane, blending[0], DC_WIN_BLEND_2WIN_X);
+		tegra_plane_writel(plane, blending[1], DC_WIN_BLEND_2WIN_Y);
 		tegra_plane_writel(plane, background[2], DC_WIN_BLEND_3WIN_XY);
 		break;
 
@@ -525,14 +599,14 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
 	struct tegra_bo_tiling *tiling = &plane_state->tiling;
 	struct tegra_plane *tegra = to_tegra_plane(plane);
 	struct tegra_dc *dc = to_tegra_dc(state->crtc);
-	unsigned int format;
 	int err;
 
 	/* no need for further checks if the plane is being disabled */
 	if (!state->crtc)
 		return 0;
 
-	err = tegra_plane_format(state->fb->format->format, &format,
+	err = tegra_plane_format(state->fb->format->format,
+				 &plane_state->format,
 				 &plane_state->swap);
 	if (err < 0)
 		return err;
@@ -544,21 +618,11 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
 	 * be emulated by disabling alpha blending for the plane.
 	 */
 	if (!dc->soc->supports_blending) {
-		if (!tegra_plane_format_has_alpha(format)) {
-			err = tegra_plane_format_get_alpha(format, &format);
-			if (err < 0)
-				return err;
-
-			plane_state->opaque = true;
-		} else {
-			plane_state->opaque = false;
-		}
-
-		tegra_plane_check_dependent(tegra, plane_state);
+		err = tegra_plane_setup_legacy_state(tegra, plane_state);
+		if (err < 0)
+			return err;
 	}
 
-	plane_state->format = format;
-
 	err = tegra_fb_get_tiling(state->fb, tiling);
 	if (err < 0)
 		return err;
@@ -710,9 +774,7 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
 	}
 
 	drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
-
-	if (dc->soc->supports_blending)
-		drm_plane_create_zpos_property(&plane->base, 0, 0, 255);
+	drm_plane_create_zpos_property(&plane->base, plane->index, 0, 255);
 
 	return &plane->base;
 }
@@ -989,9 +1051,7 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
 	}
 
 	drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
-
-	if (dc->soc->supports_blending)
-		drm_plane_create_zpos_property(&plane->base, 0, 0, 255);
+	drm_plane_create_zpos_property(&plane->base, plane->index, 0, 255);
 
 	return &plane->base;
 }
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 176ef46c615c..0406c2ef432c 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -23,6 +23,7 @@ static void tegra_plane_destroy(struct drm_plane *plane)
 
 static void tegra_plane_reset(struct drm_plane *plane)
 {
+	struct tegra_plane *p = to_tegra_plane(plane);
 	struct tegra_plane_state *state;
 
 	if (plane->state)
@@ -35,6 +36,8 @@ static void tegra_plane_reset(struct drm_plane *plane)
 	if (state) {
 		plane->state = &state->base;
 		plane->state->plane = plane;
+		plane->state->zpos = p->index;
+		plane->state->normalized_zpos = p->index;
 	}
 }
 
@@ -55,8 +58,8 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
 	copy->swap = state->swap;
 	copy->opaque = state->opaque;
 
-	for (i = 0; i < 3; i++)
-		copy->dependent[i] = state->dependent[i];
+	for (i = 0; i < 2; i++)
+		copy->blending[i] = state->blending[i];
 
 	return &copy->base;
 }
@@ -267,24 +270,8 @@ static bool __drm_format_has_alpha(u32 format)
 	return false;
 }
 
-/*
- * This is applicable to Tegra20 and Tegra30 only where the opaque formats can
- * be emulated using the alpha formats and alpha blending disabled.
- */
-bool tegra_plane_format_has_alpha(unsigned int format)
-{
-	switch (format) {
-	case WIN_COLOR_DEPTH_B5G5R5A1:
-	case WIN_COLOR_DEPTH_A1B5G5R5:
-	case WIN_COLOR_DEPTH_R8G8B8A8:
-	case WIN_COLOR_DEPTH_B8G8R8A8:
-		return true;
-	}
-
-	return false;
-}
-
-int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
+static int tegra_plane_format_get_alpha(unsigned int opaque,
+					unsigned int *alpha)
 {
 	if (tegra_plane_format_is_yuv(opaque, NULL)) {
 		*alpha = opaque;
@@ -316,6 +303,67 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
 	return -EINVAL;
 }
 
+/*
+ * This is applicable to Tegra20 and Tegra30 only where the opaque formats can
+ * be emulated using the alpha formats and alpha blending disabled.
+ */
+static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
+				     struct tegra_plane_state *state)
+{
+	unsigned int format;
+	int err;
+
+	switch (state->format) {
+	case WIN_COLOR_DEPTH_B5G5R5A1:
+	case WIN_COLOR_DEPTH_A1B5G5R5:
+	case WIN_COLOR_DEPTH_R8G8B8A8:
+	case WIN_COLOR_DEPTH_B8G8R8A8:
+		state->opaque = false;
+		break;
+
+	default:
+		err = tegra_plane_format_get_alpha(state->format, &format);
+		if (err < 0)
+			return err;
+
+		state->format = format;
+		state->opaque = true;
+		break;
+	}
+
+	return 0;
+}
+
+static int tegra_plane_check_transparency(struct tegra_plane *tegra,
+					  struct tegra_plane_state *state)
+{
+	struct drm_plane_state *old, *plane_state;
+	struct drm_plane *plane;
+
+	old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);
+
+	/* check if zpos / transparency changed */
+	if (old->normalized_zpos == state->base.normalized_zpos &&
+	    to_tegra_plane_state(old)->opaque == state->opaque)
+		return 0;
+
+	/* include all sibling planes into this commit */
+	drm_for_each_plane(plane, tegra->base.dev) {
+		struct tegra_plane *p = to_tegra_plane(plane);
+
+		/* skip this plane and planes on different CRTCs */
+		if (p == tegra || p->dc != tegra->dc)
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state->base.state,
+							 plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+	}
+
+	return 1;
+}
+
 static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
 						  struct tegra_plane *other)
 {
@@ -336,61 +384,98 @@ static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
 	return index;
 }
 
-void tegra_plane_check_dependent(struct tegra_plane *tegra,
-				 struct tegra_plane_state *state)
+static void tegra_plane_update_transparency(struct tegra_plane *tegra,
+					    struct tegra_plane_state *state)
 {
-	struct drm_plane_state *old, *new;
+	struct drm_plane_state *new;
 	struct drm_plane *plane;
-	unsigned int zpos[2];
 	unsigned int i;
 
-	for (i = 0; i < 2; i++)
-		zpos[i] = 0;
-
-	for_each_oldnew_plane_in_state(state->base.state, plane, old, new, i) {
+	for_each_new_plane_in_state(state->base.state, plane, new, i) {
 		struct tegra_plane *p = to_tegra_plane(plane);
 		unsigned index;
 
 		/* skip this plane and planes on different CRTCs */
-		if (p == tegra || new->crtc != state->base.crtc)
+		if (p == tegra || p->dc != tegra->dc)
 			continue;
 
 		index = tegra_plane_get_overlap_index(tegra, p);
 
-		state->dependent[index] = false;
+		if (new->fb && __drm_format_has_alpha(new->fb->format->format))
+			state->blending[index].alpha = true;
+		else
+			state->blending[index].alpha = false;
+
+		if (new->normalized_zpos > state->base.normalized_zpos)
+			state->blending[index].top = true;
+		else
+			state->blending[index].top = false;
 
 		/*
-		 * If any of the other planes is on top of this plane and uses
-		 * a format with an alpha component, mark this plane as being
-		 * dependent, meaning it's alpha value will be 1 minus the sum
-		 * of alpha components of the overlapping planes.
+		 * A missing framebuffer means that the plane is disabled; in
+		 * that case mark the B / C window as top so that the window
+		 * index order relative to zpos can be distinguished when
+		 * programming the X / Y registers of the middle window.
 		 */
-		if (p->index > tegra->index) {
-			if (__drm_format_has_alpha(new->fb->format->format))
-				state->dependent[index] = true;
-
-			/* keep track of the Z position */
-			zpos[index] = p->index;
-		}
-	}
-
-	/*
-	 * The region where three windows overlap is the intersection of the
-	 * two regions where two windows overlap. It contributes to the area
-	 * if any of the windows on top of it have an alpha component.
-	 */
-	for (i = 0; i < 2; i++)
-		state->dependent[2] = state->dependent[2] ||
-				      state->dependent[i];
-
-	/*
-	 * However, if any of the windows on top of this window is opaque, it
-	 * will completely conceal this window within that area, so avoid the
-	 * window from contributing to the area.
-	 */
-	for (i = 0; i < 2; i++) {
-		if (zpos[i] > tegra->index)
-			state->dependent[2] = state->dependent[2] &&
-					      state->dependent[i];
+		if (!new->fb)
+			state->blending[index].top = (index == 1);
 	}
 }
+
+static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
+					  struct tegra_plane_state *state)
+{
+	struct tegra_plane_state *tegra_state;
+	struct drm_plane_state *new;
+	struct drm_plane *plane;
+	int err;
+
+	/*
+	 * If the planes' zpos / transparency changed, the blending state of
+	 * sibling planes may need adjustment; in that case they are included
+	 * in this atomic commit, otherwise the blending state is unchanged.
+	 */
+	err = tegra_plane_check_transparency(tegra, state);
+	if (err <= 0)
+		return err;
+
+	/*
+	 * All planes are now in the atomic state; walk over them and update
+	 * the transparency state for each plane.
+	 */
+	drm_for_each_plane(plane, tegra->base.dev) {
+		struct tegra_plane *p = to_tegra_plane(plane);
+
+		/* skip planes on different CRTCs */
+		if (p->dc != tegra->dc)
+			continue;
+
+		new = drm_atomic_get_new_plane_state(state->base.state, plane);
+		tegra_state = to_tegra_plane_state(new);
+
+		/*
+		 * There is no need to update blending state for the disabled
+		 * plane.
+		 */
+		if (new->fb)
+			tegra_plane_update_transparency(p, tegra_state);
+	}
+
+	return 0;
+}
+
+int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
+				   struct tegra_plane_state *state)
+{
+	int err;
+
+	err = tegra_plane_setup_opacity(tegra, state);
+	if (err < 0)
+		return err;
+
+	err = tegra_plane_setup_transparency(tegra, state);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index 6938719e7e5d..7360ddfafee8 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -34,6 +34,11 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
 	return container_of(plane, struct tegra_plane, base);
 }
 
+struct tegra_plane_legacy_blending_state {
+	bool alpha;
+	bool top;
+};
+
 struct tegra_plane_state {
 	struct drm_plane_state base;
 
@@ -42,8 +47,8 @@ struct tegra_plane_state {
 	u32 swap;
 
 	/* used for legacy blending support only */
+	struct tegra_plane_legacy_blending_state blending[2];
 	bool opaque;
-	bool dependent[3];
 };
 
 static inline struct tegra_plane_state *
@@ -62,9 +67,7 @@ int tegra_plane_state_add(struct tegra_plane *plane,
 
 int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap);
 bool tegra_plane_format_is_yuv(unsigned int format, bool *planar);
-bool tegra_plane_format_has_alpha(unsigned int format);
-int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha);
-void tegra_plane_check_dependent(struct tegra_plane *tegra,
-				 struct tegra_plane_state *state);
+int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
+				   struct tegra_plane_state *state);
 
 #endif /* TEGRA_PLANE_H */

From a43d0a00ea58a665905d94e8ab469ff888b1b0e1 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko <digetx@gmail.com>
Date: Fri, 4 May 2018 17:40:00 +0300
Subject: [PATCH 0950/1286] drm/tegra: dc: Rename supports_blending to
 has_legacy_blending

Older Tegra chips do support blending as well. Rename the SoC info entry
.supports_blending to .has_legacy_blending to eliminate the confusion.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/dc.c | 20 ++++++++++----------
 drivers/gpu/drm/tegra/dc.h |  2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 51581d9da509..31e12a9dfcb8 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -508,10 +508,10 @@ static void tegra_dc_setup_window(struct tegra_plane *plane,
 
 	tegra_plane_writel(plane, value, DC_WIN_WIN_OPTIONS);
 
-	if (dc->soc->supports_blending)
-		tegra_plane_setup_blending(plane, window);
-	else
+	if (dc->soc->has_legacy_blending)
 		tegra_plane_setup_blending_legacy(plane);
+	else
+		tegra_plane_setup_blending(plane, window);
 }
 
 static const u32 tegra20_primary_formats[] = {
@@ -617,7 +617,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
 	 * the corresponding opaque formats. However, the opaque formats can
 	 * be emulated by disabling alpha blending for the plane.
 	 */
-	if (!dc->soc->supports_blending) {
+	if (dc->soc->has_legacy_blending) {
 		err = tegra_plane_setup_legacy_state(tegra, plane_state);
 		if (err < 0)
 			return err;
@@ -2072,7 +2072,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
 	.supports_interlacing = false,
 	.supports_cursor = false,
 	.supports_block_linear = false,
-	.supports_blending = false,
+	.has_legacy_blending = true,
 	.pitch_align = 8,
 	.has_powergate = false,
 	.coupled_pm = true,
@@ -2091,7 +2091,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
 	.supports_interlacing = false,
 	.supports_cursor = false,
 	.supports_block_linear = false,
-	.supports_blending = false,
+	.has_legacy_blending = true,
 	.pitch_align = 8,
 	.has_powergate = false,
 	.coupled_pm = false,
@@ -2110,7 +2110,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
 	.supports_interlacing = false,
 	.supports_cursor = false,
 	.supports_block_linear = false,
-	.supports_blending = false,
+	.has_legacy_blending = true,
 	.pitch_align = 64,
 	.has_powergate = true,
 	.coupled_pm = false,
@@ -2129,7 +2129,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
 	.supports_interlacing = true,
 	.supports_cursor = true,
 	.supports_block_linear = true,
-	.supports_blending = true,
+	.has_legacy_blending = false,
 	.pitch_align = 64,
 	.has_powergate = true,
 	.coupled_pm = false,
@@ -2148,7 +2148,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
 	.supports_interlacing = true,
 	.supports_cursor = true,
 	.supports_block_linear = true,
-	.supports_blending = true,
+	.has_legacy_blending = false,
 	.pitch_align = 64,
 	.has_powergate = true,
 	.coupled_pm = false,
@@ -2201,7 +2201,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
 	.supports_interlacing = true,
 	.supports_cursor = true,
 	.supports_block_linear = true,
-	.supports_blending = true,
+	.has_legacy_blending = false,
 	.pitch_align = 64,
 	.has_powergate = false,
 	.coupled_pm = false,
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 556321fde9d2..e96f582ca692 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -55,7 +55,7 @@ struct tegra_dc_soc_info {
 	bool supports_interlacing;
 	bool supports_cursor;
 	bool supports_block_linear;
-	bool supports_blending;
+	bool has_legacy_blending;
 	unsigned int pitch_align;
 	bool has_powergate;
 	bool coupled_pm;

From c5fb5426dda897fbfeb3ddba81c9811f1178132c Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Sat, 10 Mar 2018 05:15:18 +0800
Subject: [PATCH 0951/1286] drm/amdgpu/gfx9: Update golden setting for gfx9_0.

Update golden_settings_gc_9_0[].

Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fc1911834ab5..b05b7ae4d035 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -72,29 +72,22 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
 
 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 {
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =

From 73aa1b9af5947f103913124f93ca19e6f3af1c1b Mon Sep 17 00:00:00 2001
From: Evan Quan <evan.quan@amd.com>
Date: Wed, 9 May 2018 10:57:53 +0800
Subject: [PATCH 0952/1286] drm/amd/powerplay: new framework to honour DAL
 clock limits

This is needed for vega12 and vega20, which do not support the legacy
power states. With this new framework, the DAL clock limits can also
be honored on these ASICs.
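
As an illustrative sketch (not part of this patch), a backend that wants
to honour the DAL limits implements the new callback and registers it in
its pp_hwmgr_func table; the backend and function names below are
hypothetical placeholders:

  static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
  {
          /*
           * Clamp the requested clocks to the limits reported by DAL;
           * the actual clamping is ASIC specific and omitted here.
           */
          return 0;
  }

  static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
          /* ... other callbacks ... */
          .apply_clocks_adjust_rules = vega12_apply_clocks_adjust_rules,
  };

psm_adjust_power_state_dynamic() then reaches the hook through
phm_apply_clock_adjust_rules() whenever no legacy power state is
available (hwmgr->ps == NULL).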

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 9 +++++++++
 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c          | 7 +++++++
 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h   | 2 ++
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h             | 2 ++
 4 files changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index e411012b3dcb..f5571e9fde26 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -132,6 +132,15 @@ int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
+int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
+		return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
+	return 0;
+}
+
 int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
 	PHM_FUNC_CHECK(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 308bff2b5d1d..2a2955c17d78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -265,6 +265,13 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
 	if (skip)
 		return 0;
 
+	if (!hwmgr->ps)
+		/*
+		 * For vega12/vega20, which do not support the power state
+		 * manager, the DAL clock limits should also be honoured.
+		 */
+		phm_apply_clock_adjust_rules(hwmgr);
+
 	phm_display_configuration_changed(hwmgr);
 
 	if (hwmgr->ps)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 9bb87857a20f..e029555dfc2d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -410,6 +410,8 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				   struct pp_power_state *adjusted_ps,
 			     const struct pp_power_state *current_ps);
 
+extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr);
+
 extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level);
 extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
 extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 3c321c7d9626..9b6c6af869a6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -229,6 +229,8 @@ struct pp_hwmgr_func {
 				struct pp_power_state  *prequest_ps,
 			const struct pp_power_state *pcurrent_ps);
 
+	int (*apply_clocks_adjust_rules)(struct pp_hwmgr *hwmgr);
+
 	int (*force_dpm_level)(struct pp_hwmgr *hw_mgr,
 					enum amd_dpm_forced_level level);
 

From 11a89b431e41dcfaa4e7b9806233f60de905287b Mon Sep 17 00:00:00 2001
From: Evan Quan <evan.quan@amd.com>
Date: Wed, 9 May 2018 11:08:29 +0800
Subject: [PATCH 0953/1286] drm/amd/powerplay: add a framework for performing
 pre display configuration change settings
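
The new hook lets a backend apply any settings that must be in place
before the display configuration change is committed. A backend opts in
by filling the new pp_hwmgr_func member; the function name below is only
an illustrative placeholder:

  static int vega12_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
  {
          /*
           * Program whatever must be set up before the new display
           * configuration takes effect (ASIC specific, omitted here).
           */
          return 0;
  }

  /* in the backend's pp_hwmgr_func table: */
  .pre_display_config_changed = vega12_pre_display_configuration_changed,

phm_pre_display_configuration_changed() invokes the hook from
psm_adjust_power_state_dynamic(), just before the existing
phm_display_configuration_changed() notification.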

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 10 ++++++++++
 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c          |  2 ++
 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h   |  1 +
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h             |  1 +
 4 files changed, 14 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index f5571e9fde26..a0bb921fac22 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -170,6 +170,16 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
+		hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);
+
+	return 0;
+
+}
 
 int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
 {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 2a2955c17d78..0af13c154328 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -272,6 +272,8 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
 		 */
 		phm_apply_clock_adjust_rules(hwmgr);
 
+	phm_pre_display_configuration_changed(hwmgr);
+
 	phm_display_configuration_changed(hwmgr);
 
 	if (hwmgr->ps)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index e029555dfc2d..a202247c9894 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -413,6 +413,7 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr);
 
 extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level);
+extern int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr);
 extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
 extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
 extern int phm_register_irq_handlers(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 9b6c6af869a6..b99fb8ac822c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -254,6 +254,7 @@ struct pp_hwmgr_func {
 						const void *state);
 	int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
 	int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
+	int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr);
 	int (*display_config_changed)(struct pp_hwmgr *hwmgr);
 	int (*disable_clock_power_gating)(struct pp_hwmgr *hwmgr);
 	int (*update_clock_gatings)(struct pp_hwmgr *hwmgr,

From cc3a98cc6efd0e60e0ed547f9f76f5d4e23fb758 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 11 May 2018 14:41:40 +0800
Subject: [PATCH 0954/1286] drm/amdgpu: Drop the unused header files in
 soc15.c.

Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 90065766fffb..f31df18fcb81 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -41,8 +41,6 @@
 #include "sdma1/sdma1_4_0_offset.h"
 #include "hdp/hdp_4_0_offset.h"
 #include "hdp/hdp_4_0_sh_mask.h"
-#include "mp/mp_9_0_offset.h"
-#include "mp/mp_9_0_sh_mask.h"
 #include "smuio/smuio_9_0_offset.h"
 #include "smuio/smuio_9_0_sh_mask.h"
 

From b6110c00ced26b66999eb00b90c35b767cd45da4 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Tue, 6 Feb 2018 12:29:23 +0800
Subject: [PATCH 0955/1286] drm/amdgpu: Fix hardcoded base offset of vram pages

In gmc_v9_0_vram_gtt_location(), the vram_base_offset is hardcoded
to 0 for dGPUs. Fix it by reading mmMC_VM_FB_OFFSET, or by returning
zfb_phys_addr if ZFB is enabled.
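
For reference, the helper this now relies on is expected to look roughly
like the sketch below; the exact register read and shift are recalled
assumptions rather than part of this diff, and the ZFB path is omitted:

  u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
  {
          /* MC_VM_FB_OFFSET holds the VRAM base in 16 MB (1 << 24) units */
          return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
  }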

Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6cccf0e0acd7..734306902e4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -693,10 +693,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
 	amdgpu_device_vram_location(adev, &adev->gmc, base);
 	amdgpu_device_gart_location(adev, mc);
 	/* base offset of vram pages */
-	if (adev->flags & AMD_IS_APU)
-		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
-	else
-		adev->vm_manager.vram_base_offset = 0;
+	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
 }
 
 /**

From 5eb26e7ae16b8da302a361824b9c4a53a6f3ee0f Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Tue, 30 Jan 2018 10:59:23 +0800
Subject: [PATCH 0956/1286] drm/amd: Add vega20_ip_offset.h header file for
 vega20. (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This header file contains vega20's IP base addresses.

v2: squash in MP1_BASE fix
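
The tables are meant to be consumed like the other SOC15 IP offset
headers: a register's DWORD address is the segment base for the IP
instance plus the register's offset within that segment. A simplified
illustration (the helper name is a placeholder, not part of this header):

  static u32 vega20_reg_addr(const struct IP_BASE *ip, unsigned int inst,
                             unsigned int seg, u32 reg_offset)
  {
          /* segment base + offset within the segment */
          return ip->instance[inst].segment[seg] + reg_offset;
  }

For example, a GC register in segment 1 of instance 0 resolves to
GC_BASE.instance[0].segment[1] (0x0000A000) plus its offset.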

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/include/vega20_ip_offset.h    | 1050 +++++++++++++++++
 1 file changed, 1050 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/include/vega20_ip_offset.h

diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
new file mode 100644
index 000000000000..97db93ceba4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _vega20_ip_offset_HEADER
+#define _vega20_ip_offset_HEADER
+
+#define MAX_INSTANCE                                       6
+#define MAX_SEGMENT                                        6
+
+
+struct IP_BASE_INSTANCE
+{
+    unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE
+{
+    struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+
+static const struct IP_BASE ATHUB_BASE            ={ { { { 0x00000C20, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE            ={ { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DCE_BASE            ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE            ={ { { { 0x00007000, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE            ={ { { { 0x00017400, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE GC_BASE            ={ { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE HDP_BASE            ={ { { { 0x00000F20, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MMHUB_BASE            ={ { { { 0x0001A000, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP0_BASE            ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE MP1_BASE            ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE NBIO_BASE            ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE OSSSYS_BASE            ={ { { { 0x000010A0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA0_BASE            ={ { { { 0x00001260, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SDMA1_BASE            ={ { { { 0x00001860, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE SMUIO_BASE            ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE THM_BASE            ={ { { { 0x00016600, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UMC_BASE            ={ { { { 0x00014000, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE UVD_BASE            ={ { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } },
+                                        { { 0, 0x00009000, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE VCE_BASE            ={ { { { 0x00008800, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE XDMA_BASE            ={ { { { 0x00003400, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE RSMU_BASE            ={ { { { 0x00012000, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } },
+                                        { { 0, 0, 0, 0, 0, 0 } } } };
+
+
+#define ATHUB_BASE__INST0_SEG0                     0x00000C20
+#define ATHUB_BASE__INST0_SEG1                     0
+#define ATHUB_BASE__INST0_SEG2                     0
+#define ATHUB_BASE__INST0_SEG3                     0
+#define ATHUB_BASE__INST0_SEG4                     0
+#define ATHUB_BASE__INST0_SEG5                     0
+
+#define ATHUB_BASE__INST1_SEG0                     0
+#define ATHUB_BASE__INST1_SEG1                     0
+#define ATHUB_BASE__INST1_SEG2                     0
+#define ATHUB_BASE__INST1_SEG3                     0
+#define ATHUB_BASE__INST1_SEG4                     0
+#define ATHUB_BASE__INST1_SEG5                     0
+
+#define ATHUB_BASE__INST2_SEG0                     0
+#define ATHUB_BASE__INST2_SEG1                     0
+#define ATHUB_BASE__INST2_SEG2                     0
+#define ATHUB_BASE__INST2_SEG3                     0
+#define ATHUB_BASE__INST2_SEG4                     0
+#define ATHUB_BASE__INST2_SEG5                     0
+
+#define ATHUB_BASE__INST3_SEG0                     0
+#define ATHUB_BASE__INST3_SEG1                     0
+#define ATHUB_BASE__INST3_SEG2                     0
+#define ATHUB_BASE__INST3_SEG3                     0
+#define ATHUB_BASE__INST3_SEG4                     0
+#define ATHUB_BASE__INST3_SEG5                     0
+
+#define ATHUB_BASE__INST4_SEG0                     0
+#define ATHUB_BASE__INST4_SEG1                     0
+#define ATHUB_BASE__INST4_SEG2                     0
+#define ATHUB_BASE__INST4_SEG3                     0
+#define ATHUB_BASE__INST4_SEG4                     0
+#define ATHUB_BASE__INST4_SEG5                     0
+
+#define ATHUB_BASE__INST5_SEG0                     0
+#define ATHUB_BASE__INST5_SEG1                     0
+#define ATHUB_BASE__INST5_SEG2                     0
+#define ATHUB_BASE__INST5_SEG3                     0
+#define ATHUB_BASE__INST5_SEG4                     0
+#define ATHUB_BASE__INST5_SEG5                     0
+
+#define CLK_BASE__INST0_SEG0                       0x00016C00
+#define CLK_BASE__INST0_SEG1                       0x00016E00
+#define CLK_BASE__INST0_SEG2                       0x00017000
+#define CLK_BASE__INST0_SEG3                       0x00017200
+#define CLK_BASE__INST0_SEG4                       0x0001B000
+#define CLK_BASE__INST0_SEG5                       0x0001B200
+
+#define CLK_BASE__INST1_SEG0                       0
+#define CLK_BASE__INST1_SEG1                       0
+#define CLK_BASE__INST1_SEG2                       0
+#define CLK_BASE__INST1_SEG3                       0
+#define CLK_BASE__INST1_SEG4                       0
+#define CLK_BASE__INST1_SEG5                       0
+
+#define CLK_BASE__INST2_SEG0                       0
+#define CLK_BASE__INST2_SEG1                       0
+#define CLK_BASE__INST2_SEG2                       0
+#define CLK_BASE__INST2_SEG3                       0
+#define CLK_BASE__INST2_SEG4                       0
+#define CLK_BASE__INST2_SEG5                       0
+
+#define CLK_BASE__INST3_SEG0                       0
+#define CLK_BASE__INST3_SEG1                       0
+#define CLK_BASE__INST3_SEG2                       0
+#define CLK_BASE__INST3_SEG3                       0
+#define CLK_BASE__INST3_SEG4                       0
+#define CLK_BASE__INST3_SEG5                       0
+
+#define CLK_BASE__INST4_SEG0                       0
+#define CLK_BASE__INST4_SEG1                       0
+#define CLK_BASE__INST4_SEG2                       0
+#define CLK_BASE__INST4_SEG3                       0
+#define CLK_BASE__INST4_SEG4                       0
+#define CLK_BASE__INST4_SEG5                       0
+
+#define CLK_BASE__INST5_SEG0                       0
+#define CLK_BASE__INST5_SEG1                       0
+#define CLK_BASE__INST5_SEG2                       0
+#define CLK_BASE__INST5_SEG3                       0
+#define CLK_BASE__INST5_SEG4                       0
+#define CLK_BASE__INST5_SEG5                       0
+
+#define DCE_BASE__INST0_SEG0                       0x00000012
+#define DCE_BASE__INST0_SEG1                       0x000000C0
+#define DCE_BASE__INST0_SEG2                       0x000034C0
+#define DCE_BASE__INST0_SEG3                       0
+#define DCE_BASE__INST0_SEG4                       0
+#define DCE_BASE__INST0_SEG5                       0
+
+#define DCE_BASE__INST1_SEG0                       0
+#define DCE_BASE__INST1_SEG1                       0
+#define DCE_BASE__INST1_SEG2                       0
+#define DCE_BASE__INST1_SEG3                       0
+#define DCE_BASE__INST1_SEG4                       0
+#define DCE_BASE__INST1_SEG5                       0
+
+#define DCE_BASE__INST2_SEG0                       0
+#define DCE_BASE__INST2_SEG1                       0
+#define DCE_BASE__INST2_SEG2                       0
+#define DCE_BASE__INST2_SEG3                       0
+#define DCE_BASE__INST2_SEG4                       0
+#define DCE_BASE__INST2_SEG5                       0
+
+#define DCE_BASE__INST3_SEG0                       0
+#define DCE_BASE__INST3_SEG1                       0
+#define DCE_BASE__INST3_SEG2                       0
+#define DCE_BASE__INST3_SEG3                       0
+#define DCE_BASE__INST3_SEG4                       0
+#define DCE_BASE__INST3_SEG5                       0
+
+#define DCE_BASE__INST4_SEG0                       0
+#define DCE_BASE__INST4_SEG1                       0
+#define DCE_BASE__INST4_SEG2                       0
+#define DCE_BASE__INST4_SEG3                       0
+#define DCE_BASE__INST4_SEG4                       0
+#define DCE_BASE__INST4_SEG5                       0
+
+#define DCE_BASE__INST5_SEG0                       0
+#define DCE_BASE__INST5_SEG1                       0
+#define DCE_BASE__INST5_SEG2                       0
+#define DCE_BASE__INST5_SEG3                       0
+#define DCE_BASE__INST5_SEG4                       0
+#define DCE_BASE__INST5_SEG5                       0
+
+#define DF_BASE__INST0_SEG0                        0x00007000
+#define DF_BASE__INST0_SEG1                        0
+#define DF_BASE__INST0_SEG2                        0
+#define DF_BASE__INST0_SEG3                        0
+#define DF_BASE__INST0_SEG4                        0
+#define DF_BASE__INST0_SEG5                        0
+
+#define DF_BASE__INST1_SEG0                        0
+#define DF_BASE__INST1_SEG1                        0
+#define DF_BASE__INST1_SEG2                        0
+#define DF_BASE__INST1_SEG3                        0
+#define DF_BASE__INST1_SEG4                        0
+#define DF_BASE__INST1_SEG5                        0
+
+#define DF_BASE__INST2_SEG0                        0
+#define DF_BASE__INST2_SEG1                        0
+#define DF_BASE__INST2_SEG2                        0
+#define DF_BASE__INST2_SEG3                        0
+#define DF_BASE__INST2_SEG4                        0
+#define DF_BASE__INST2_SEG5                        0
+
+#define DF_BASE__INST3_SEG0                        0
+#define DF_BASE__INST3_SEG1                        0
+#define DF_BASE__INST3_SEG2                        0
+#define DF_BASE__INST3_SEG3                        0
+#define DF_BASE__INST3_SEG4                        0
+#define DF_BASE__INST3_SEG5                        0
+
+#define DF_BASE__INST4_SEG0                        0
+#define DF_BASE__INST4_SEG1                        0
+#define DF_BASE__INST4_SEG2                        0
+#define DF_BASE__INST4_SEG3                        0
+#define DF_BASE__INST4_SEG4                        0
+#define DF_BASE__INST4_SEG5                        0
+
+#define DF_BASE__INST5_SEG0                        0
+#define DF_BASE__INST5_SEG1                        0
+#define DF_BASE__INST5_SEG2                        0
+#define DF_BASE__INST5_SEG3                        0
+#define DF_BASE__INST5_SEG4                        0
+#define DF_BASE__INST5_SEG5                        0
+
+#define FUSE_BASE__INST0_SEG0                      0x00017400
+#define FUSE_BASE__INST0_SEG1                      0
+#define FUSE_BASE__INST0_SEG2                      0
+#define FUSE_BASE__INST0_SEG3                      0
+#define FUSE_BASE__INST0_SEG4                      0
+#define FUSE_BASE__INST0_SEG5                      0
+
+#define FUSE_BASE__INST1_SEG0                      0
+#define FUSE_BASE__INST1_SEG1                      0
+#define FUSE_BASE__INST1_SEG2                      0
+#define FUSE_BASE__INST1_SEG3                      0
+#define FUSE_BASE__INST1_SEG4                      0
+#define FUSE_BASE__INST1_SEG5                      0
+
+#define FUSE_BASE__INST2_SEG0                      0
+#define FUSE_BASE__INST2_SEG1                      0
+#define FUSE_BASE__INST2_SEG2                      0
+#define FUSE_BASE__INST2_SEG3                      0
+#define FUSE_BASE__INST2_SEG4                      0
+#define FUSE_BASE__INST2_SEG5                      0
+
+#define FUSE_BASE__INST3_SEG0                      0
+#define FUSE_BASE__INST3_SEG1                      0
+#define FUSE_BASE__INST3_SEG2                      0
+#define FUSE_BASE__INST3_SEG3                      0
+#define FUSE_BASE__INST3_SEG4                      0
+#define FUSE_BASE__INST3_SEG5                      0
+
+#define FUSE_BASE__INST4_SEG0                      0
+#define FUSE_BASE__INST4_SEG1                      0
+#define FUSE_BASE__INST4_SEG2                      0
+#define FUSE_BASE__INST4_SEG3                      0
+#define FUSE_BASE__INST4_SEG4                      0
+#define FUSE_BASE__INST4_SEG5                      0
+
+#define FUSE_BASE__INST5_SEG0                      0
+#define FUSE_BASE__INST5_SEG1                      0
+#define FUSE_BASE__INST5_SEG2                      0
+#define FUSE_BASE__INST5_SEG3                      0
+#define FUSE_BASE__INST5_SEG4                      0
+#define FUSE_BASE__INST5_SEG5                      0
+
+#define GC_BASE__INST0_SEG0                        0x00002000
+#define GC_BASE__INST0_SEG1                        0x0000A000
+#define GC_BASE__INST0_SEG2                        0
+#define GC_BASE__INST0_SEG3                        0
+#define GC_BASE__INST0_SEG4                        0
+#define GC_BASE__INST0_SEG5                        0
+
+#define GC_BASE__INST1_SEG0                        0
+#define GC_BASE__INST1_SEG1                        0
+#define GC_BASE__INST1_SEG2                        0
+#define GC_BASE__INST1_SEG3                        0
+#define GC_BASE__INST1_SEG4                        0
+#define GC_BASE__INST1_SEG5                        0
+
+#define GC_BASE__INST2_SEG0                        0
+#define GC_BASE__INST2_SEG1                        0
+#define GC_BASE__INST2_SEG2                        0
+#define GC_BASE__INST2_SEG3                        0
+#define GC_BASE__INST2_SEG4                        0
+#define GC_BASE__INST2_SEG5                        0
+
+#define GC_BASE__INST3_SEG0                        0
+#define GC_BASE__INST3_SEG1                        0
+#define GC_BASE__INST3_SEG2                        0
+#define GC_BASE__INST3_SEG3                        0
+#define GC_BASE__INST3_SEG4                        0
+#define GC_BASE__INST3_SEG5                        0
+
+#define GC_BASE__INST4_SEG0                        0
+#define GC_BASE__INST4_SEG1                        0
+#define GC_BASE__INST4_SEG2                        0
+#define GC_BASE__INST4_SEG3                        0
+#define GC_BASE__INST4_SEG4                        0
+#define GC_BASE__INST4_SEG5                        0
+
+#define GC_BASE__INST5_SEG0                        0
+#define GC_BASE__INST5_SEG1                        0
+#define GC_BASE__INST5_SEG2                        0
+#define GC_BASE__INST5_SEG3                        0
+#define GC_BASE__INST5_SEG4                        0
+#define GC_BASE__INST5_SEG5                        0
+
+#define HDP_BASE__INST0_SEG0                       0x00000F20
+#define HDP_BASE__INST0_SEG1                       0
+#define HDP_BASE__INST0_SEG2                       0
+#define HDP_BASE__INST0_SEG3                       0
+#define HDP_BASE__INST0_SEG4                       0
+#define HDP_BASE__INST0_SEG5                       0
+
+#define HDP_BASE__INST1_SEG0                       0
+#define HDP_BASE__INST1_SEG1                       0
+#define HDP_BASE__INST1_SEG2                       0
+#define HDP_BASE__INST1_SEG3                       0
+#define HDP_BASE__INST1_SEG4                       0
+#define HDP_BASE__INST1_SEG5                       0
+
+#define HDP_BASE__INST2_SEG0                       0
+#define HDP_BASE__INST2_SEG1                       0
+#define HDP_BASE__INST2_SEG2                       0
+#define HDP_BASE__INST2_SEG3                       0
+#define HDP_BASE__INST2_SEG4                       0
+#define HDP_BASE__INST2_SEG5                       0
+
+#define HDP_BASE__INST3_SEG0                       0
+#define HDP_BASE__INST3_SEG1                       0
+#define HDP_BASE__INST3_SEG2                       0
+#define HDP_BASE__INST3_SEG3                       0
+#define HDP_BASE__INST3_SEG4                       0
+#define HDP_BASE__INST3_SEG5                       0
+
+#define HDP_BASE__INST4_SEG0                       0
+#define HDP_BASE__INST4_SEG1                       0
+#define HDP_BASE__INST4_SEG2                       0
+#define HDP_BASE__INST4_SEG3                       0
+#define HDP_BASE__INST4_SEG4                       0
+#define HDP_BASE__INST4_SEG5                       0
+
+#define HDP_BASE__INST5_SEG0                       0
+#define HDP_BASE__INST5_SEG1                       0
+#define HDP_BASE__INST5_SEG2                       0
+#define HDP_BASE__INST5_SEG3                       0
+#define HDP_BASE__INST5_SEG4                       0
+#define HDP_BASE__INST5_SEG5                       0
+
+#define MMHUB_BASE__INST0_SEG0                     0x0001A000
+#define MMHUB_BASE__INST0_SEG1                     0
+#define MMHUB_BASE__INST0_SEG2                     0
+#define MMHUB_BASE__INST0_SEG3                     0
+#define MMHUB_BASE__INST0_SEG4                     0
+#define MMHUB_BASE__INST0_SEG5                     0
+
+#define MMHUB_BASE__INST1_SEG0                     0
+#define MMHUB_BASE__INST1_SEG1                     0
+#define MMHUB_BASE__INST1_SEG2                     0
+#define MMHUB_BASE__INST1_SEG3                     0
+#define MMHUB_BASE__INST1_SEG4                     0
+#define MMHUB_BASE__INST1_SEG5                     0
+
+#define MMHUB_BASE__INST2_SEG0                     0
+#define MMHUB_BASE__INST2_SEG1                     0
+#define MMHUB_BASE__INST2_SEG2                     0
+#define MMHUB_BASE__INST2_SEG3                     0
+#define MMHUB_BASE__INST2_SEG4                     0
+#define MMHUB_BASE__INST2_SEG5                     0
+
+#define MMHUB_BASE__INST3_SEG0                     0
+#define MMHUB_BASE__INST3_SEG1                     0
+#define MMHUB_BASE__INST3_SEG2                     0
+#define MMHUB_BASE__INST3_SEG3                     0
+#define MMHUB_BASE__INST3_SEG4                     0
+#define MMHUB_BASE__INST3_SEG5                     0
+
+#define MMHUB_BASE__INST4_SEG0                     0
+#define MMHUB_BASE__INST4_SEG1                     0
+#define MMHUB_BASE__INST4_SEG2                     0
+#define MMHUB_BASE__INST4_SEG3                     0
+#define MMHUB_BASE__INST4_SEG4                     0
+#define MMHUB_BASE__INST4_SEG5                     0
+
+#define MMHUB_BASE__INST5_SEG0                     0
+#define MMHUB_BASE__INST5_SEG1                     0
+#define MMHUB_BASE__INST5_SEG2                     0
+#define MMHUB_BASE__INST5_SEG3                     0
+#define MMHUB_BASE__INST5_SEG4                     0
+#define MMHUB_BASE__INST5_SEG5                     0
+
+#define MP0_BASE__INST0_SEG0                       0x00016000
+#define MP0_BASE__INST0_SEG1                       0
+#define MP0_BASE__INST0_SEG2                       0
+#define MP0_BASE__INST0_SEG3                       0
+#define MP0_BASE__INST0_SEG4                       0
+#define MP0_BASE__INST0_SEG5                       0
+
+#define MP0_BASE__INST1_SEG0                       0
+#define MP0_BASE__INST1_SEG1                       0
+#define MP0_BASE__INST1_SEG2                       0
+#define MP0_BASE__INST1_SEG3                       0
+#define MP0_BASE__INST1_SEG4                       0
+#define MP0_BASE__INST1_SEG5                       0
+
+#define MP0_BASE__INST2_SEG0                       0
+#define MP0_BASE__INST2_SEG1                       0
+#define MP0_BASE__INST2_SEG2                       0
+#define MP0_BASE__INST2_SEG3                       0
+#define MP0_BASE__INST2_SEG4                       0
+#define MP0_BASE__INST2_SEG5                       0
+
+#define MP0_BASE__INST3_SEG0                       0
+#define MP0_BASE__INST3_SEG1                       0
+#define MP0_BASE__INST3_SEG2                       0
+#define MP0_BASE__INST3_SEG3                       0
+#define MP0_BASE__INST3_SEG4                       0
+#define MP0_BASE__INST3_SEG5                       0
+
+#define MP0_BASE__INST4_SEG0                       0
+#define MP0_BASE__INST4_SEG1                       0
+#define MP0_BASE__INST4_SEG2                       0
+#define MP0_BASE__INST4_SEG3                       0
+#define MP0_BASE__INST4_SEG4                       0
+#define MP0_BASE__INST4_SEG5                       0
+
+#define MP0_BASE__INST5_SEG0                       0
+#define MP0_BASE__INST5_SEG1                       0
+#define MP0_BASE__INST5_SEG2                       0
+#define MP0_BASE__INST5_SEG3                       0
+#define MP0_BASE__INST5_SEG4                       0
+#define MP0_BASE__INST5_SEG5                       0
+
+#define MP1_BASE__INST0_SEG0                       0x00016000
+#define MP1_BASE__INST0_SEG1                       0
+#define MP1_BASE__INST0_SEG2                       0
+#define MP1_BASE__INST0_SEG3                       0
+#define MP1_BASE__INST0_SEG4                       0
+#define MP1_BASE__INST0_SEG5                       0
+
+#define MP1_BASE__INST1_SEG0                       0
+#define MP1_BASE__INST1_SEG1                       0
+#define MP1_BASE__INST1_SEG2                       0
+#define MP1_BASE__INST1_SEG3                       0
+#define MP1_BASE__INST1_SEG4                       0
+#define MP1_BASE__INST1_SEG5                       0
+
+#define MP1_BASE__INST2_SEG0                       0
+#define MP1_BASE__INST2_SEG1                       0
+#define MP1_BASE__INST2_SEG2                       0
+#define MP1_BASE__INST2_SEG3                       0
+#define MP1_BASE__INST2_SEG4                       0
+#define MP1_BASE__INST2_SEG5                       0
+
+#define MP1_BASE__INST3_SEG0                       0
+#define MP1_BASE__INST3_SEG1                       0
+#define MP1_BASE__INST3_SEG2                       0
+#define MP1_BASE__INST3_SEG3                       0
+#define MP1_BASE__INST3_SEG4                       0
+#define MP1_BASE__INST3_SEG5                       0
+
+#define MP1_BASE__INST4_SEG0                       0
+#define MP1_BASE__INST4_SEG1                       0
+#define MP1_BASE__INST4_SEG2                       0
+#define MP1_BASE__INST4_SEG3                       0
+#define MP1_BASE__INST4_SEG4                       0
+#define MP1_BASE__INST4_SEG5                       0
+
+#define MP1_BASE__INST5_SEG0                       0
+#define MP1_BASE__INST5_SEG1                       0
+#define MP1_BASE__INST5_SEG2                       0
+#define MP1_BASE__INST5_SEG3                       0
+#define MP1_BASE__INST5_SEG4                       0
+#define MP1_BASE__INST5_SEG5                       0
+
+#define NBIO_BASE__INST0_SEG0                      0x00000000
+#define NBIO_BASE__INST0_SEG1                      0x00000014
+#define NBIO_BASE__INST0_SEG2                      0x00000D20
+#define NBIO_BASE__INST0_SEG3                      0x00010400
+#define NBIO_BASE__INST0_SEG4                      0
+#define NBIO_BASE__INST0_SEG5                      0
+
+#define NBIO_BASE__INST1_SEG0                      0
+#define NBIO_BASE__INST1_SEG1                      0
+#define NBIO_BASE__INST1_SEG2                      0
+#define NBIO_BASE__INST1_SEG3                      0
+#define NBIO_BASE__INST1_SEG4                      0
+#define NBIO_BASE__INST1_SEG5                      0
+
+#define NBIO_BASE__INST2_SEG0                      0
+#define NBIO_BASE__INST2_SEG1                      0
+#define NBIO_BASE__INST2_SEG2                      0
+#define NBIO_BASE__INST2_SEG3                      0
+#define NBIO_BASE__INST2_SEG4                      0
+#define NBIO_BASE__INST2_SEG5                      0
+
+#define NBIO_BASE__INST3_SEG0                      0
+#define NBIO_BASE__INST3_SEG1                      0
+#define NBIO_BASE__INST3_SEG2                      0
+#define NBIO_BASE__INST3_SEG3                      0
+#define NBIO_BASE__INST3_SEG4                      0
+#define NBIO_BASE__INST3_SEG5                      0
+
+#define NBIO_BASE__INST4_SEG0                      0
+#define NBIO_BASE__INST4_SEG1                      0
+#define NBIO_BASE__INST4_SEG2                      0
+#define NBIO_BASE__INST4_SEG3                      0
+#define NBIO_BASE__INST4_SEG4                      0
+#define NBIO_BASE__INST4_SEG5                      0
+
+#define NBIO_BASE__INST5_SEG0                      0
+#define NBIO_BASE__INST5_SEG1                      0
+#define NBIO_BASE__INST5_SEG2                      0
+#define NBIO_BASE__INST5_SEG3                      0
+#define NBIO_BASE__INST5_SEG4                      0
+#define NBIO_BASE__INST5_SEG5                      0
+
+#define OSSSYS_BASE__INST0_SEG0                    0x000010A0
+#define OSSSYS_BASE__INST0_SEG1                    0
+#define OSSSYS_BASE__INST0_SEG2                    0
+#define OSSSYS_BASE__INST0_SEG3                    0
+#define OSSSYS_BASE__INST0_SEG4                    0
+#define OSSSYS_BASE__INST0_SEG5                    0
+
+#define OSSSYS_BASE__INST1_SEG0                    0
+#define OSSSYS_BASE__INST1_SEG1                    0
+#define OSSSYS_BASE__INST1_SEG2                    0
+#define OSSSYS_BASE__INST1_SEG3                    0
+#define OSSSYS_BASE__INST1_SEG4                    0
+#define OSSSYS_BASE__INST1_SEG5                    0
+
+#define OSSSYS_BASE__INST2_SEG0                    0
+#define OSSSYS_BASE__INST2_SEG1                    0
+#define OSSSYS_BASE__INST2_SEG2                    0
+#define OSSSYS_BASE__INST2_SEG3                    0
+#define OSSSYS_BASE__INST2_SEG4                    0
+#define OSSSYS_BASE__INST2_SEG5                    0
+
+#define OSSSYS_BASE__INST3_SEG0                    0
+#define OSSSYS_BASE__INST3_SEG1                    0
+#define OSSSYS_BASE__INST3_SEG2                    0
+#define OSSSYS_BASE__INST3_SEG3                    0
+#define OSSSYS_BASE__INST3_SEG4                    0
+#define OSSSYS_BASE__INST3_SEG5                    0
+
+#define OSSSYS_BASE__INST4_SEG0                    0
+#define OSSSYS_BASE__INST4_SEG1                    0
+#define OSSSYS_BASE__INST4_SEG2                    0
+#define OSSSYS_BASE__INST4_SEG3                    0
+#define OSSSYS_BASE__INST4_SEG4                    0
+#define OSSSYS_BASE__INST4_SEG5                    0
+
+#define OSSSYS_BASE__INST5_SEG0                    0
+#define OSSSYS_BASE__INST5_SEG1                    0
+#define OSSSYS_BASE__INST5_SEG2                    0
+#define OSSSYS_BASE__INST5_SEG3                    0
+#define OSSSYS_BASE__INST5_SEG4                    0
+#define OSSSYS_BASE__INST5_SEG5                    0
+
+#define SDMA0_BASE__INST0_SEG0                     0x00001260
+#define SDMA0_BASE__INST0_SEG1                     0
+#define SDMA0_BASE__INST0_SEG2                     0
+#define SDMA0_BASE__INST0_SEG3                     0
+#define SDMA0_BASE__INST0_SEG4                     0
+#define SDMA0_BASE__INST0_SEG5                     0
+
+#define SDMA0_BASE__INST1_SEG0                     0
+#define SDMA0_BASE__INST1_SEG1                     0
+#define SDMA0_BASE__INST1_SEG2                     0
+#define SDMA0_BASE__INST1_SEG3                     0
+#define SDMA0_BASE__INST1_SEG4                     0
+#define SDMA0_BASE__INST1_SEG5                     0
+
+#define SDMA0_BASE__INST2_SEG0                     0
+#define SDMA0_BASE__INST2_SEG1                     0
+#define SDMA0_BASE__INST2_SEG2                     0
+#define SDMA0_BASE__INST2_SEG3                     0
+#define SDMA0_BASE__INST2_SEG4                     0
+#define SDMA0_BASE__INST2_SEG5                     0
+
+#define SDMA0_BASE__INST3_SEG0                     0
+#define SDMA0_BASE__INST3_SEG1                     0
+#define SDMA0_BASE__INST3_SEG2                     0
+#define SDMA0_BASE__INST3_SEG3                     0
+#define SDMA0_BASE__INST3_SEG4                     0
+#define SDMA0_BASE__INST3_SEG5                     0
+
+#define SDMA0_BASE__INST4_SEG0                     0
+#define SDMA0_BASE__INST4_SEG1                     0
+#define SDMA0_BASE__INST4_SEG2                     0
+#define SDMA0_BASE__INST4_SEG3                     0
+#define SDMA0_BASE__INST4_SEG4                     0
+#define SDMA0_BASE__INST4_SEG5                     0
+
+#define SDMA0_BASE__INST5_SEG0                     0
+#define SDMA0_BASE__INST5_SEG1                     0
+#define SDMA0_BASE__INST5_SEG2                     0
+#define SDMA0_BASE__INST5_SEG3                     0
+#define SDMA0_BASE__INST5_SEG4                     0
+#define SDMA0_BASE__INST5_SEG5                     0
+
+#define SDMA1_BASE__INST0_SEG0                     0x00001860
+#define SDMA1_BASE__INST0_SEG1                     0
+#define SDMA1_BASE__INST0_SEG2                     0
+#define SDMA1_BASE__INST0_SEG3                     0
+#define SDMA1_BASE__INST0_SEG4                     0
+#define SDMA1_BASE__INST0_SEG5                     0
+
+#define SDMA1_BASE__INST1_SEG0                     0
+#define SDMA1_BASE__INST1_SEG1                     0
+#define SDMA1_BASE__INST1_SEG2                     0
+#define SDMA1_BASE__INST1_SEG3                     0
+#define SDMA1_BASE__INST1_SEG4                     0
+#define SDMA1_BASE__INST1_SEG5                     0
+
+#define SDMA1_BASE__INST2_SEG0                     0
+#define SDMA1_BASE__INST2_SEG1                     0
+#define SDMA1_BASE__INST2_SEG2                     0
+#define SDMA1_BASE__INST2_SEG3                     0
+#define SDMA1_BASE__INST2_SEG4                     0
+#define SDMA1_BASE__INST2_SEG5                     0
+
+#define SDMA1_BASE__INST3_SEG0                     0
+#define SDMA1_BASE__INST3_SEG1                     0
+#define SDMA1_BASE__INST3_SEG2                     0
+#define SDMA1_BASE__INST3_SEG3                     0
+#define SDMA1_BASE__INST3_SEG4                     0
+#define SDMA1_BASE__INST3_SEG5                     0
+
+#define SDMA1_BASE__INST4_SEG0                     0
+#define SDMA1_BASE__INST4_SEG1                     0
+#define SDMA1_BASE__INST4_SEG2                     0
+#define SDMA1_BASE__INST4_SEG3                     0
+#define SDMA1_BASE__INST4_SEG4                     0
+#define SDMA1_BASE__INST4_SEG5                     0
+
+#define SDMA1_BASE__INST5_SEG0                     0
+#define SDMA1_BASE__INST5_SEG1                     0
+#define SDMA1_BASE__INST5_SEG2                     0
+#define SDMA1_BASE__INST5_SEG3                     0
+#define SDMA1_BASE__INST5_SEG4                     0
+#define SDMA1_BASE__INST5_SEG5                     0
+
+#define SMUIO_BASE__INST0_SEG0                     0x00016800
+#define SMUIO_BASE__INST0_SEG1                     0x00016A00
+#define SMUIO_BASE__INST0_SEG2                     0
+#define SMUIO_BASE__INST0_SEG3                     0
+#define SMUIO_BASE__INST0_SEG4                     0
+#define SMUIO_BASE__INST0_SEG5                     0
+
+#define SMUIO_BASE__INST1_SEG0                     0
+#define SMUIO_BASE__INST1_SEG1                     0
+#define SMUIO_BASE__INST1_SEG2                     0
+#define SMUIO_BASE__INST1_SEG3                     0
+#define SMUIO_BASE__INST1_SEG4                     0
+#define SMUIO_BASE__INST1_SEG5                     0
+
+#define SMUIO_BASE__INST2_SEG0                     0
+#define SMUIO_BASE__INST2_SEG1                     0
+#define SMUIO_BASE__INST2_SEG2                     0
+#define SMUIO_BASE__INST2_SEG3                     0
+#define SMUIO_BASE__INST2_SEG4                     0
+#define SMUIO_BASE__INST2_SEG5                     0
+
+#define SMUIO_BASE__INST3_SEG0                     0
+#define SMUIO_BASE__INST3_SEG1                     0
+#define SMUIO_BASE__INST3_SEG2                     0
+#define SMUIO_BASE__INST3_SEG3                     0
+#define SMUIO_BASE__INST3_SEG4                     0
+#define SMUIO_BASE__INST3_SEG5                     0
+
+#define SMUIO_BASE__INST4_SEG0                     0
+#define SMUIO_BASE__INST4_SEG1                     0
+#define SMUIO_BASE__INST4_SEG2                     0
+#define SMUIO_BASE__INST4_SEG3                     0
+#define SMUIO_BASE__INST4_SEG4                     0
+#define SMUIO_BASE__INST4_SEG5                     0
+
+#define SMUIO_BASE__INST5_SEG0                     0
+#define SMUIO_BASE__INST5_SEG1                     0
+#define SMUIO_BASE__INST5_SEG2                     0
+#define SMUIO_BASE__INST5_SEG3                     0
+#define SMUIO_BASE__INST5_SEG4                     0
+#define SMUIO_BASE__INST5_SEG5                     0
+
+#define THM_BASE__INST0_SEG0                       0x00016600
+#define THM_BASE__INST0_SEG1                       0
+#define THM_BASE__INST0_SEG2                       0
+#define THM_BASE__INST0_SEG3                       0
+#define THM_BASE__INST0_SEG4                       0
+#define THM_BASE__INST0_SEG5                       0
+
+#define THM_BASE__INST1_SEG0                       0
+#define THM_BASE__INST1_SEG1                       0
+#define THM_BASE__INST1_SEG2                       0
+#define THM_BASE__INST1_SEG3                       0
+#define THM_BASE__INST1_SEG4                       0
+#define THM_BASE__INST1_SEG5                       0
+
+#define THM_BASE__INST2_SEG0                       0
+#define THM_BASE__INST2_SEG1                       0
+#define THM_BASE__INST2_SEG2                       0
+#define THM_BASE__INST2_SEG3                       0
+#define THM_BASE__INST2_SEG4                       0
+#define THM_BASE__INST2_SEG5                       0
+
+#define THM_BASE__INST3_SEG0                       0
+#define THM_BASE__INST3_SEG1                       0
+#define THM_BASE__INST3_SEG2                       0
+#define THM_BASE__INST3_SEG3                       0
+#define THM_BASE__INST3_SEG4                       0
+#define THM_BASE__INST3_SEG5                       0
+
+#define THM_BASE__INST4_SEG0                       0
+#define THM_BASE__INST4_SEG1                       0
+#define THM_BASE__INST4_SEG2                       0
+#define THM_BASE__INST4_SEG3                       0
+#define THM_BASE__INST4_SEG4                       0
+#define THM_BASE__INST4_SEG5                       0
+
+#define THM_BASE__INST5_SEG0                       0
+#define THM_BASE__INST5_SEG1                       0
+#define THM_BASE__INST5_SEG2                       0
+#define THM_BASE__INST5_SEG3                       0
+#define THM_BASE__INST5_SEG4                       0
+#define THM_BASE__INST5_SEG5                       0
+
+#define UMC_BASE__INST0_SEG0                       0x00014000
+#define UMC_BASE__INST0_SEG1                       0
+#define UMC_BASE__INST0_SEG2                       0
+#define UMC_BASE__INST0_SEG3                       0
+#define UMC_BASE__INST0_SEG4                       0
+#define UMC_BASE__INST0_SEG5                       0
+
+#define UMC_BASE__INST1_SEG0                       0
+#define UMC_BASE__INST1_SEG1                       0
+#define UMC_BASE__INST1_SEG2                       0
+#define UMC_BASE__INST1_SEG3                       0
+#define UMC_BASE__INST1_SEG4                       0
+#define UMC_BASE__INST1_SEG5                       0
+
+#define UMC_BASE__INST2_SEG0                       0
+#define UMC_BASE__INST2_SEG1                       0
+#define UMC_BASE__INST2_SEG2                       0
+#define UMC_BASE__INST2_SEG3                       0
+#define UMC_BASE__INST2_SEG4                       0
+#define UMC_BASE__INST2_SEG5                       0
+
+#define UMC_BASE__INST3_SEG0                       0
+#define UMC_BASE__INST3_SEG1                       0
+#define UMC_BASE__INST3_SEG2                       0
+#define UMC_BASE__INST3_SEG3                       0
+#define UMC_BASE__INST3_SEG4                       0
+#define UMC_BASE__INST3_SEG5                       0
+
+#define UMC_BASE__INST4_SEG0                       0
+#define UMC_BASE__INST4_SEG1                       0
+#define UMC_BASE__INST4_SEG2                       0
+#define UMC_BASE__INST4_SEG3                       0
+#define UMC_BASE__INST4_SEG4                       0
+#define UMC_BASE__INST4_SEG5                       0
+
+#define UMC_BASE__INST5_SEG0                       0
+#define UMC_BASE__INST5_SEG1                       0
+#define UMC_BASE__INST5_SEG2                       0
+#define UMC_BASE__INST5_SEG3                       0
+#define UMC_BASE__INST5_SEG4                       0
+#define UMC_BASE__INST5_SEG5                       0
+
+#define UVD_BASE__INST0_SEG0                       0x00007800
+#define UVD_BASE__INST0_SEG1                       0x00007E00
+#define UVD_BASE__INST0_SEG2                       0
+#define UVD_BASE__INST0_SEG3                       0
+#define UVD_BASE__INST0_SEG4                       0
+#define UVD_BASE__INST0_SEG5                       0
+
+#define UVD_BASE__INST1_SEG0                       0
+#define UVD_BASE__INST1_SEG1                       0x00009000
+#define UVD_BASE__INST1_SEG2                       0
+#define UVD_BASE__INST1_SEG3                       0
+#define UVD_BASE__INST1_SEG4                       0
+#define UVD_BASE__INST1_SEG5                       0
+
+#define UVD_BASE__INST2_SEG0                       0
+#define UVD_BASE__INST2_SEG1                       0
+#define UVD_BASE__INST2_SEG2                       0
+#define UVD_BASE__INST2_SEG3                       0
+#define UVD_BASE__INST2_SEG4                       0
+#define UVD_BASE__INST2_SEG5                       0
+
+#define UVD_BASE__INST3_SEG0                       0
+#define UVD_BASE__INST3_SEG1                       0
+#define UVD_BASE__INST3_SEG2                       0
+#define UVD_BASE__INST3_SEG3                       0
+#define UVD_BASE__INST3_SEG4                       0
+#define UVD_BASE__INST3_SEG5                       0
+
+#define UVD_BASE__INST4_SEG0                       0
+#define UVD_BASE__INST4_SEG1                       0
+#define UVD_BASE__INST4_SEG2                       0
+#define UVD_BASE__INST4_SEG3                       0
+#define UVD_BASE__INST4_SEG4                       0
+#define UVD_BASE__INST4_SEG5                       0
+
+#define UVD_BASE__INST5_SEG0                       0
+#define UVD_BASE__INST5_SEG1                       0
+#define UVD_BASE__INST5_SEG2                       0
+#define UVD_BASE__INST5_SEG3                       0
+#define UVD_BASE__INST5_SEG4                       0
+#define UVD_BASE__INST5_SEG5                       0
+
+#define VCE_BASE__INST0_SEG0                       0x00008800
+#define VCE_BASE__INST0_SEG1                       0
+#define VCE_BASE__INST0_SEG2                       0
+#define VCE_BASE__INST0_SEG3                       0
+#define VCE_BASE__INST0_SEG4                       0
+#define VCE_BASE__INST0_SEG5                       0
+
+#define VCE_BASE__INST1_SEG0                       0
+#define VCE_BASE__INST1_SEG1                       0
+#define VCE_BASE__INST1_SEG2                       0
+#define VCE_BASE__INST1_SEG3                       0
+#define VCE_BASE__INST1_SEG4                       0
+#define VCE_BASE__INST1_SEG5                       0
+
+#define VCE_BASE__INST2_SEG0                       0
+#define VCE_BASE__INST2_SEG1                       0
+#define VCE_BASE__INST2_SEG2                       0
+#define VCE_BASE__INST2_SEG3                       0
+#define VCE_BASE__INST2_SEG4                       0
+#define VCE_BASE__INST2_SEG5                       0
+
+#define VCE_BASE__INST3_SEG0                       0
+#define VCE_BASE__INST3_SEG1                       0
+#define VCE_BASE__INST3_SEG2                       0
+#define VCE_BASE__INST3_SEG3                       0
+#define VCE_BASE__INST3_SEG4                       0
+#define VCE_BASE__INST3_SEG5                       0
+
+#define VCE_BASE__INST4_SEG0                       0
+#define VCE_BASE__INST4_SEG1                       0
+#define VCE_BASE__INST4_SEG2                       0
+#define VCE_BASE__INST4_SEG3                       0
+#define VCE_BASE__INST4_SEG4                       0
+#define VCE_BASE__INST4_SEG5                       0
+
+#define VCE_BASE__INST5_SEG0                       0
+#define VCE_BASE__INST5_SEG1                       0
+#define VCE_BASE__INST5_SEG2                       0
+#define VCE_BASE__INST5_SEG3                       0
+#define VCE_BASE__INST5_SEG4                       0
+#define VCE_BASE__INST5_SEG5                       0
+
+#define XDMA_BASE__INST0_SEG0                      0x00003400
+#define XDMA_BASE__INST0_SEG1                      0
+#define XDMA_BASE__INST0_SEG2                      0
+#define XDMA_BASE__INST0_SEG3                      0
+#define XDMA_BASE__INST0_SEG4                      0
+#define XDMA_BASE__INST0_SEG5                      0
+
+#define XDMA_BASE__INST1_SEG0                      0
+#define XDMA_BASE__INST1_SEG1                      0
+#define XDMA_BASE__INST1_SEG2                      0
+#define XDMA_BASE__INST1_SEG3                      0
+#define XDMA_BASE__INST1_SEG4                      0
+#define XDMA_BASE__INST1_SEG5                      0
+
+#define XDMA_BASE__INST2_SEG0                      0
+#define XDMA_BASE__INST2_SEG1                      0
+#define XDMA_BASE__INST2_SEG2                      0
+#define XDMA_BASE__INST2_SEG3                      0
+#define XDMA_BASE__INST2_SEG4                      0
+#define XDMA_BASE__INST2_SEG5                      0
+
+#define XDMA_BASE__INST3_SEG0                      0
+#define XDMA_BASE__INST3_SEG1                      0
+#define XDMA_BASE__INST3_SEG2                      0
+#define XDMA_BASE__INST3_SEG3                      0
+#define XDMA_BASE__INST3_SEG4                      0
+#define XDMA_BASE__INST3_SEG5                      0
+
+#define XDMA_BASE__INST4_SEG0                      0
+#define XDMA_BASE__INST4_SEG1                      0
+#define XDMA_BASE__INST4_SEG2                      0
+#define XDMA_BASE__INST4_SEG3                      0
+#define XDMA_BASE__INST4_SEG4                      0
+#define XDMA_BASE__INST4_SEG5                      0
+
+#define XDMA_BASE__INST5_SEG0                      0
+#define XDMA_BASE__INST5_SEG1                      0
+#define XDMA_BASE__INST5_SEG2                      0
+#define XDMA_BASE__INST5_SEG3                      0
+#define XDMA_BASE__INST5_SEG4                      0
+#define XDMA_BASE__INST5_SEG5                      0
+
+#define RSMU_BASE__INST0_SEG0                      0x00012000
+#define RSMU_BASE__INST0_SEG1                      0
+#define RSMU_BASE__INST0_SEG2                      0
+#define RSMU_BASE__INST0_SEG3                      0
+#define RSMU_BASE__INST0_SEG4                      0
+#define RSMU_BASE__INST0_SEG5                      0
+
+#define RSMU_BASE__INST1_SEG0                      0
+#define RSMU_BASE__INST1_SEG1                      0
+#define RSMU_BASE__INST1_SEG2                      0
+#define RSMU_BASE__INST1_SEG3                      0
+#define RSMU_BASE__INST1_SEG4                      0
+#define RSMU_BASE__INST1_SEG5                      0
+
+#define RSMU_BASE__INST2_SEG0                      0
+#define RSMU_BASE__INST2_SEG1                      0
+#define RSMU_BASE__INST2_SEG2                      0
+#define RSMU_BASE__INST2_SEG3                      0
+#define RSMU_BASE__INST2_SEG4                      0
+#define RSMU_BASE__INST2_SEG5                      0
+
+#define RSMU_BASE__INST3_SEG0                      0
+#define RSMU_BASE__INST3_SEG1                      0
+#define RSMU_BASE__INST3_SEG2                      0
+#define RSMU_BASE__INST3_SEG3                      0
+#define RSMU_BASE__INST3_SEG4                      0
+#define RSMU_BASE__INST3_SEG5                      0
+
+#define RSMU_BASE__INST4_SEG0                      0
+#define RSMU_BASE__INST4_SEG1                      0
+#define RSMU_BASE__INST4_SEG2                      0
+#define RSMU_BASE__INST4_SEG3                      0
+#define RSMU_BASE__INST4_SEG4                      0
+#define RSMU_BASE__INST4_SEG5                      0
+
+#define RSMU_BASE__INST5_SEG0                      0
+#define RSMU_BASE__INST5_SEG1                      0
+#define RSMU_BASE__INST5_SEG2                      0
+#define RSMU_BASE__INST5_SEG3                      0
+#define RSMU_BASE__INST5_SEG4                      0
+#define RSMU_BASE__INST5_SEG5                      0
+
+#endif
+
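
For orientation: each <IP>_BASE__INSTn_SEGm constant above is a per-instance, per-segment MMIO base, and a register's absolute index is that segment base plus the register offset taken from the IP block's own register headers; these are the same values the driver later records in its reg_offset tables. The following C sketch is illustrative only: the EXAMPLE_* macros, the struct layout and the mp1_example_reg() helper are local to the sketch, not driver code.

/*
 * Illustrative sketch: how a segment base from this header combines with a
 * per-register offset.  Everything here is local to the example.
 */
#define EXAMPLE_MAX_INSTANCE 6
#define EXAMPLE_MAX_SEGMENT  6

struct example_ip_instance {
	unsigned int segment[EXAMPLE_MAX_SEGMENT];
};

struct example_ip_base {
	struct example_ip_instance instance[EXAMPLE_MAX_INSTANCE];
};

/* Instance 0 of MP1 at segment base 0x00016000, as defined above. */
static const struct example_ip_base MP1_EXAMPLE =
	{ { { { 0x00016000, 0, 0, 0, 0, 0 } } } };

/* Absolute register index = segment base + register offset within the IP. */
static unsigned int mp1_example_reg(unsigned int seg, unsigned int reg_offset)
{
	return MP1_EXAMPLE.instance[0].segment[seg] + reg_offset;
}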

From b2f87c9182deaf495ec4fefde89584910ec137d8 Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Tue, 17 Apr 2018 16:25:58 -0400
Subject: [PATCH 0957/1286] drm/amd/include/vg20: adjust VCE_BASE to reuse vce
 4.0 header files

Vega20 uses the VCE 4.1 engine; all of its registers have the
same absolute offsets as VCE 4.0. By adjusting the vega20
VCE_BASE, VCE 4.1 can reuse the VCE 4.0 header files (a short
sketch of the base/offset arithmetic follows this patch).

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/include/vega20_ip_offset.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
index 97db93ceba4b..2a2a9cc8bedb 100644
--- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h
@@ -144,7 +144,8 @@ static const struct IP_BASE UVD_BASE            ={ { { { 0x00007800, 0x00007E00,
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE VCE_BASE            ={ { { { 0x00008800, 0, 0, 0, 0, 0 } },
+/* Adjust VCE_BASE to make vce_4_1 use vce_4_0 offset header files*/
+static const struct IP_BASE VCE_BASE            ={ { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
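
The adjustment above works because an absolute register index is simply the segment base plus the header offset; reusing the vce 4.0 offsets on vega20 therefore also means using the base those offsets resolve against (0x00007E00) rather than the nominal vega20 base kept in the comment (0x00008800). A minimal sketch of that arithmetic follows; the register offset below is hypothetical, only the two bases come from the patch.

/* Illustrative only: absolute = base + offset, so keeping the vce 4.0
 * offsets means keeping the base they assume. */
#define VCE40_HEADER_BASE	0x00007E00	/* base the vce 4.0 offsets assume */
#define VCE41_NOMINAL_BASE	0x00008800	/* vega20 hardware value, unused here */

#define mmVCE_EXAMPLE_REG	0x0A03		/* hypothetical vce 4.0 register offset */

static unsigned int vce_example_reg(void)
{
	return VCE40_HEADER_BASE + mmVCE_EXAMPLE_REG;
}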

From 956fcddc0b2a7430b6ee4783827f57cb7c823c7d Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 12:27:54 +0800
Subject: [PATCH 0958/1286] drm/amdgpu: Add vega20 to asic_type enum.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add vega20 to the amd_asic_type enum and to amdgpu_asic_name[], which is
indexed by it (see the note after this patch).

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 include/drm/amd_asic_type.h                | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9fb20a53d5b2..f84fc560c797 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -86,6 +86,7 @@ static const char *amdgpu_asic_name[] = {
 	"VEGAM",
 	"VEGA10",
 	"VEGA12",
+	"VEGA20",
 	"RAVEN",
 	"LAST",
 };
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 695bde7eb055..dd63d08cc54e 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -47,6 +47,7 @@ enum amd_asic_type {
 	CHIP_VEGAM,
 	CHIP_VEGA10,
 	CHIP_VEGA12,
+	CHIP_VEGA20,
 	CHIP_RAVEN,
 	CHIP_LAST,
 };
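
The two hunks above have to stay in lockstep because amdgpu_asic_name[] is indexed directly by amd_asic_type, so "VEGA20" must sit at the same position as CHIP_VEGA20. A hypothetical usage snippet (assuming amdgpu driver context; the helper is invented, only the indexing pattern is the point):

/* Hypothetical sketch: the name table is looked up by asic_type,
 * hence the parallel ordering of the two additions above. */
static void example_print_asic(struct amdgpu_device *adev)
{
	DRM_INFO("ASIC: %s\n", amdgpu_asic_name[adev->asic_type]);
}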

From 27c0bc7163ae8484d3a15324122774b240fadd21 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Thu, 17 May 2018 10:01:19 -0500
Subject: [PATCH 0959/1286] drm/amdgpu: Add gpu_info firmware for vega20. (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

vega20_gpu_info firmware stores gpu configuration for vega20.

v2: drop gpu info firmware for vega20

Squash of:
drm/amdgpu: Add gpu_info firmware for vega20.
drm/amdgpu: drop gpu_info firmware for vega20

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f84fc560c797..3a8d4bcd95f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1388,6 +1388,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 #endif
+	case CHIP_VEGA20:
 	default:
 		return 0;
 	case CHIP_VEGA10:

From e4bd8170407dc54bc3f4b0e140816e51f13f3e71 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 12:33:33 +0800
Subject: [PATCH 0960/1286] drm/amdgpu: set asic family for vega20.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3a8d4bcd95f5..2d46ad7bd8fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1523,6 +1523,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 #endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		if (adev->asic_type == CHIP_RAVEN)
 			adev->family = AMDGPU_FAMILY_RV;

From a167ae2509132e97c94d66cf1ce15ba2fa620248 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 12:46:21 +0800
Subject: [PATCH 0961/1286] drm/amdgpu: Add smu firmware support for vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c       | 3 +++
 drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1 +
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 5b3d3bf5b599..e950730f1933 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -400,6 +400,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			case CHIP_VEGA12:
 				strcpy(fw_name, "amdgpu/vega12_smc.bin");
 				break;
+			case CHIP_VEGA20:
+				strcpy(fw_name, "amdgpu/vega20_smc.bin");
+				break;
 			default:
 				DRM_ERROR("SMC firmware not supported\n");
 				return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index ee236dfbf1d6..c9837935f0f5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega12_smc.bin");
+MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
 
 int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 {

From d3bfb6647cc66664f1e09706690444d2d09a56a8 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 13:32:46 +0800
Subject: [PATCH 0962/1286] drm/amdgpu/powerplay: Add initial vega20 support v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Initial powerplay support is the same as vega10 for now.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 71b42331f185..e63bc47dc715 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -151,6 +151,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 		switch (hwmgr->chip_id) {
 		case CHIP_VEGA10:
+		case CHIP_VEGA20:
 			hwmgr->smumgr_funcs = &vega10_smu_funcs;
 			vega10_hwmgr_init(hwmgr);
 			break;

From 8fd2d849da98924e1e021314de289d4a3a31d07f Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 13:36:54 +0800
Subject: [PATCH 0963/1286] drm/amdgpu/psp: Add initial psp support for vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The same as vega10 for now.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 +
 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c   | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index c7d43e064fc7..9f1a5bd39ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -52,6 +52,7 @@ static int psp_sw_init(void *handle)
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		psp_v3_1_set_psp_funcs(psp);
 		break;
 	case CHIP_RAVEN:
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 196e75def1f2..0c768e388ace 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -41,6 +41,9 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
 MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
 MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
 MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
+MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
+
 
 #define smnMP1_FIRMWARE_FLAGS 0x3010028
 

From 4b1f540ae1a9eba826538cb37f6791729e2bcec8 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 13:38:24 +0800
Subject: [PATCH 0964/1286] drm/amdgpu: Add vega20 ucode loading method
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The same as vega10.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 75592bd04d6a..b419d6e33b3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -303,6 +303,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_VEGA10:
 	case CHIP_RAVEN:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		if (!load_type)
 			return AMDGPU_FW_LOAD_DIRECT;
 		else

From cac18c82e0c5b39b69648942576dbd1d6f9d056e Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 11 May 2018 13:44:09 -0500
Subject: [PATCH 0965/1286] drm/amdgpu: Specify vega20 uvd firmware
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index de4d77af02ae..fd1e9cd65066 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -70,6 +70,7 @@
 
 #define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
 #define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
+#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"
 
 #define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
 #define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
@@ -114,6 +115,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
 
 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
+MODULE_FIRMWARE(FIRMWARE_VEGA20);
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
@@ -177,6 +179,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	case CHIP_VEGAM:
 		fw_name = FIRMWARE_VEGAM;
 		break;
+	case CHIP_VEGA20:
+		fw_name = FIRMWARE_VEGA20;
+		break;
 	default:
 		return -EINVAL;
 	}

From 341b4ce2330b0af3fa09db545dc2d552a99dbdec Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 13:46:49 +0800
Subject: [PATCH 0966/1286] drm/amdgpu: Specify vega20 vce firmware
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a86322f5164f..23d960ec1cf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -57,6 +57,7 @@
 
 #define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
 #define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
+#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -76,6 +77,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
 
 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
+MODULE_FIRMWARE(FIRMWARE_VEGA20);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -143,6 +145,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	case CHIP_VEGA12:
 		fw_name = FIRMWARE_VEGA12;
 		break;
+	case CHIP_VEGA20:
+		fw_name = FIRMWARE_VEGA20;
+		break;
 
 	default:
 		return -EINVAL;

From a2c319b63ea377bce4f278d4ca1cb4d6da31e4fb Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 13:48:23 +0800
Subject: [PATCH 0967/1286] drm/amdgpu/virtual_dce: Add vega20 support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index de7be3de0f41..dbf2ccd0c744 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -473,6 +473,7 @@ static int dce_virtual_hw_init(void *handle)
 		break;
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		break;
 	default:
 		DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);

From d96b428c3cea9ed12d03635a02fbf8644e315bc0 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 13:56:43 +0800
Subject: [PATCH 0968/1286] drm/amdgpu/gmc9: Add vega20 support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 734306902e4e..b60ed288d314 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -752,6 +752,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 		switch (adev->asic_type) {
 		case CHIP_VEGA10:  /* all engines support GPUVM */
 		case CHIP_VEGA12:  /* all engines support GPUVM */
+		case CHIP_VEGA20:
 		default:
 			adev->gmc.gart_size = 512ULL << 20;
 			break;
@@ -857,6 +858,7 @@ static int gmc_v9_0_sw_init(void *handle)
 		break;
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		/*
 		 * To fulfill 4-level page support,
 		 * vm size is 256TB (48bit), maximum size of Vega10,
@@ -974,6 +976,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
+	case CHIP_VEGA20:
 		soc15_program_register_sequence(adev,
 						golden_settings_mmhub_1_0_0,
 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));

From c2d7fd2baeba4c65a3cf7f61d6d54c205e4608f8 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 13:58:09 +0800
Subject: [PATCH 0969/1286] drm/amdgpu/mmhub: Add clockgating support for
 vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 43f925773b57..3d53c4413f13 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -734,6 +734,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		mmhub_v1_0_update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);

From 54a29ef758f6cc6b66b5f27dbfd90c9683920fab Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 14:00:02 +0800
Subject: [PATCH 0970/1286] drm/amdgpu/sdma4: Specify vega20 firmware
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 03a36cbe7557..79b3a45b5715 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -42,6 +42,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
 MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
 MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
+MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
 
 #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
@@ -182,6 +184,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_VEGA12:
 		chip_name = "vega12";
 		break;
+	case CHIP_VEGA20:
+		chip_name = "vega20";
+		break;
 	case CHIP_RAVEN:
 		chip_name = "raven";
 		break;

From 84f50e9c80a74f9f8cac819c7a4b7ca220945b6d Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Tue, 23 Jan 2018 11:13:02 +0800
Subject: [PATCH 0971/1286] drm/amdgpu/sdma4: Add vega20 golden settings (v3)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

v2: squash in updates (Alex)
v3: squash in more updates (Alex)

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 27 ++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 79b3a45b5715..dc12c365a886 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -109,6 +109,28 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
 };
 
+static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
+{
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+};
+
 static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
@@ -141,6 +163,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
 						golden_settings_sdma_vg12,
 						ARRAY_SIZE(golden_settings_sdma_vg12));
 		break;
+	case CHIP_VEGA20:
+		soc15_program_register_sequence(adev,
+						golden_settings_sdma_4_2,
+						ARRAY_SIZE(golden_settings_sdma_4_2));
+		break;
 	case CHIP_RAVEN:
 		soc15_program_register_sequence(adev,
 						 golden_settings_sdma_4_1,
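
For reference, each SOC15_REG_GOLDEN_VALUE entry above names a register together with an and-mask and an or-value, and soc15_program_register_sequence() applies them as masked read-modify-writes (the same read-modify-write pattern shows up again when a later patch in this series programs GB_ADDR_CONFIG directly). Below is a simplified, self-contained sketch of that pattern; the struct and function names are invented and this is not the driver's actual helper.

/*
 * Simplified sketch of applying a golden-settings table: for each entry,
 * clear the bits covered by and_mask and set them to or_mask.
 */
struct golden_entry {
	unsigned int reg;	/* absolute register index (base + offset) */
	unsigned int and_mask;	/* bits owned by this entry */
	unsigned int or_mask;	/* golden value for those bits */
};

static void apply_golden_settings(const struct golden_entry *regs,
				  unsigned int count,
				  unsigned int (*rreg)(unsigned int),
				  void (*wreg)(unsigned int, unsigned int))
{
	unsigned int i, tmp;

	for (i = 0; i < count; i++) {
		tmp = rreg(regs[i].reg);
		tmp &= ~regs[i].and_mask;	/* clear the field */
		tmp |= regs[i].or_mask;		/* program the golden value */
		wreg(regs[i].reg, tmp);
	}
}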

From 7eb32a7012ee592d6567e133a0d9c8c26e2590bf Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Tue, 23 Jan 2018 11:16:16 +0800
Subject: [PATCH 0972/1286] drm/amdgpu/sdma4: Add clockgating support for
 vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index dc12c365a886..ca53b3fba422 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1548,6 +1548,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		sdma_v4_0_update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);

From 940328fe35ab6e9f0eb1118f3cf91a22f97da298 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 14:22:48 +0800
Subject: [PATCH 0973/1286] drm/amdgpu/gfx9: Add support for vega20 firmware
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b05b7ae4d035..6976317dc6b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -63,6 +63,13 @@ MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
 MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
 MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
+MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
+MODULE_FIRMWARE("amdgpu/vega20_me.bin");
+MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
+MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
+MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
+
 MODULE_FIRMWARE("amdgpu/raven_ce.bin");
 MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
 MODULE_FIRMWARE("amdgpu/raven_me.bin");
@@ -461,6 +468,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_VEGA12:
 		chip_name = "vega12";
 		break;
+	case CHIP_VEGA20:
+		chip_name = "vega20";
+		break;
 	case CHIP_RAVEN:
 		chip_name = "raven";
 		break;

From bb5368aac5b83c1fbb39ccd0d4a89af4465c84e2 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Tue, 23 Jan 2018 14:47:26 +0800
Subject: [PATCH 0974/1286] drm/amdgpu/gfx9: Add vega20 golden settings (v3)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

v2: squash in updates (Alex)
v3: squash in more updates (Alex)

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6976317dc6b4..37492791a8f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -108,6 +108,20 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
 };
 
+static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
+{
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_9_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
@@ -241,6 +255,14 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 						golden_settings_gc_9_2_1_vg12,
 						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
 		break;
+	case CHIP_VEGA20:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_9_0,
+						ARRAY_SIZE(golden_settings_gc_9_0));
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_9_0_vg20,
+						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
+		break;
 	case CHIP_RAVEN:
 		soc15_program_register_sequence(adev,
 						 golden_settings_gc_9_1,

From d3adedb4559c01d18a934250e41a4660b4d89ac3 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 14:40:11 +0800
Subject: [PATCH 0975/1286] drm/amdgpu/gfx9: Add gfx config for vega20. (v4)

v2: clean up (Alex)
v3: additional cleanups (Alex)
v4: drop leftover TODO (Alex)

Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 37492791a8f8..8335d98a3f3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1137,6 +1137,16 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
 		DRM_INFO("fix gfx.config for vega12\n");
 		break;
+	case CHIP_VEGA20:
+		adev->gfx.config.max_hw_contexts = 8;
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+		gb_addr_config &= ~0xf3e777ff;
+		gb_addr_config |= 0x22014042;
+		break;
 	case CHIP_RAVEN:
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;

From 61324ddc5b7a43c3b989fbbb2ac5d99009a04d4b Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 15:51:26 +0800
Subject: [PATCH 0976/1286] drm/amdgpu/gfx9: Add support for vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 8335d98a3f3b..92ed268a1b7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1429,6 +1429,7 @@ static int gfx_v9_0_sw_init(void *handle)
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		adev->gfx.mec.num_mec = 2;
 		break;
@@ -4715,6 +4716,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
 		break;

From 28b576b27a7acb29ce5b64da69d3855f6302350d Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Tue, 23 Jan 2018 15:03:36 +0800
Subject: [PATCH 0977/1286] drm/amdgpu/gfx9: Add clockgating support for
 vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 92ed268a1b7f..13253e09f4bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3724,6 +3724,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		gfx_v9_0_update_gfx_clock_gating(adev,
 						 state == AMD_CG_STATE_GATE ? true : false);

From 935be7a0ce4e181a23fc840861088e79dcb3dc08 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 26 Jan 2018 15:06:22 +0800
Subject: [PATCH 0978/1286] drm/amdgpu/soc15: Add vega20 soc15_common_early_init
 support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Set external_rev_id and disable CG and PG for now (see the note after this
patch on how cg_flags gates the clockgating paths).

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f31df18fcb81..f45bea84a73e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -658,6 +658,11 @@ static int soc15_common_early_init(void *handle)
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
+	case CHIP_VEGA20:
+		adev->cg_flags = 0;
+		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x28;
+		break;
 	case CHIP_RAVEN:
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
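
Note how this interacts with the clockgating cases added elsewhere in the series (mmhub, sdma4, gfx9, soc15 common): those switch statements now reach the update helpers for CHIP_VEGA20, but with adev->cg_flags set to 0 here the helpers find no supported features and leave gating disabled until the flags are filled in. A hedged sketch of that gating pattern follows; the helper below is hypothetical and only the cg_flags check mirrors the driver's usual structure (AMD_CG_SUPPORT_MC_MGCG is an existing flag name).

/* Hypothetical sketch, assuming amdgpu driver context: per-IP update helpers
 * typically honour adev->cg_flags before touching hardware. */
static void example_update_mgcg(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
		enable = false;		/* feature not advertised: keep gating off */

	/* ... program the medium grain clock gating bits according to 'enable' ... */
}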

From f980d127dba80214b4d793942492d3a4e6c46be0 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 26 Jan 2018 15:10:55 +0800
Subject: [PATCH 0979/1286] drm/amdgpu/soc15: Set common clockgating for
 vega20.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Same as vega10 for now.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f45bea84a73e..1fd75f5aa22b 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -875,6 +875,7 @@ static int soc15_common_set_clockgating_state(void *handle,
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		adev->nbio_funcs->update_medium_grain_light_sleep(adev,

From 8ee273e516a096ee00b3be7cc15c8924aa3b1ef1 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 23 Mar 2018 14:42:28 -0500
Subject: [PATCH 0980/1286] drm/amdgpu/soc15: dynamic initialize ip offset for
 vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Vega20 needs a separate vega20_reg_init.c due to IP base
offset differences.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile          |  3 +-
 drivers/gpu/drm/amd/amdgpu/soc15.c           |  3 ++
 drivers/gpu/drm/amd/amdgpu/soc15.h           |  1 +
 drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 53 ++++++++++++++++++++
 4 files changed, 59 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 68e9f584c570..012ea37b81be 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -62,7 +62,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 
 amdgpu-y += \
-	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
+	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
+	vega20_reg_init.o
 
 # add DF block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 1fd75f5aa22b..c3133d16de77 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -487,6 +487,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	case CHIP_RAVEN:
 		vega10_reg_base_init(adev);
 		break;
+	case CHIP_VEGA20:
+		vega20_reg_base_init(adev);
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f70da8a29f86..1f714b7af520 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -55,5 +55,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
 					     const u32 array_size);
 
 int vega10_reg_base_init(struct amdgpu_device *adev);
+int vega20_reg_base_init(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
new file mode 100644
index 000000000000..52778de93ab0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "vega20_ip_offset.h"
+
+int vega20_reg_base_init(struct amdgpu_device *adev)
+{
+	/* HW has more IP blocks; only initialize the blocks needed by our driver */
+	uint32_t i;
+	for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+		adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+		adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+		adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+		adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+		adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+		adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+		adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
+		adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
+		adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+		adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
+		adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+		adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+		adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+	}
+	return 0;
+}
+
+

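A minimal standalone sketch (not part of this series; every name and value
below is invented for illustration) of the pattern the table above sets up:
each IP block records a pointer to its instance's segment bases, and an
absolute register address is later computed as segment base plus relative
offset. The real driver consumes adev->reg_offset[] through its SOC15
register helpers rather than a helper like the one sketched here.

#include <stdint.h>
#include <stdio.h>

#define MAX_SEGMENT 5

struct ip_base_instance {
	uint32_t segment[MAX_SEGMENT];	/* base address of each segment */
};

enum hwip { GC_HWIP, HDP_HWIP, NUM_HWIP };

/* Hypothetical per-ASIC tables; real values come from vega20_ip_offset.h. */
static const struct ip_base_instance gc_base  = { { 0x00002000 } };
static const struct ip_base_instance hdp_base = { { 0x00000F20 } };

static const uint32_t *reg_offset[NUM_HWIP];

static void reg_base_init(void)
{
	/* Mirrors the per-IP assignments done in vega20_reg_base_init(). */
	reg_offset[GC_HWIP]  = gc_base.segment;
	reg_offset[HDP_HWIP] = hdp_base.segment;
}

/* Absolute offset = IP segment base + register offset within that segment. */
static uint32_t abs_reg_offset(enum hwip ip, uint32_t base_idx, uint32_t reg)
{
	return reg_offset[ip][base_idx] + reg;
}

int main(void)
{
	reg_base_init();
	printf("GC reg 0x123 -> 0x%08x\n", abs_reg_offset(GC_HWIP, 0, 0x123));
	return 0;
}

Keeping the bases in a per-ASIC file such as vega20_reg_init.c is what lets
the rest of the SOC15 code stay ASIC-agnostic when base offsets move around.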
From 7c7af6c10d5dc733c2f181f653cb0a5b64e372a5 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 18:35:42 +0800
Subject: [PATCH 0981/1286] drm/amdgpu/soc15: Add ip blocks for vega20 (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Same as vega10 for now.

v2: squash in typo fix

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c3133d16de77..10337fb3fc1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -508,6 +508,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);

From a95d89e2d8e268d90d0f97c9c57d61006eec78c3 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 23 Mar 2018 14:44:28 -0500
Subject: [PATCH 0982/1286] drm/amdgpu: Add nbio support for vega20 (v2)

Some register offsets in nbio v7.4 differ from those in v7.0.

v2: Use nbio7.0 for now.

TODO: add a new nbio 7.4 module (Alex)

Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 18 +++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/soc15.c     |  2 ++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index df34dc79d444..365517c0121e 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -34,10 +34,19 @@
 #define smnCPM_CONTROL                                                                                  0x11180460
 #define smnPCIE_CNTL2                                                                                   0x11180070
 
+/* vega20 */
+#define mmRCC_DEV0_EPF0_STRAP0_VG20                                                                         0x0011
+#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX                                                                2
+
 static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
 {
         u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
 
+	if (adev->asic_type == CHIP_VEGA20)
+		tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20);
+	else
+		tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+
 	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
 	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
 
@@ -75,10 +84,14 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
 			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
 
 	u32 doorbell_range = RREG32(reg);
+	u32 range = 2;
+
+	if (adev->asic_type == CHIP_VEGA20)
+		range = 8;
 
 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
-		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, range);
 	} else
 		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
 
@@ -133,6 +146,9 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
 {
 	uint32_t def, data;
 
+	if (adev->asic_type == CHIP_VEGA20)
+		return;
+
 	/* NBIF_MGCG_CTRL_LCLK */
 	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 10337fb3fc1f..4e065c68b86c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -496,6 +496,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 
 	if (adev->flags & AMD_IS_APU)
 		adev->nbio_funcs = &nbio_v7_0_funcs;
+	else if (adev->asic_type == CHIP_VEGA20)
+		adev->nbio_funcs = &nbio_v7_0_funcs;
 	else
 		adev->nbio_funcs = &nbio_v6_1_funcs;
 

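The hunks above rely on the driver's register helpers (RREG32_SOC15,
REG_SET_FIELD) to read a strap register and to widen the SDMA doorbell
range. As a rough illustration of what such mask/shift field helpers do
(these are not the actual macro definitions; the field layout below is
invented), a standalone sketch of a generic get/set pair applied to a
pretend doorbell-range register:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout for a pretend doorbell-range register. */
#define DB_OFFSET_SHIFT 2
#define DB_OFFSET_MASK  0x0000FFFCu
#define DB_SIZE_SHIFT   16
#define DB_SIZE_MASK    0x001F0000u

static uint32_t field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

static uint32_t field_set(uint32_t reg, uint32_t mask, unsigned int shift,
			  uint32_t val)
{
	/* Read-modify-write of one bitfield, leaving the others untouched. */
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t doorbell_range = 0;
	uint32_t size = 8;	/* e.g. the larger range a newer ASIC needs */

	doorbell_range = field_set(doorbell_range, DB_OFFSET_MASK,
				   DB_OFFSET_SHIFT, 0x40);
	doorbell_range = field_set(doorbell_range, DB_SIZE_MASK,
				   DB_SIZE_SHIFT, size);

	printf("reg=0x%08x offset=%u size=%u\n", doorbell_range,
	       field_get(doorbell_range, DB_OFFSET_MASK, DB_OFFSET_SHIFT),
	       field_get(doorbell_range, DB_SIZE_MASK, DB_SIZE_SHIFT));

	return 0;
}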
From 1fe6bf2f33fe6728cfb206e2ce476cb2d1dae406 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 19:50:01 +0800
Subject: [PATCH 0983/1286] drm/amd/display/dm: Add vega20 support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f2f54a9df56f..6f5cb26b243c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1115,6 +1115,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
 
 	if (adev->asic_type == CHIP_VEGA10 ||
 	    adev->asic_type == CHIP_VEGA12 ||
+	    adev->asic_type == CHIP_VEGA20 ||
 	    adev->asic_type == CHIP_RAVEN)
 		client_id = SOC15_IH_CLIENTID_DCE;
 
@@ -1518,6 +1519,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 #endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		if (dce110_register_irq_handlers(dm->adev)) {
 			DRM_ERROR("DM: Failed to initialize IRQ\n");
 			goto fail;
@@ -1718,6 +1720,7 @@ static int dm_early_init(void *handle)
 		break;
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
@@ -1966,6 +1969,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
 
 	if (adev->asic_type == CHIP_VEGA10 ||
 	    adev->asic_type == CHIP_VEGA12 ||
+	    adev->asic_type == CHIP_VEGA20 ||
 	    adev->asic_type == CHIP_RAVEN) {
 		/* Fill GFX9 params */
 		plane_state->tiling_info.gfx9.num_pipes =

From c6034aa2c4fc54bbe429cc6414f83a25bb4913f7 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Sat, 3 Feb 2018 12:19:46 +0800
Subject: [PATCH 0984/1286] drm/amdgpu: Add vega20 to dc support check (v2)

v2: fix whitespace

Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2d46ad7bd8fc..0e3f69d31b80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2158,6 +2158,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_FIJI:
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case CHIP_RAVEN:
 #endif

From d82420b56a17d5b39579bc46f8dad757be684f94 Mon Sep 17 00:00:00 2001
From: Roman Li <Roman.Li@amd.com>
Date: Wed, 14 Feb 2018 17:20:54 -0500
Subject: [PATCH 0985/1286] drm/amd: Add dce-12.1 gpio aux registers (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Updating dce12 register headers by adding dc registers
required for potential DP LTTPR support.

v2: fix mode change

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roman Li <Roman.Li@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../include/asic_reg/dce/dce_12_0_offset.h    |  12 ++
 .../include/asic_reg/dce/dce_12_0_sh_mask.h   | 152 ++++++++++++++++++
 2 files changed, 164 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
index f730d0629020..b6f74bf4af02 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
@@ -2095,6 +2095,18 @@
 #define mmDC_GPIO_AUX_CTRL_2_BASE_IDX                                                                  2
 #define mmDC_GPIO_RXEN                                                                                 0x212f
 #define mmDC_GPIO_RXEN_BASE_IDX                                                                        2
+#define mmDC_GPIO_AUX_CTRL_3                                                                           0x2130
+#define mmDC_GPIO_AUX_CTRL_3_BASE_IDX                                                                  2
+#define mmDC_GPIO_AUX_CTRL_4                                                                           0x2131
+#define mmDC_GPIO_AUX_CTRL_4_BASE_IDX                                                                  2
+#define mmDC_GPIO_AUX_CTRL_5                                                                           0x2132
+#define mmDC_GPIO_AUX_CTRL_5_BASE_IDX                                                                  2
+#define mmAUXI2C_PAD_ALL_PWR_OK                                                                        0x2133
+#define mmAUXI2C_PAD_ALL_PWR_OK_BASE_IDX                                                               2
+#define mmDC_GPIO_PULLUPEN                                                                             0x2134
+#define mmDC_GPIO_PULLUPEN_BASE_IDX                                                                    2
+#define mmDC_GPIO_AUX_CTRL_6                                                                           0x2135
+#define mmDC_GPIO_AUX_CTRL_6_BASE_IDX                                                                  2
 #define mmBPHYC_DAC_MACRO_CNTL                                                                         0x2136
 #define mmBPHYC_DAC_MACRO_CNTL_BASE_IDX                                                                2
 #define mmDAC_MACRO_CNTL_RESERVED0                                                                     0x2136
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
index 6d3162c42957..bcd190a3fcdd 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
@@ -10971,6 +10971,158 @@
 #define DC_GPIO_RXEN__DC_GPIO_BLON_RXEN_MASK                                                                  0x00100000L
 #define DC_GPIO_RXEN__DC_GPIO_DIGON_RXEN_MASK                                                                 0x00200000L
 #define DC_GPIO_RXEN__DC_GPIO_ENA_BL_RXEN_MASK                                                                0x00400000L
+//DC_GPIO_AUX_CTRL_3
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT                                                             0x0
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT                                                             0x1
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM__SHIFT                                                             0x2
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM__SHIFT                                                             0x3
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM__SHIFT                                                             0x4
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM__SHIFT                                                             0x5
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT                                                            0x8
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT                                                            0x9
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP__SHIFT                                                            0xa
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP__SHIFT                                                            0xb
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP__SHIFT                                                            0xc
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP__SHIFT                                                            0xd
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT                                                              0x10
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT                                                              0x12
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE__SHIFT                                                              0x14
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE__SHIFT                                                              0x16
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE__SHIFT                                                              0x18
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE__SHIFT                                                              0x1a
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK                                                               0x00000001L
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK                                                               0x00000002L
+#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM_MASK                                                               0x00000004L
+#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM_MASK                                                               0x00000008L
+#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM_MASK                                                               0x00000010L
+#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM_MASK                                                               0x00000020L
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK                                                              0x00000100L
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK                                                              0x00000200L
+#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP_MASK                                                              0x00000400L
+#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP_MASK                                                              0x00000800L
+#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP_MASK                                                              0x00001000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP_MASK                                                              0x00002000L
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK                                                                0x00030000L
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK                                                                0x000C0000L
+#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE_MASK                                                                0x00300000L
+#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE_MASK                                                                0x00C00000L
+#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE_MASK                                                                0x03000000L
+#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE_MASK                                                                0x0C000000L
+//DC_GPIO_AUX_CTRL_4
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT                                                              0x0
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT                                                              0x4
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL__SHIFT                                                              0x8
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL__SHIFT                                                              0xc
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL__SHIFT                                                              0x10
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL__SHIFT                                                              0x14
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK                                                                0x0000000FL
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK                                                                0x000000F0L
+#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL_MASK                                                                0x00000F00L
+#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL_MASK                                                                0x0000F000L
+#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL_MASK                                                                0x000F0000L
+#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL_MASK                                                                0x00F00000L
+//DC_GPIO_AUX_CTRL_5
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT                                                              0x0
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT                                                              0x2
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE__SHIFT                                                              0x4
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE__SHIFT                                                              0x6
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE__SHIFT                                                              0x8
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE__SHIFT                                                              0xa
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT                                                           0xc
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT                                                           0xd
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE__SHIFT                                                           0xe
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE__SHIFT                                                           0xf
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE__SHIFT                                                           0x10
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE__SHIFT                                                           0x11
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT                                                        0x12
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT                                                        0x13
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN__SHIFT                                                        0x14
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN__SHIFT                                                        0x15
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN__SHIFT                                                        0x16
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN__SHIFT                                                        0x17
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT                                                          0x18
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT                                                          0x19
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL__SHIFT                                                          0x1a
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL__SHIFT                                                          0x1b
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL__SHIFT                                                          0x1c
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL__SHIFT                                                          0x1d
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK                                                                0x00000003L
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK                                                                0x0000000CL
+#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE_MASK                                                                0x00000030L
+#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE_MASK                                                                0x000000C0L
+#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE_MASK                                                                0x00000300L
+#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE_MASK                                                                0x00000C00L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK                                                             0x00001000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK                                                             0x00002000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE_MASK                                                             0x00004000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE_MASK                                                             0x00008000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE_MASK                                                             0x00010000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE_MASK                                                             0x00020000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK                                                          0x00040000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK                                                          0x00080000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN_MASK                                                          0x00100000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN_MASK                                                          0x00200000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN_MASK                                                          0x00400000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN_MASK                                                          0x00800000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK                                                            0x01000000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK                                                            0x02000000L
+#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL_MASK                                                            0x04000000L
+#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL_MASK                                                            0x08000000L
+#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL_MASK                                                            0x10000000L
+#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL_MASK                                                            0x20000000L
+//AUXI2C_PAD_ALL_PWR_OK
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT                                                  0x0
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT                                                  0x1
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK__SHIFT                                                  0x2
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK__SHIFT                                                  0x3
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK__SHIFT                                                  0x4
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK__SHIFT                                                  0x5
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK                                                    0x00000001L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK                                                    0x00000002L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK_MASK                                                    0x00000004L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK_MASK                                                    0x00000008L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK_MASK                                                    0x00000010L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK_MASK                                                    0x00000020L
+//DC_GPIO_PULLUPEN
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN__SHIFT                                                       0x0
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN__SHIFT                                                       0x1
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN__SHIFT                                                       0x2
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN__SHIFT                                                       0x3
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN__SHIFT                                                       0x4
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN__SHIFT                                                       0x5
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN__SHIFT                                                       0x6
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN__SHIFT                                                         0x8
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN__SHIFT                                                         0x9
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN__SHIFT                                                           0xe
+#define DC_GPIO_PULLUPEN__DC_GPIO_BLON_PU_EN__SHIFT                                                           0x14
+#define DC_GPIO_PULLUPEN__DC_GPIO_DIGON_PU_EN__SHIFT                                                          0x15
+#define DC_GPIO_PULLUPEN__DC_GPIO_ENA_BL_PU_EN__SHIFT                                                         0x16
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN_MASK                                                         0x00000001L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN_MASK                                                         0x00000002L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN_MASK                                                         0x00000004L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN_MASK                                                         0x00000008L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN_MASK                                                         0x00000010L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN_MASK                                                         0x00000020L
+#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN_MASK                                                         0x00000040L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN_MASK                                                           0x00000100L
+#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN_MASK                                                           0x00000200L
+#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN_MASK                                                             0x00004000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_BLON_PU_EN_MASK                                                             0x00100000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_DIGON_PU_EN_MASK                                                            0x00200000L
+#define DC_GPIO_PULLUPEN__DC_GPIO_ENA_BL_PU_EN_MASK                                                           0x00400000L
+//DC_GPIO_AUX_CTRL_6
+#define DC_GPIO_AUX_CTRL_6__AUX1_PAD_RXSEL__SHIFT                                                             0x0
+#define DC_GPIO_AUX_CTRL_6__AUX2_PAD_RXSEL__SHIFT                                                             0x2
+#define DC_GPIO_AUX_CTRL_6__AUX3_PAD_RXSEL__SHIFT                                                             0x4
+#define DC_GPIO_AUX_CTRL_6__AUX4_PAD_RXSEL__SHIFT                                                             0x6
+#define DC_GPIO_AUX_CTRL_6__AUX5_PAD_RXSEL__SHIFT                                                             0x8
+#define DC_GPIO_AUX_CTRL_6__AUX6_PAD_RXSEL__SHIFT                                                             0xa
+#define DC_GPIO_AUX_CTRL_6__AUX1_PAD_RXSEL_MASK                                                               0x00000003L
+#define DC_GPIO_AUX_CTRL_6__AUX2_PAD_RXSEL_MASK                                                               0x0000000CL
+#define DC_GPIO_AUX_CTRL_6__AUX3_PAD_RXSEL_MASK                                                               0x00000030L
+#define DC_GPIO_AUX_CTRL_6__AUX4_PAD_RXSEL_MASK                                                               0x000000C0L
+#define DC_GPIO_AUX_CTRL_6__AUX5_PAD_RXSEL_MASK                                                               0x00000300L
+#define DC_GPIO_AUX_CTRL_6__AUX6_PAD_RXSEL_MASK                                                               0x00000C00L
 //BPHYC_DAC_MACRO_CNTL
 #define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL__SHIFT                                                    0x0
 #define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL__SHIFT                                             0x8

From 138bc36051f817ce5bee33b0e7a4873bb04f1eb4 Mon Sep 17 00:00:00 2001
From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
Date: Fri, 11 May 2018 13:46:19 -0500
Subject: [PATCH 0986/1286] drm/amd/display: Add Vega20 config. support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/Kconfig | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index e6ca72c0d347..6dcec9c9126b 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -40,4 +40,13 @@ config DRM_AMD_DC_VEGAM
         help
          Choose this option if you want to have
          VEGAM support for display engine
+
+config DRM_AMD_DC_VG20
+	bool "Vega20 support"
+	depends on DRM_AMD_DC
+	help
+		Choose this option if you want to have
+		Vega20 support for display engine
+
+
 endmenu

From 14a13a0ef0665924a5e87947309b6c9abfb41903 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Fri, 20 Apr 2018 21:03:10 +0800
Subject: [PATCH 0987/1286] drm/amd/display: Remove COMBO_DISPLAY_PLL0 from
 Vega20

Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c   | 11 ++++++++++-
 drivers/gpu/drm/amd/display/include/dal_asic_id.h |  6 ++++++
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 78e6beb6cf26..aa4cf3095235 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -35,7 +35,7 @@
 #endif
 #include "core_types.h"
 #include "dc_types.h"
-
+#include "dal_asic_id.h"
 
 #define TO_DCE_CLOCKS(clocks)\
 	container_of(clocks, struct dce_disp_clk, base)
@@ -413,9 +413,18 @@ static int dce112_set_clock(
 	/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
 	dce_clk_params.target_clock_frequency = 0;
 	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+#ifndef CONFIG_DRM_AMD_DC_VG20
 	dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
 			(dce_clk_params.pll_id ==
 					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+#else
+	if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
+		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+			(dce_clk_params.pll_id ==
+					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+	else
+		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+#endif
 
 	bp->funcs->set_dce_clock(bp, &dce_clk_params);
 
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 1b987b6a347d..77d2856be9f6 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -117,6 +117,12 @@
 	((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
 
 /* DCE12 */
+#define AI_UNKNOWN 0xFF
+
+#ifdef CONFIG_DRM_AMD_DC_VG20
+#define AI_VEGA20_P_A0 40
+#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN))
+#endif
 
 #define AI_GREENLAND_P_A0 1
 #define AI_GREENLAND_P_A1 2

From 1edb2c8a32160c00273485efea8d18080e31cc09 Mon Sep 17 00:00:00 2001
From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
Date: Fri, 11 May 2018 13:51:43 -0500
Subject: [PATCH 0988/1286] drm/amd/display: Add BIOS smu_info v3_3 support for
 Vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
---
 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 10a5807a7e8b..4561673a0fe6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1330,6 +1330,11 @@ static enum bp_result bios_parser_get_firmware_info(
 			case 2:
 				result = get_firmware_info_v3_2(bp, info);
 				break;
+			case 3:
+#ifdef CONFIG_DRM_AMD_DC_VG20
+				result = get_firmware_info_v3_2(bp, info);
+#endif
+				break;
 			default:
 				break;
 			}

From 8ad63122f9f22dde172b98fe9c75818831e57f4b Mon Sep 17 00:00:00 2001
From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
Date: Mon, 5 Mar 2018 16:12:23 -0500
Subject: [PATCH 0989/1286] drm/amd/display: Add harvest IP support for Vega20
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Retrieve fuses to determine the availability of pipes, and
eliminate pipes that cannot be used.

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
---
 .../amd/display/dc/dce120/dce120_resource.c   | 208 ++++++++++++++++++
 1 file changed, 208 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index fda01574d1ba..545f35f0821f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -814,6 +814,213 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 	dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
 }
 
+#ifdef CONFIG_DRM_AMD_DC_VG20
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+	uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
+	/* VG20 supports a max of 6 pipes */
+	value = value & 0x3f;
+	return value;
+}
+
+static bool construct(
+	uint8_t num_virtual_links,
+	struct dc *dc,
+	struct dce110_resource_pool *pool)
+{
+	unsigned int i;
+	int j;
+	struct dc_context *ctx = dc->ctx;
+	struct irq_service_init_data irq_init_data;
+	bool harvest_enabled = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
+	uint32_t pipe_fuses;
+
+	ctx->dc_bios->regs = &bios_regs;
+
+	pool->base.res_cap = &res_cap;
+	pool->base.funcs = &dce120_res_pool_funcs;
+
+	/* TODO: Fill more data from GreenlandAsicCapability.cpp */
+	pool->base.pipe_count = res_cap.num_timing_generator;
+	pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
+	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+	dc->caps.max_downscale_ratio = 200;
+	dc->caps.i2c_speed_in_khz = 100;
+	dc->caps.max_cursor_size = 128;
+	dc->caps.dual_link_dvi = true;
+
+	dc->debug = debug_defaults;
+
+	/*************************************************
+	 *  Create resources                             *
+	 *************************************************/
+
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL0,
+				&clk_src_regs[0], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL1,
+				&clk_src_regs[1], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL2,
+				&clk_src_regs[2], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL3,
+				&clk_src_regs[3], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL4,
+				&clk_src_regs[4], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL5,
+				&clk_src_regs[5], false);
+	pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
+
+	pool->base.dp_clock_source =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_ID_DP_DTO,
+				&clk_src_regs[0], true);
+
+	for (i = 0; i < pool->base.clk_src_count; i++) {
+		if (pool->base.clock_sources[i] == NULL) {
+			dm_error("DC: failed to create clock sources!\n");
+			BREAK_TO_DEBUGGER();
+			goto clk_src_create_fail;
+		}
+	}
+
+	pool->base.display_clock = dce120_disp_clk_create(ctx);
+	if (pool->base.display_clock == NULL) {
+		dm_error("DC: failed to create display clock!\n");
+		BREAK_TO_DEBUGGER();
+		goto disp_clk_create_fail;
+	}
+
+	pool->base.dmcu = dce_dmcu_create(ctx,
+			&dmcu_regs,
+			&dmcu_shift,
+			&dmcu_mask);
+	if (pool->base.dmcu == NULL) {
+		dm_error("DC: failed to create dmcu!\n");
+		BREAK_TO_DEBUGGER();
+		goto res_create_fail;
+	}
+
+	pool->base.abm = dce_abm_create(ctx,
+			&abm_regs,
+			&abm_shift,
+			&abm_mask);
+	if (pool->base.abm == NULL) {
+		dm_error("DC: failed to create abm!\n");
+		BREAK_TO_DEBUGGER();
+		goto res_create_fail;
+	}
+
+	irq_init_data.ctx = dc->ctx;
+	pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
+	if (!pool->base.irqs)
+		goto irqs_create_fail;
+
+	/* retrieve valid pipe fuses */
+	if (harvest_enabled)
+		pipe_fuses = read_pipe_fuses(ctx);
+
+	/* index to valid pipe resource */
+	j = 0;
+	for (i = 0; i < pool->base.pipe_count; i++) {
+		if (harvest_enabled) {
+			if ((pipe_fuses & (1 << i)) != 0) {
+				dm_error("DC: skip invalid pipe %d!\n", i);
+				continue;
+			}
+		}
+
+		pool->base.timing_generators[j] =
+				dce120_timing_generator_create(
+					ctx,
+					i,
+					&dce120_tg_offsets[i]);
+		if (pool->base.timing_generators[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error("DC: failed to create tg!\n");
+			goto controller_create_fail;
+		}
+
+		pool->base.mis[j] = dce120_mem_input_create(ctx, i);
+
+		if (pool->base.mis[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create memory input!\n");
+			goto controller_create_fail;
+		}
+
+		pool->base.ipps[j] = dce120_ipp_create(ctx, i);
+		if (pool->base.ipps[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create input pixel processor!\n");
+			goto controller_create_fail;
+		}
+
+		pool->base.transforms[j] = dce120_transform_create(ctx, i);
+		if (pool->base.transforms[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create transform!\n");
+			goto res_create_fail;
+		}
+
+		pool->base.opps[j] = dce120_opp_create(
+			ctx,
+			i);
+		if (pool->base.opps[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create output pixel processor!\n");
+		}
+
+		/* check next valid pipe */
+		j++;
+	}
+
+	/* valid pipe num */
+	pool->base.pipe_count = j;
+	pool->base.timing_generator_count = j;
+
+	if (!resource_construct(num_virtual_links, dc, &pool->base,
+			 &res_create_funcs))
+		goto res_create_fail;
+
+	/* Create hardware sequencer */
+	if (!dce120_hw_sequencer_create(dc))
+		goto controller_create_fail;
+
+	dc->caps.max_planes =  pool->base.pipe_count;
+
+	bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+	bw_calcs_data_update_from_pplib(dc);
+
+	return true;
+
+irqs_create_fail:
+controller_create_fail:
+disp_clk_create_fail:
+clk_src_create_fail:
+res_create_fail:
+
+	destruct(pool);
+
+	return false;
+}
+#else
 static bool construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
@@ -988,6 +1195,7 @@ res_create_fail:
 
 	return false;
 }
+#endif
 
 struct resource_pool *dce120_create_resource_pool(
 	uint8_t num_virtual_links,

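To make the harvesting flow above easier to follow: read_pipe_fuses() yields
a bit mask in which a set bit marks a fused-off pipe, and the construct()
loop skips those pipes while repacking the usable ones into contiguous
logical indices. The standalone sketch below (mask value and pipe count are
made up) shows the same skip-and-compact pattern.

#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 6

int main(void)
{
	uint32_t pipe_fuses = 0x24;	/* pretend pipes 2 and 5 are fused off */
	int valid[MAX_PIPES];
	int i, j = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		if (pipe_fuses & (1u << i)) {
			printf("skip fused-off pipe %d\n", i);
			continue;
		}
		valid[j++] = i;	/* hardware pipe i becomes logical pipe j */
	}

	printf("usable pipe count: %d\n", j);
	for (i = 0; i < j; i++)
		printf("logical pipe %d -> hw pipe %d\n", i, valid[i]);

	return 0;
}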
From 6f68711dd63522aab34c3e9513fa42a7586a95e5 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 15 Mar 2018 21:32:27 -0500
Subject: [PATCH 0990/1286] drm/amdgpu/atomfirmware: add new gfx_info data
 table v2.4 (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adds additional gfx configuration data.

v2: fix typo

Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/include/atomfirmware.h | 34 ++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index de177ce8ca80..fd5e80c92ed0 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1219,6 +1219,40 @@ struct  atom_gfx_info_v2_3 {
   uint32_t rm21_sram_vmin_value;
 };
 
+struct  atom_gfx_info_v2_4 {
+  struct  atom_common_table_header  table_header;
+  uint8_t gfxip_min_ver;
+  uint8_t gfxip_max_ver;
+  uint8_t gc_num_se;
+  uint8_t max_tile_pipes;
+  uint8_t gc_num_cu_per_sh;
+  uint8_t gc_num_sh_per_se;
+  uint8_t gc_num_rb_per_se;
+  uint8_t gc_num_tccs;
+  uint32_t regaddr_cp_dma_src_addr;
+  uint32_t regaddr_cp_dma_src_addr_hi;
+  uint32_t regaddr_cp_dma_dst_addr;
+  uint32_t regaddr_cp_dma_dst_addr_hi;
+  uint32_t regaddr_cp_dma_command;
+  uint32_t regaddr_cp_status;
+  uint32_t regaddr_rlc_gpu_clock_32;
+  uint32_t rlc_gpu_timer_refclk;
+  uint8_t active_cu_per_sh;
+  uint8_t active_rb_per_se;
+  uint16_t gcgoldenoffset;
+  uint32_t rm21_sram_vmin_value;
+  uint16_t gc_num_gprs;
+  uint16_t gc_gsprim_buff_depth;
+  uint16_t gc_parameter_cache_depth;
+  uint16_t gc_wave_size;
+  uint16_t gc_max_waves_per_simd;
+  uint16_t gc_lds_size;
+  uint8_t gc_num_max_gs_thds;
+  uint8_t gc_gs_table_depth;
+  uint8_t gc_double_offchip_lds_buffer;
+  uint8_t gc_max_scratch_slots_per_cu;
+};
+
 /* 
   ***************************************************************************
     Data Table smu_info  structure

From 59b0b509f1ae0c7ca54607f2770a1aec6e55d8dc Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Tue, 20 Mar 2018 12:24:03 -0500
Subject: [PATCH 0991/1286] drm/amdgpu/atomfirmware: add parser for gfx_info
 table
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add support for the gfx_info table on boards that use atomfirmware.

Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c  | 46 +++++++++++++++++++
 .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h  |  1 +
 2 files changed, 47 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index a0f48cb9b8f0..7014d5875d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -322,3 +322,49 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
 
 	return ret;
 }
+
+union gfx_info {
+	struct  atom_gfx_info_v2_4 v24;
+};
+
+int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index;
+	uint8_t frev, crev;
+	uint16_t data_offset;
+
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+					    gfx_info);
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		union gfx_info *gfx_info = (union gfx_info *)
+			(mode_info->atom_context->bios + data_offset);
+		switch (crev) {
+		case 4:
+			adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
+			adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
+			adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
+			adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
+			adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
+			adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
+			adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
+			adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
+			adev->gfx.config.gs_prim_buffer_depth =
+				le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
+			adev->gfx.config.double_offchip_lds_buf =
+				gfx_info->v24.gc_double_offchip_lds_buffer;
+			adev->gfx.cu_info.wave_front_size = gfx_info->v24.gc_wave_size;
+			adev->gfx.cu_info.max_waves_per_simd =
+				le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
+			adev->gfx.cu_info.max_scratch_slots_per_cu =
+				gfx_info->v24.gc_max_scratch_slots_per_cu;
+			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
+			return 0;
+		default:
+			return -EINVAL;
+		}
+
+	}
+	return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 7689c961c4ef..20f158fd3b76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -30,5 +30,6 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 
 #endif

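The parser above follows the usual atomfirmware convention: every data table
starts with a common header, the content revision in that header selects the
layout of the remaining bytes, and a union gives typed access to whichever
layout applies. A self-contained sketch of that convention, with invented
struct layouts and values (the real tables also need le16_to_cpu() style
byte-order handling, omitted here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented layouts; the real tables live in atomfirmware.h. */
struct common_header {
	uint8_t format_rev;
	uint8_t content_rev;
};

struct gfx_info_v2_3 {
	struct common_header hdr;
	uint8_t num_se;
};

struct gfx_info_v2_4 {
	struct common_header hdr;
	uint8_t num_se;
	uint16_t lds_size;
};

union gfx_info {
	struct common_header hdr;
	struct gfx_info_v2_3 v23;
	struct gfx_info_v2_4 v24;
};

/* Pick the layout based on the content revision in the common header. */
static int parse_gfx_info(const union gfx_info *info)
{
	switch (info->hdr.content_rev) {
	case 3:
		printf("v2_3: num_se=%u\n", (unsigned int)info->v23.num_se);
		return 0;
	case 4:
		printf("v2_4: num_se=%u lds_size=%u\n",
		       (unsigned int)info->v24.num_se,
		       (unsigned int)info->v24.lds_size);
		return 0;
	default:
		return -1;	/* unknown revision */
	}
}

int main(void)
{
	union gfx_info info;

	memset(&info, 0, sizeof(info));
	info.v24.hdr.format_rev = 2;
	info.v24.hdr.content_rev = 4;
	info.v24.num_se = 4;
	info.v24.lds_size = 64;

	return parse_gfx_info(&info);
}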
From 3251c0438a1efcc51c357f7014b33b9e02b129cd Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Fri, 20 Apr 2018 12:31:04 +0800
Subject: [PATCH 0992/1286] drm/amdgpu: Use vbios table for gpu info on vega20

Use the vbios table rather than gpu info firmware.

Squash of the following patches:
drm/amdgpu/vg20: fallback to vbios table if gpu info fw is not available (v2)
drm/amdgpu: drop gpu_info firmware for vega20

Reviewed-by: Amber Lin <Amber.Lin@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 13253e09f4bd..d7530fdfaad5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -27,6 +27,7 @@
 #include "amdgpu_gfx.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "amdgpu_atomfirmware.h"
 
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
@@ -1113,9 +1114,10 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
 	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
 };
 
-static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
+static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
+	int err;
 
 	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
 
@@ -1146,6 +1148,10 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
 		gb_addr_config &= ~0xf3e777ff;
 		gb_addr_config |= 0x22014042;
+		/* check vbios table if gpu info is not available */
+		err = amdgpu_atomfirmware_get_gfx_info(adev);
+		if (err)
+			return err;
 		break;
 	case CHIP_RAVEN:
 		adev->gfx.config.max_hw_contexts = 8;
@@ -1196,6 +1202,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 					adev->gfx.config.gb_addr_config,
 					GB_ADDR_CONFIG,
 					PIPE_INTERLEAVE_SIZE));
+
+	return 0;
 }
 
 static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
@@ -1557,7 +1565,9 @@ static int gfx_v9_0_sw_init(void *handle)
 
 	adev->gfx.ce_ram_size = 0x8000;
 
-	gfx_v9_0_gpu_early_init(adev);
+	r = gfx_v9_0_gpu_early_init(adev);
+	if (r)
+		return r;
 
 	r = gfx_v9_0_ngg_init(adev);
 	if (r)

From 24e6bc784363ee4056d81c8990a0127891678b43 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Tue, 24 Apr 2018 11:11:16 +0800
Subject: [PATCH 0993/1286] drm/amdgpu: Set vega20 load_type to
 AMDGPU_FW_LOAD_DIRECT.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Please revert this patch once PSP firmware loading is enabled.

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index b419d6e33b3a..f55f72a37ca8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -303,11 +303,12 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_VEGA10:
 	case CHIP_RAVEN:
 	case CHIP_VEGA12:
-	case CHIP_VEGA20:
 		if (!load_type)
 			return AMDGPU_FW_LOAD_DIRECT;
 		else
 			return AMDGPU_FW_LOAD_PSP;
+	case CHIP_VEGA20:
+		return AMDGPU_FW_LOAD_DIRECT;
 	default:
 		DRM_ERROR("Unknown firmware load type\n");
 	}

From 27db6a0073f162cdb15975c9d29d159d772b1ec0 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko <digetx@gmail.com>
Date: Mon, 23 Apr 2018 12:54:56 +0300
Subject: [PATCH 0994/1286] gpu: host1x: Fix dma_free_wc() argument in the
 error path

If IOVA allocation or IOMMU mapping fails, dma_free_wc() is invoked with
size=0 because of a typo, which triggers "kernel BUG at mm/vmalloc.c:124!".

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Reviewed-by: Mikko Perttunen <mperttunen@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/host1x/cdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 28541b280739..cf6caa90bf89 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -127,7 +127,7 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
 iommu_free_iova:
 	__free_iova(&host1x->iova, alloc);
 iommu_free_mem:
-	dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
+	dma_free_wc(host1x->dev, size, pb->mapped, pb->phys);
 
 	return err;
 }

From 5f43ac8d80e4c768380e86d312a591472d080eeb Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Mon, 23 Apr 2018 08:57:44 +0200
Subject: [PATCH 0995/1286] drm/tegra: Fix order of teardown in IOMMU case

The original code works fine, this is merely a cosmetic change to make
the teardown order the reverse of the setup order.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 181e82c58a4f..7b9f73bcf155 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -204,10 +204,10 @@ config:
 	drm_mode_config_cleanup(drm);
 
 	if (tegra->domain) {
-		iommu_domain_free(tegra->domain);
-		drm_mm_takedown(&tegra->mm);
 		mutex_destroy(&tegra->mm_lock);
+		drm_mm_takedown(&tegra->mm);
 		put_iova_domain(&tegra->carveout.domain);
+		iommu_domain_free(tegra->domain);
 	}
 free:
 	kfree(tegra);
@@ -230,10 +230,10 @@ static void tegra_drm_unload(struct drm_device *drm)
 		return;
 
 	if (tegra->domain) {
-		iommu_domain_free(tegra->domain);
-		drm_mm_takedown(&tegra->mm);
 		mutex_destroy(&tegra->mm_lock);
+		drm_mm_takedown(&tegra->mm);
 		put_iova_domain(&tegra->carveout.domain);
+		iommu_domain_free(tegra->domain);
 	}
 
 	kfree(tegra);

From 24cfdc1ac7d4260aa8416505b9cb6316c9e89021 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Mon, 23 Apr 2018 08:57:45 +0200
Subject: [PATCH 0996/1286] drm/tegra: Acquire a reference to the IOVA cache

The IOVA API uses a memory cache to allocate IOVA nodes from. To make
sure that this cache is available, obtain a reference to it and release
the reference when the cache is no longer needed.

On 64-bit ARM this is hidden by the fact that the DMA mapping API gets
that reference and never releases it. On 32-bit ARM, however, the DMA
mapping API doesn't do that, so allocation of IOVA nodes fails.
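
As a rough illustration of the pairing this change introduces (the names and
the domain-init call below are illustrative; only iova_cache_get(),
iova_cache_put() and put_iova_domain() are taken from the patch, and the
three-argument init_iova_domain() signature is assumed), a driver that
manages its own iova_domain ties the cache reference to the lifetime of that
domain:

#include <linux/iova.h>
#include <linux/mm.h>

static struct iova_domain example_domain;

static int example_iova_setup(void)
{
	int err;

	/* Make sure the global IOVA node kmem_cache exists before use. */
	err = iova_cache_get();
	if (err < 0)
		return err;

	/* Assumed three-argument form: granule size and starting pfn. */
	init_iova_domain(&example_domain, PAGE_SIZE, 1);
	return 0;
}

static void example_iova_teardown(void)
{
	put_iova_domain(&example_domain);

	/* Drop the reference taken in example_iova_setup(). */
	iova_cache_put();
}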

Fixes: ad92601521ea ("drm/tegra: Add Tegra DRM allocation API")
Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 7b9f73bcf155..3cdef659cd39 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -113,6 +113,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 			goto free;
 		}
 
+		err = iova_cache_get();
+		if (err < 0)
+			goto domain;
+
 		geometry = &tegra->domain->geometry;
 		gem_start = geometry->aperture_start;
 		gem_end = geometry->aperture_end - CARVEOUT_SZ;
@@ -207,8 +211,11 @@ config:
 		mutex_destroy(&tegra->mm_lock);
 		drm_mm_takedown(&tegra->mm);
 		put_iova_domain(&tegra->carveout.domain);
-		iommu_domain_free(tegra->domain);
+		iova_cache_put();
 	}
+domain:
+	if (tegra->domain)
+		iommu_domain_free(tegra->domain);
 free:
 	kfree(tegra);
 	return err;
@@ -233,6 +240,7 @@ static void tegra_drm_unload(struct drm_device *drm)
 		mutex_destroy(&tegra->mm_lock);
 		drm_mm_takedown(&tegra->mm);
 		put_iova_domain(&tegra->carveout.domain);
+		iova_cache_put();
 		iommu_domain_free(tegra->domain);
 	}
 

From f40e1590c5270e5559fb95a5a0a7c1f5266a522d Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Mon, 14 May 2018 11:14:00 +0200
Subject: [PATCH 0997/1286] gpu: host1x: Acquire a reference to the IOVA cache

The IOVA API uses a memory cache to allocate IOVA nodes from. To make
sure that this cache is available, obtain a reference to it and release
the reference when the cache is no longer needed.

On 64-bit ARM this is hidden by the fact that the DMA mapping API gets
that reference and never releases it. On 32-bit ARM, this is papered
over by the Tegra DRM driver (the sole user of the host1x API requiring
the cache) acquiring a reference to the IOVA cache for its own purposes.
However, there may be additional users of this API in the future, so fix
this upfront to avoid surprises.

Fixes: 404bfb78daf3 ("gpu: host1x: Add IOMMU support")
Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/host1x/dev.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 03db71173f5d..f1d5f76e9c33 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -223,10 +223,14 @@ static int host1x_probe(struct platform_device *pdev)
 		struct iommu_domain_geometry *geometry;
 		unsigned long order;
 
+		err = iova_cache_get();
+		if (err < 0)
+			goto put_group;
+
 		host->domain = iommu_domain_alloc(&platform_bus_type);
 		if (!host->domain) {
 			err = -ENOMEM;
-			goto put_group;
+			goto put_cache;
 		}
 
 		err = iommu_attach_group(host->domain, host->group);
@@ -234,6 +238,7 @@ static int host1x_probe(struct platform_device *pdev)
 			if (err == -ENODEV) {
 				iommu_domain_free(host->domain);
 				host->domain = NULL;
+				iova_cache_put();
 				iommu_group_put(host->group);
 				host->group = NULL;
 				goto skip_iommu;
@@ -308,6 +313,9 @@ fail_detach_device:
 fail_free_domain:
 	if (host->domain)
 		iommu_domain_free(host->domain);
+put_cache:
+	if (host->group)
+		iova_cache_put();
 put_group:
 	iommu_group_put(host->group);
 
@@ -328,6 +336,7 @@ static int host1x_remove(struct platform_device *pdev)
 		put_iova_domain(&host->iova);
 		iommu_detach_group(host->domain, host->group);
 		iommu_domain_free(host->domain);
+		iova_cache_put();
 		iommu_group_put(host->group);
 	}
 

From cc7add70cad12054e096b034578827d7065f64bb Mon Sep 17 00:00:00 2001
From: Souptick Joarder <jrdr.linux@gmail.com>
Date: Tue, 17 Apr 2018 19:17:55 +0530
Subject: [PATCH 0998/1286] drm/tegra: Adding new typedef vm_fault_t

Use the new return type vm_fault_t for the fault handler. For now, this
merely documents that the function returns a VM_FAULT value rather than
an errno. Once all instances are converted, vm_fault_t will become a
distinct type.

Reference: commit 1c8f422059ae ("mm: change return type to vm_fault_t")

Previously, vm_insert_page() returned an errno which the driver then
mapped to a VM_FAULT_* value. The new function vmf_insert_page() removes
this boilerplate by returning a VM_FAULT_* value directly.
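
For context, a sketch of roughly what the new helper does internally
(simplified; the exact mm/ implementation may differ slightly) — the
errno-to-VM_FAULT_* translation that each driver used to open-code now
lives in one place:

#include <linux/mm.h>

static vm_fault_t example_vmf_insert_page(struct vm_area_struct *vma,
					  unsigned long addr,
					  struct page *page)
{
	/* Translate the errno from vm_insert_page() into a VM_FAULT_* code. */
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}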

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/gem.c | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 8b0b4ff64bb4..1c4011774c3f 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -422,14 +422,13 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
 	return 0;
 }
 
-static int tegra_bo_fault(struct vm_fault *vmf)
+static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *gem = vma->vm_private_data;
 	struct tegra_bo *bo = to_tegra_bo(gem);
 	struct page *page;
 	pgoff_t offset;
-	int err;
 
 	if (!bo->pages)
 		return VM_FAULT_SIGBUS;
@@ -437,20 +436,7 @@ static int tegra_bo_fault(struct vm_fault *vmf)
 	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 	page = bo->pages[offset];
 
-	err = vm_insert_page(vma, vmf->address, page);
-	switch (err) {
-	case -EAGAIN:
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		return VM_FAULT_NOPAGE;
-
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	}
-
-	return VM_FAULT_SIGBUS;
+	return vmf_insert_page(vma, vmf->address, page);
 }
 
 const struct vm_operations_struct tegra_bo_vm_ops = {

From e1189921b5ff9dcfec52b21cf12bb52c5dccd34d Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 30 Mar 2018 15:11:25 +0100
Subject: [PATCH 0999/1286] drm/tegra: Remove duplicate framebuffer num_planes

drm_framebuffer already stores num_planes for us.

Signed-off-by: Daniel Stone <daniels@collabora.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: linux-tegra@vger.kernel.org
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.h | 1 -
 drivers/gpu/drm/tegra/fb.c  | 6 ++----
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index fe263cf58f34..61a4657e45fa 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -32,7 +32,6 @@ struct reset_control;
 struct tegra_fb {
 	struct drm_framebuffer base;
 	struct tegra_bo **planes;
-	unsigned int num_planes;
 };
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e69434909a42..75badf371721 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -107,7 +107,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 	struct tegra_fb *fb = to_tegra_fb(framebuffer);
 	unsigned int i;
 
-	for (i = 0; i < fb->num_planes; i++) {
+	for (i = 0; i < framebuffer->format->num_planes; i++) {
 		struct tegra_bo *bo = fb->planes[i];
 
 		if (bo) {
@@ -155,11 +155,9 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	fb->num_planes = num_planes;
-
 	drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
 
-	for (i = 0; i < fb->num_planes; i++)
+	for (i = 0; i < fb->base.format->num_planes; i++)
 		fb->planes[i] = planes[i];
 
 	err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);

From 0bc6af006f5ce7fb92d41dc8e01b621bd8d2226b Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 30 Mar 2018 15:11:26 +0100
Subject: [PATCH 1000/1286] drm/tegra: Move GEM BOs to drm_framebuffer

Since drm_framebuffer can now store GEM objects directly, place them
there rather than in our own subclass. As this makes the framebuffer
create_handle function the same as the GEM framebuffer helper, we
can reuse that.

Signed-off-by: Daniel Stone <daniels@collabora.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: linux-tegra@vger.kernel.org
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.h |  1 -
 drivers/gpu/drm/tegra/fb.c  | 37 ++++++++-----------------------------
 2 files changed, 8 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 61a4657e45fa..7f9810f026e8 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -31,7 +31,6 @@ struct reset_control;
 
 struct tegra_fb {
 	struct drm_framebuffer base;
-	struct tegra_bo **planes;
 };
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 75badf371721..5bc8f968284c 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -14,6 +14,7 @@
 
 #include "drm.h"
 #include "gem.h"
+#include <drm/drm_gem_framebuffer_helper.h>
 
 static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
 {
@@ -30,19 +31,14 @@ static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
 struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
 				    unsigned int index)
 {
-	struct tegra_fb *fb = to_tegra_fb(framebuffer);
-
-	if (index >= framebuffer->format->num_planes)
-		return NULL;
-
-	return fb->planes[index];
+	return to_tegra_bo(drm_gem_fb_get_obj(framebuffer, index));
 }
 
 bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
 {
-	struct tegra_fb *fb = to_tegra_fb(framebuffer);
+	struct tegra_bo *bo = tegra_fb_get_plane(framebuffer, 0);
 
-	if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
+	if (bo->flags & TEGRA_BO_BOTTOM_UP)
 		return true;
 
 	return false;
@@ -51,8 +47,7 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
 int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
 			struct tegra_bo_tiling *tiling)
 {
-	struct tegra_fb *fb = to_tegra_fb(framebuffer);
-	uint64_t modifier = fb->base.modifier;
+	uint64_t modifier = framebuffer->modifier;
 
 	switch (modifier) {
 	case DRM_FORMAT_MOD_LINEAR:
@@ -108,7 +103,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 	unsigned int i;
 
 	for (i = 0; i < framebuffer->format->num_planes; i++) {
-		struct tegra_bo *bo = fb->planes[i];
+		struct tegra_bo *bo = tegra_fb_get_plane(framebuffer, i);
 
 		if (bo) {
 			if (bo->pages)
@@ -119,21 +114,12 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 	}
 
 	drm_framebuffer_cleanup(framebuffer);
-	kfree(fb->planes);
 	kfree(fb);
 }
 
-static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
-				  struct drm_file *file, unsigned int *handle)
-{
-	struct tegra_fb *fb = to_tegra_fb(framebuffer);
-
-	return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
-}
-
 static const struct drm_framebuffer_funcs tegra_fb_funcs = {
 	.destroy = tegra_fb_destroy,
-	.create_handle = tegra_fb_create_handle,
+	.create_handle = drm_gem_fb_create_handle,
 };
 
 static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
@@ -149,22 +135,15 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
 	if (!fb)
 		return ERR_PTR(-ENOMEM);
 
-	fb->planes = kzalloc(num_planes * sizeof(*planes), GFP_KERNEL);
-	if (!fb->planes) {
-		kfree(fb);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
 
 	for (i = 0; i < fb->base.format->num_planes; i++)
-		fb->planes[i] = planes[i];
+		fb->base.obj[i] = &planes[i]->gem;
 
 	err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
 	if (err < 0) {
 		dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
 			err);
-		kfree(fb->planes);
 		kfree(fb);
 		return ERR_PTR(err);
 	}

From dbc33c7d65536bce447057dc6f882decc515047d Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 30 Mar 2018 15:11:27 +0100
Subject: [PATCH 1001/1286] drm/tegra: tegra_fb -> drm_framebuffer

Since tegra_fb is now the same as drm_framebuffer, we can just replace
the type completely.

Signed-off-by: Daniel Stone <daniels@collabora.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: linux-tegra@vger.kernel.org
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.h |  6 +-----
 drivers/gpu/drm/tegra/fb.c  | 34 ++++++++++++++--------------------
 2 files changed, 15 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 7f9810f026e8..f47a60592334 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -29,14 +29,10 @@
 
 struct reset_control;
 
-struct tegra_fb {
-	struct drm_framebuffer base;
-};
-
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 struct tegra_fbdev {
 	struct drm_fb_helper base;
-	struct tegra_fb *fb;
+	struct drm_framebuffer *fb;
 };
 #endif
 
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 5bc8f968284c..57da9683a713 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -16,11 +16,6 @@
 #include "gem.h"
 #include <drm/drm_gem_framebuffer_helper.h>
 
-static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
-{
-	return container_of(fb, struct tegra_fb, base);
-}
-
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
 {
@@ -99,7 +94,6 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
 
 static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 {
-	struct tegra_fb *fb = to_tegra_fb(framebuffer);
 	unsigned int i;
 
 	for (i = 0; i < framebuffer->format->num_planes; i++) {
@@ -114,7 +108,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 	}
 
 	drm_framebuffer_cleanup(framebuffer);
-	kfree(fb);
+	kfree(framebuffer);
 }
 
 static const struct drm_framebuffer_funcs tegra_fb_funcs = {
@@ -122,12 +116,12 @@ static const struct drm_framebuffer_funcs tegra_fb_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 };
 
-static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
-				       const struct drm_mode_fb_cmd2 *mode_cmd,
-				       struct tegra_bo **planes,
-				       unsigned int num_planes)
+static struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
+					      const struct drm_mode_fb_cmd2 *mode_cmd,
+					      struct tegra_bo **planes,
+					      unsigned int num_planes)
 {
-	struct tegra_fb *fb;
+	struct drm_framebuffer *fb;
 	unsigned int i;
 	int err;
 
@@ -135,12 +129,12 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
 	if (!fb)
 		return ERR_PTR(-ENOMEM);
 
-	drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
+	drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
 
-	for (i = 0; i < fb->base.format->num_planes; i++)
-		fb->base.obj[i] = &planes[i]->gem;
+	for (i = 0; i < fb->format->num_planes; i++)
+		fb->obj[i] = &planes[i]->gem;
 
-	err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
+	err = drm_framebuffer_init(drm, fb, &tegra_fb_funcs);
 	if (err < 0) {
 		dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
 			err);
@@ -158,7 +152,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
 	unsigned int hsub, vsub, i;
 	struct tegra_bo *planes[4];
 	struct drm_gem_object *gem;
-	struct tegra_fb *fb;
+	struct drm_framebuffer *fb;
 	int err;
 
 	hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
@@ -194,7 +188,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
 		goto unreference;
 	}
 
-	return &fb->base;
+	return fb;
 
 unreference:
 	while (i--)
@@ -275,7 +269,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
 		return PTR_ERR(fbdev->fb);
 	}
 
-	fb = &fbdev->fb->base;
+	fb = fbdev->fb;
 	helper->fb = fb;
 	helper->fbdev = info;
 
@@ -376,7 +370,7 @@ static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
 	drm_fb_helper_unregister_fbi(&fbdev->base);
 
 	if (fbdev->fb)
-		drm_framebuffer_remove(&fbdev->fb->base);
+		drm_framebuffer_remove(fbdev->fb);
 
 	drm_fb_helper_fini(&fbdev->base);
 	tegra_fbdev_free(fbdev);

From c34a997d033df6bbeaf5c06e9124f89bc0ecac8d Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 30 Mar 2018 15:11:28 +0100
Subject: [PATCH 1002/1286] drm/tegra: Move fbdev unmap special case

User framebuffers are created with either bo->pages or bo->vaddr set,
depending on whether or not an IOMMU is present. The framebuffer created
for fbdev emulation, on the other hand, additionally gets a vaddr mapping
made after creation when bo->pages is set; this mapping is set up in
fbdev probe.
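
For context, the probe-side mapping that this teardown now undoes looks
roughly like the following (a simplified sketch of the fbdev probe path
with error handling trimmed; vmap() comes from <linux/vmalloc.h> and
pgprot_writecombine() from the arch pgtable headers):

	/* In fbdev probe, after the BO backing the framebuffer is created: */
	if (bo->pages) {
		bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
				 pgprot_writecombine(PAGE_KERNEL));
		if (!bo->vaddr)
			return -ENOMEM;
	}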

Remove the special case unmapping from the general-purpose framebuffer
destroy, and move it to fbdev teardown.

Signed-off-by: Daniel Stone <daniels@collabora.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: linux-tegra@vger.kernel.org
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/fb.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 57da9683a713..709aa6ef171a 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -99,12 +99,8 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 	for (i = 0; i < framebuffer->format->num_planes; i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(framebuffer, i);
 
-		if (bo) {
-			if (bo->pages)
-				vunmap(bo->vaddr);
-
+		if (bo)
 			drm_gem_object_put_unlocked(&bo->gem);
-		}
 	}
 
 	drm_framebuffer_cleanup(framebuffer);
@@ -369,8 +365,17 @@ static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
 {
 	drm_fb_helper_unregister_fbi(&fbdev->base);
 
-	if (fbdev->fb)
+	if (fbdev->fb) {
+		struct tegra_bo *bo = tegra_fb_get_plane(fbdev->fb, 0);
+
+		/* Undo the special mapping we made in fbdev probe. */
+		if (bo && bo->pages) {
+			vunmap(bo->vaddr);
+			bo->vaddr = 0;
+		}
+
 		drm_framebuffer_remove(fbdev->fb);
+	}
 
 	drm_fb_helper_fini(&fbdev->base);
 	tegra_fbdev_free(fbdev);

From 5cb8b9969be6f14ac3b7ba90de8f7a65f68e46fe Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Fri, 30 Mar 2018 15:11:29 +0100
Subject: [PATCH 1003/1286] drm/tegra: Use drm_gem_fb_destroy

Now that our destroy function is the same as the helper, use that
directly.

Signed-off-by: Daniel Stone <daniels@collabora.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: linux-tegra@vger.kernel.org
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/fb.c | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 709aa6ef171a..4c22cdded3c2 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -92,23 +92,8 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
 	return 0;
 }
 
-static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
-{
-	unsigned int i;
-
-	for (i = 0; i < framebuffer->format->num_planes; i++) {
-		struct tegra_bo *bo = tegra_fb_get_plane(framebuffer, i);
-
-		if (bo)
-			drm_gem_object_put_unlocked(&bo->gem);
-	}
-
-	drm_framebuffer_cleanup(framebuffer);
-	kfree(framebuffer);
-}
-
 static const struct drm_framebuffer_funcs tegra_fb_funcs = {
-	.destroy = tegra_fb_destroy,
+	.destroy = drm_gem_fb_destroy,
 	.create_handle = drm_gem_fb_create_handle,
 };
 

From 9dfbd7319909a948146f5c3438f7bd86c2c53cb6 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann <arnd@arndb.de>
Date: Fri, 2 Feb 2018 16:27:31 +0100
Subject: [PATCH 1004/1286] drm/nouveau: nouveau: use larger buffer in
 nvif_vmm_map

gcc points out a buffer that is clearly too small to be used in a
meaningful way, as the 'sizeof(*args) + argc > sizeof(stack)' check is
always true and the stack buffer is therefore never used:

In function 'memcpy',
    inlined from 'nvif_vmm_map' at drivers/gpu/drm/nouveau/nvif/vmm.c:55:2:
include/linux/string.h:353:9: error: '__builtin_memcpy' offset 40 is out of the bounds [0, 16] of object 'stack' with type 'u8[16]' {aka 'unsigned char[16]'} [-Werror=array-bounds]
  return __builtin_memcpy(p, q, size);
         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/gpu/drm/nouveau/nvif/vmm.c: In function 'nvif_vmm_map':
drivers/gpu/drm/nouveau/nvif/vmm.c:40:5: note: 'stack' declared here

This makes the buffer large enough that it should serve the purpose the
author presumably had in mind. Alternatively, we could just get rid of it
completely and simplify the code at the cost of always doing the kmalloc
(as the current version effectively does anyway).
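
The idiom being repaired is the usual "small arguments on the stack,
larger ones from kmalloc()" marshalling pattern; a minimal sketch under
that assumption follows (illustrative names, not the nouveau code; the
40-byte header size is taken from the gcc warning above):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_args {
	u8 data[40];		/* stands in for the 40-byte header struct */
};

static int example_submit(const void *argv, u32 argc)
{
	struct example_args *args;
	u8 stack[48];		/* now big enough for the header itself */
	int ret = 0;

	if (sizeof(*args) + argc > sizeof(stack)) {
		/* Slow path: header plus payload do not fit on the stack. */
		args = kmalloc(sizeof(*args) + argc, GFP_KERNEL);
		if (!args)
			return -ENOMEM;
	} else {
		/* Fast path: reuse the on-stack buffer, no allocation. */
		args = (void *)stack;
	}

	memcpy(args + 1, argv, argc);	/* payload follows the header */
	/* ... fill in *args and submit it ... */

	if (args != (void *)stack)
		kfree(args);

	return ret;
}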

Fixes: 920d2b5ef215 ("drm/nouveau/mmu: define user interfaces to mmu vmm opertaions")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvif/vmm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 31cdb2d2e1ff..191832be6c65 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -37,7 +37,7 @@ nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
 	     struct nvif_mem *mem, u64 offset)
 {
 	struct nvif_vmm_map_v0 *args;
-	u8 stack[16];
+	u8 stack[48];
 	int ret;
 
 	if (sizeof(*args) + argc > sizeof(stack)) {

From 7bf5b70befd7817b9e42acbd2291b2042ea1bf81 Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
Date: Tue, 13 Mar 2018 11:24:11 -0500
Subject: [PATCH 1005/1286] drm/nouveau/secboot: remove VLA usage

In preparation for enabling -Wvla, remove the VLA. In this particular
case, directly use the macro NVKM_MSGQUEUE_CMDLINE_SIZE instead of the
local variable cmdline_size. Also remove cmdline_size, as it is no
longer useful.

The use of stack Variable Length Arrays needs to be avoided, as they
can be a vector for stack exhaustion, which can be either a runtime bug
or a security flaw. Also, in general, as code evolves it is easy to
lose track of how big a VLA can get. Thus, we can end up with runtime
failures that are hard to debug.

This was also fixed as part of the directive to remove all VLAs from
the kernel: https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
Reviewed-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c    | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index 6f10b098676c..1e1f1c635cab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -80,12 +80,11 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
 			 struct nvkm_falcon *falcon, u32 addr_args)
 {
 	struct nvkm_device *device = falcon->owner->device;
-	u32 cmdline_size = NVKM_MSGQUEUE_CMDLINE_SIZE;
-	u8 buf[cmdline_size];
+	u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE];
 
-	memset(buf, 0, cmdline_size);
+	memset(buf, 0, sizeof(buf));
 	nvkm_msgqueue_write_cmdline(queue, buf);
-	nvkm_falcon_load_dmem(falcon, buf, addr_args, cmdline_size, 0);
+	nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0);
 	/* rearm the queue so it will wait for the init message */
 	nvkm_msgqueue_reinit(queue);
 

From 1ce466894b532dc26b02a334fc609378b9231ea8 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1006/1286] drm/nouveau/core: define FAULT subdev

This will be responsible for the handling of MMU fault buffers on GPUs
that support them.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvkm/core/device.h  | 3 +++
 drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h | 8 ++++++++
 drivers/gpu/drm/nouveau/nvkm/core/subdev.c          | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c   | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h   | 1 +
 drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild          | 1 +
 drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild    | 0
 7 files changed, 16 insertions(+)
 create mode 100644 drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 560265b15ec2..f2f9b9e7ce2e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -22,6 +22,7 @@ enum nvkm_devidx {
 	NVKM_SUBDEV_LTC,
 	NVKM_SUBDEV_MMU,
 	NVKM_SUBDEV_BAR,
+	NVKM_SUBDEV_FAULT,
 	NVKM_SUBDEV_PMU,
 	NVKM_SUBDEV_VOLT,
 	NVKM_SUBDEV_ICCSENSE,
@@ -123,6 +124,7 @@ struct nvkm_device {
 	struct nvkm_bus *bus;
 	struct nvkm_clk *clk;
 	struct nvkm_devinit *devinit;
+	struct nvkm_fault *fault;
 	struct nvkm_fb *fb;
 	struct nvkm_fuse *fuse;
 	struct nvkm_gpio *gpio;
@@ -194,6 +196,7 @@ struct nvkm_device_chip {
 	int (*bus     )(struct nvkm_device *, int idx, struct nvkm_bus **);
 	int (*clk     )(struct nvkm_device *, int idx, struct nvkm_clk **);
 	int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **);
+	int (*fault   )(struct nvkm_device *, int idx, struct nvkm_fault **);
 	int (*fb      )(struct nvkm_device *, int idx, struct nvkm_fb **);
 	int (*fuse    )(struct nvkm_device *, int idx, struct nvkm_fuse **);
 	int (*gpio    )(struct nvkm_device *, int idx, struct nvkm_gpio **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
new file mode 100644
index 000000000000..35a9cfc9301d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -0,0 +1,8 @@
+#ifndef __NVKM_FAULT_H__
+#define __NVKM_FAULT_H__
+#include <core/subdev.h>
+
+struct nvkm_fault {
+	struct nvkm_subdev subdev;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index a134d225f958..c707306ac286 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -35,6 +35,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
 	[NVKM_SUBDEV_BUS     ] = "bus",
 	[NVKM_SUBDEV_CLK     ] = "clk",
 	[NVKM_SUBDEV_DEVINIT ] = "devinit",
+	[NVKM_SUBDEV_FAULT   ] = "fault",
 	[NVKM_SUBDEV_FB      ] = "fb",
 	[NVKM_SUBDEV_FUSE    ] = "fuse",
 	[NVKM_SUBDEV_GPIO    ] = "gpio",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 05cd674326a6..70f3cc0844c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2420,6 +2420,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
 	_(BUS     , device->bus     , &device->bus->subdev);
 	_(CLK     , device->clk     , &device->clk->subdev);
 	_(DEVINIT , device->devinit , &device->devinit->subdev);
+	_(FAULT   , device->fault   , &device->fault->subdev);
 	_(FB      , device->fb      , &device->fb->subdev);
 	_(FUSE    , device->fuse    , &device->fuse->subdev);
 	_(GPIO    , device->gpio    , &device->gpio->subdev);
@@ -2891,6 +2892,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		_(NVKM_SUBDEV_BUS     ,      bus);
 		_(NVKM_SUBDEV_CLK     ,      clk);
 		_(NVKM_SUBDEV_DEVINIT ,  devinit);
+		_(NVKM_SUBDEV_FAULT   ,    fault);
 		_(NVKM_SUBDEV_FB      ,       fb);
 		_(NVKM_SUBDEV_FUSE    ,     fuse);
 		_(NVKM_SUBDEV_GPIO    ,     gpio);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 08d0bf605722..253ab914a8ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -8,6 +8,7 @@
 #include <subdev/bus.h>
 #include <subdev/clk.h>
 #include <subdev/devinit.h>
+#include <subdev/fault.h>
 #include <subdev/fb.h>
 #include <subdev/fuse.h>
 #include <subdev/gpio.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 3f5d38d74fba..cfdffef1afb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -3,6 +3,7 @@ include $(src)/nvkm/subdev/bios/Kbuild
 include $(src)/nvkm/subdev/bus/Kbuild
 include $(src)/nvkm/subdev/clk/Kbuild
 include $(src)/nvkm/subdev/devinit/Kbuild
+include $(src)/nvkm/subdev/fault/Kbuild
 include $(src)/nvkm/subdev/fb/Kbuild
 include $(src)/nvkm/subdev/fuse/Kbuild
 include $(src)/nvkm/subdev/gpio/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
new file mode 100644
index 000000000000..e69de29bb2d1

From 2f68234fb3e7d0b123a8166ba46bd5c3e577b270 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1007/1286] drm/nouveau/mc/gp100-: route fault buffer
 interrupts to FAULT

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/subdev/mc/gp100.c    | 20 ++++++++++++++++++-
 .../gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c    |  2 +-
 drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h |  2 ++
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
index 7321ad3758c3..43db245eec9a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -75,10 +75,28 @@ gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
 	spin_unlock_irqrestore(&mc->lock, flags);
 }
 
+const struct nvkm_mc_map
+gp100_mc_intr[] = {
+	{ 0x04000000, NVKM_ENGINE_DISP },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00000200, NVKM_SUBDEV_FAULT },
+	{ 0x40000000, NVKM_SUBDEV_IBUS },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x08000000, NVKM_SUBDEV_FB },
+	{ 0x02000000, NVKM_SUBDEV_LTC },
+	{ 0x01000000, NVKM_SUBDEV_PMU },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },
+	{ 0x00200000, NVKM_SUBDEV_I2C },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00040000, NVKM_SUBDEV_THERM },
+	{ 0x00002000, NVKM_SUBDEV_FB },
+	{},
+};
+
 static const struct nvkm_mc_func
 gp100_mc = {
 	.init = nv50_mc_init,
-	.intr = gk104_mc_intr,
+	.intr = gp100_mc_intr,
 	.intr_unarm = gp100_mc_intr_unarm,
 	.intr_rearm = gp100_mc_intr_rearm,
 	.intr_mask = gp100_mc_intr_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
index 2283e3b74277..ff8629de97d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
@@ -34,7 +34,7 @@ gp10b_mc_init(struct nvkm_mc *mc)
 static const struct nvkm_mc_func
 gp10b_mc = {
 	.init = gp10b_mc_init,
-	.intr = gk104_mc_intr,
+	.intr = gp100_mc_intr,
 	.intr_unarm = gp100_mc_intr_unarm,
 	.intr_rearm = gp100_mc_intr_rearm,
 	.intr_mask = gp100_mc_intr_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 8869d79c2b59..d9e3691d45b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -57,4 +57,6 @@ int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int,
 
 extern const struct nvkm_mc_map gk104_mc_intr[];
 extern const struct nvkm_mc_map gk104_mc_reset[];
+
+extern const struct nvkm_mc_map gp100_mc_intr[];
 #endif

From 36780d7eee827047bd9e736f6e2c0be650f30b4e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1008/1286] drm/nouveau/fault: add infrastructure to support
 fault buffers

GPU-specific support will be added separately.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../drm/nouveau/include/nvkm/subdev/fault.h   |   6 +
 .../gpu/drm/nouveau/nvkm/subdev/fault/Kbuild  |   1 +
 .../gpu/drm/nouveau/nvkm/subdev/fault/base.c  | 179 ++++++++++++++++++
 .../gpu/drm/nouveau/nvkm/subdev/fault/priv.h  |  34 ++++
 4 files changed, 220 insertions(+)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 35a9cfc9301d..6ba9c179aa09 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -3,6 +3,12 @@
 #include <core/subdev.h>
 
 struct nvkm_fault {
+	const struct nvkm_fault_func *func;
 	struct nvkm_subdev subdev;
+
+	struct nvkm_fault_buffer *buffer[1];
+	int buffer_nr;
+
+	struct nvkm_event event;
 };
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index e69de29bb2d1..2e4c226634a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -0,0 +1 @@
+nvkm-y += nvkm/subdev/fault/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
new file mode 100644
index 000000000000..007bf4af33b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <core/notify.h>
+#include <subdev/bar.h>
+#include <subdev/mmu.h>
+
+static void
+nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
+{
+	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
+	fault->func->buffer.fini(fault->buffer[index]);
+}
+
+static void
+nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
+{
+	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
+	fault->func->buffer.init(fault->buffer[index]);
+}
+
+static int
+nvkm_fault_ntfy_ctor(struct nvkm_object *object, void *argv, u32 argc,
+		     struct nvkm_notify *notify)
+{
+	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+	if (argc == 0) {
+		notify->size  = 0;
+		notify->types = 1;
+		notify->index = buffer->id;
+		return 0;
+	}
+	return -ENOSYS;
+}
+
+static const struct nvkm_event_func
+nvkm_fault_ntfy = {
+	.ctor = nvkm_fault_ntfy_ctor,
+	.init = nvkm_fault_ntfy_init,
+	.fini = nvkm_fault_ntfy_fini,
+};
+
+static void
+nvkm_fault_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_fault *fault = nvkm_fault(subdev);
+	return fault->func->intr(fault);
+}
+
+static int
+nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_fault *fault = nvkm_fault(subdev);
+	if (fault->func->fini)
+		fault->func->fini(fault);
+	return 0;
+}
+
+static int
+nvkm_fault_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_fault *fault = nvkm_fault(subdev);
+	if (fault->func->init)
+		fault->func->init(fault);
+	return 0;
+}
+
+static int
+nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
+{
+	struct nvkm_subdev *subdev = &fault->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(device);
+	struct nvkm_fault_buffer *buffer;
+	int ret;
+
+	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
+		return -ENOMEM;
+	buffer->fault = fault;
+	buffer->id = id;
+	buffer->entries = fault->func->buffer.entries(buffer);
+	fault->buffer[id] = buffer;
+
+	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
+			      fault->func->buffer.entry_size, 0x1000, true,
+			      &buffer->mem);
+	if (ret)
+		return ret;
+
+	ret = nvkm_vmm_get(bar2, 12, nvkm_memory_size(buffer->mem),
+			   &buffer->vma);
+	if (ret)
+		return ret;
+
+	return nvkm_memory_map(buffer->mem, 0, bar2, buffer->vma, NULL, 0);
+}
+
+static int
+nvkm_fault_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_fault *fault = nvkm_fault(subdev);
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
+		if (i < fault->func->buffer.nr) {
+			ret = nvkm_fault_oneinit_buffer(fault, i);
+			if (ret)
+				return ret;
+			fault->buffer_nr = i + 1;
+		}
+	}
+
+	return nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
+			       &fault->event);
+}
+
+static void *
+nvkm_fault_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(subdev->device);
+	struct nvkm_fault *fault = nvkm_fault(subdev);
+	int i;
+
+	nvkm_event_fini(&fault->event);
+
+	for (i = 0; i < fault->buffer_nr; i++) {
+		if (fault->buffer[i]) {
+			nvkm_vmm_put(bar2, &fault->buffer[i]->vma);
+			nvkm_memory_unref(&fault->buffer[i]->mem);
+			kfree(fault->buffer[i]);
+		}
+	}
+
+	return fault;
+}
+
+static const struct nvkm_subdev_func
+nvkm_fault = {
+	.dtor = nvkm_fault_dtor,
+	.oneinit = nvkm_fault_oneinit,
+	.init = nvkm_fault_init,
+	.fini = nvkm_fault_fini,
+	.intr = nvkm_fault_intr,
+};
+
+int
+nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
+		int index, struct nvkm_fault **pfault)
+{
+	struct nvkm_fault *fault;
+	if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
+	fault->func = func;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
new file mode 100644
index 000000000000..44843ecf12b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_FAULT_PRIV_H__
+#define __NVKM_FAULT_PRIV_H__
+#define nvkm_fault_buffer(p) container_of((p), struct nvkm_fault_buffer, object)
+#define nvkm_fault(p) container_of((p), struct nvkm_fault, subdev)
+#include <subdev/fault.h>
+
+#include <core/event.h>
+#include <core/object.h>
+
+struct nvkm_fault_buffer {
+	struct nvkm_object object;
+	struct nvkm_fault *fault;
+	int id;
+	int entries;
+	struct nvkm_memory *mem;
+	struct nvkm_vma *vma;
+};
+
+int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
+		    int index, struct nvkm_fault **);
+
+struct nvkm_fault_func {
+	void (*init)(struct nvkm_fault *);
+	void (*fini)(struct nvkm_fault *);
+	void (*intr)(struct nvkm_fault *);
+	struct {
+		int nr;
+		u32 entry_size;
+		u32 (*entries)(struct nvkm_fault_buffer *);
+		void (*init)(struct nvkm_fault_buffer *);
+		void (*fini)(struct nvkm_fault_buffer *);
+	} buffer;
+};
+#endif

From d0e9351e420695907e28e921d3786265253787c1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1009/1286] drm/nouveau/fault/gp100: implement replayable fault
 buffer initialisation

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../drm/nouveau/include/nvkm/subdev/fault.h   |  2 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |  7 ++
 .../gpu/drm/nouveau/nvkm/subdev/fault/Kbuild  |  1 +
 .../gpu/drm/nouveau/nvkm/subdev/fault/gp100.c | 69 +++++++++++++++++++
 4 files changed, 79 insertions(+)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 6ba9c179aa09..8ca66e572779 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -11,4 +11,6 @@ struct nvkm_fault {
 
 	struct nvkm_event event;
 };
+
+int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 70f3cc0844c9..379e701962a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2161,6 +2161,7 @@ nv130_chipset = {
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
+	.fault = gp100_fault_new,
 	.fb = gp100_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
@@ -2196,6 +2197,7 @@ nv132_chipset = {
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
+	.fault = gp100_fault_new,
 	.fb = gp102_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
@@ -2231,6 +2233,7 @@ nv134_chipset = {
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
+	.fault = gp100_fault_new,
 	.fb = gp102_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
@@ -2266,6 +2269,7 @@ nv136_chipset = {
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
+	.fault = gp100_fault_new,
 	.fb = gp102_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
@@ -2301,6 +2305,7 @@ nv137_chipset = {
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
+	.fault = gp100_fault_new,
 	.fb = gp102_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
@@ -2336,6 +2341,7 @@ nv138_chipset = {
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gm200_devinit_new,
+	.fault = gp100_fault_new,
 	.fb = gp102_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
@@ -2369,6 +2375,7 @@ nv13b_chipset = {
 	.name = "GP10B",
 	.bar = gm20b_bar_new,
 	.bus = gf100_bus_new,
+	.fault = gp100_fault_new,
 	.fb = gp10b_fb_new,
 	.fuse = gm107_fuse_new,
 	.ibus = gp10b_ibus_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 2e4c226634a1..807ea402a162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -1 +1,2 @@
 nvkm-y += nvkm/subdev/fault/base.o
+nvkm-y += nvkm/subdev/fault/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
new file mode 100644
index 000000000000..5e71db2e8d75
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+static void
+gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
+}
+
+static void
+gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->vma->addr));
+	nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->vma->addr));
+	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
+}
+
+static u32
+gp100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+{
+	return nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+}
+
+static void
+gp100_fault_intr(struct nvkm_fault *fault)
+{
+	nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+}
+
+static const struct nvkm_fault_func
+gp100_fault = {
+	.intr = gp100_fault_intr,
+	.buffer.nr = 1,
+	.buffer.entry_size = 32,
+	.buffer.entries = gp100_fault_buffer_entries,
+	.buffer.init = gp100_fault_buffer_init,
+	.buffer.fini = gp100_fault_buffer_fini,
+};
+
+int
+gp100_fault_new(struct nvkm_device *device, int index,
+		struct nvkm_fault **pfault)
+{
+	return nvkm_fault_new_(&gp100_fault, device, index, pfault);
+}

From d1ea77ab5f7cf378864255fa90f1ab70676a2a10 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1010/1286] drm/nouveau/fb/gf100-: bump size of mmu debug
 buffers to match big page size

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 4 ++--
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index cdc4e0a2cc6b..e8dc4e913494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -46,10 +46,10 @@ gf100_fb_oneinit(struct nvkm_fb *base)
 {
 	struct gf100_fb *fb = gf100_fb(base);
 	struct nvkm_device *device = fb->base.subdev.device;
-	int ret, size = 0x1000;
+	int ret, size = 1 << (fb->base.page ? fb->base.page : 17);
 
 	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
-	size = min(size, 0x1000);
+	size = max(size, 0x1000);
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
 			      true, &fb->base.mmu_rd);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index 147f69b30cd8..d0a47b9a8cd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -48,7 +48,7 @@ gp100_fb_init(struct nvkm_fb *base)
 	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
 	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
 	nvkm_mask(device, 0x100cc4, 0x00060000,
-		  max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
+		  min(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
 }
 
 static const struct nvkm_fb_func

From 85a3b9c8398b2c4c3698a9d851165acf4ffc8d26 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1011/1286] drm/nouveau/fb/gm200-: fix overwriting of big page
 setting

Likely a rebase bug. This should have no impact in the default
configuration, since the per-instance setting is used by default.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 8137e19d3292..d3b8c3367152 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -49,8 +49,6 @@ gm200_fb_init(struct nvkm_fb *base)
 	if (fb->r100c10_page)
 		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
 
-	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
-
 	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
 	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
 	nvkm_mask(device, 0x100cc4, 0x00060000,

From 3b9ba66ab0e027e6d7a2b9b62cbb5cd547421ebd Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1012/1286] drm/nouveau/disp/nv50-: delay subunit construction
 until oneinit

We should be reading registers to determine which subunits are really
present on a given board, and this needs to be done after DEVINIT.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/base.c   |  6 ++
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.c   | 68 +++++++++++--------
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.h   |  4 ++
 .../gpu/drm/nouveau/nvkm/engine/disp/priv.h   |  1 +
 4 files changed, 52 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 93a75e5b2791..5b9d9c632aeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -376,6 +376,12 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
 	if (ret)
 		return ret;
 
+	if (disp->func->oneinit) {
+		ret = disp->func->oneinit(disp);
+		if (ret)
+			return ret;
+	}
+
 	i = 0;
 	list_for_each_entry(head, &disp->head, head)
 		i = max(i, head->id + 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 0c570dbd3021..43373b366263 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -59,35 +59,14 @@ nv50_disp_dtor_(struct nvkm_disp *base)
 	return disp;
 }
 
-static const struct nvkm_disp_func
-nv50_disp_ = {
-	.dtor = nv50_disp_dtor_,
-	.intr = nv50_disp_intr_,
-	.root = nv50_disp_root_,
-};
-
-int
-nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
-	       int index, int heads, struct nvkm_disp **pdisp)
+static int
+nv50_disp_oneinit_(struct nvkm_disp *base)
 {
-	struct nv50_disp *disp;
+	struct nv50_disp *disp = nv50_disp(base);
+	const struct nv50_disp_func *func = disp->func;
 	int ret, i;
 
-	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
-		return -ENOMEM;
-	disp->func = func;
-	*pdisp = &disp->base;
-
-	ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base);
-	if (ret)
-		return ret;
-
-	disp->wq = create_singlethread_workqueue("nvkm-disp");
-	if (!disp->wq)
-		return -ENOMEM;
-	INIT_WORK(&disp->supervisor, func->super);
-
-	for (i = 0; func->head.new && i < heads; i++) {
+	for (i = 0; func->head.new && i < disp->head.nr; i++) {
 		ret = func->head.new(&disp->base, i);
 		if (ret)
 			return ret;
@@ -111,7 +90,42 @@ nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
 			return ret;
 	}
 
-	return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent);
+	return 0;
+}
+
+static const struct nvkm_disp_func
+nv50_disp_ = {
+	.dtor = nv50_disp_dtor_,
+	.oneinit = nv50_disp_oneinit_,
+	.intr = nv50_disp_intr_,
+	.root = nv50_disp_root_,
+};
+
+int
+nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp **pdisp)
+{
+	struct nv50_disp *disp;
+	int ret;
+
+	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
+		return -ENOMEM;
+	disp->func = func;
+	*pdisp = &disp->base;
+
+	ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base);
+	if (ret)
+		return ret;
+
+	disp->wq = create_singlethread_workqueue("nvkm-disp");
+	if (!disp->wq)
+		return -ENOMEM;
+
+	INIT_WORK(&disp->supervisor, func->super);
+	disp->head.nr = heads;
+
+	return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan),
+			       &disp->uevent);
 }
 
 static u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index eb0b8acb1c5b..0ae32cda45e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -15,6 +15,10 @@ struct nv50_disp {
 
 	struct nvkm_event uevent;
 
+	struct {
+		int nr;
+	} head;
+
 	struct {
 		u32 lvdsconf;
 	} sor;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 6c9bfff6d043..c614351f5012 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -12,6 +12,7 @@ void nvkm_disp_vblank(struct nvkm_disp *, int head);
 
 struct nvkm_disp_func {
 	void *(*dtor)(struct nvkm_disp *);
+	int (*oneinit)(struct nvkm_disp *);
 	void (*intr)(struct nvkm_disp *);
 
 	const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *);

From f7b2ece37fce822692d3d6e616e0d0f3df9d4f49 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1013/1286] drm/nouveau/disp/nv50-: fetch mask of available
 heads during oneinit

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c      |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c      |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c    | 12 ++----------
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h     |  4 ++++
 .../gpu/drm/nouveau/nvkm/engine/disp/headgf119.c    | 11 ++++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c |  7 +++++++
 drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c    |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c     | 13 ++++++++-----
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h     |  6 +++---
 18 files changed, 56 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 842e1b72ee42..a1741a80bd05 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -32,7 +32,7 @@ g84_disp = {
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
 	.root = &g84_disp_root_oclass,
-	.head.new = nv50_head_new,
+	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .nr = 3, .new = nv50_dac_new },
 	.sor = { .nr = 2, .new = g84_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
@@ -41,5 +41,5 @@ g84_disp = {
 int
 g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&g84_disp, device, index, 2, pdisp);
+	return nv50_disp_new_(&g84_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index d184e6ab8918..b7febc4b0f85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -32,7 +32,7 @@ g94_disp = {
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
 	.root = &g94_disp_root_oclass,
-	.head.new = nv50_head_new,
+	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .nr = 3, .new = nv50_dac_new },
 	.sor = { .nr = 4, .new = g94_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
@@ -41,5 +41,5 @@ g94_disp = {
 int
 g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&g94_disp, device, index, 2, pdisp);
+	return nv50_disp_new_(&g94_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index d8765b57180b..b580ca9b4418 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -164,14 +164,6 @@ gf119_disp_intr(struct nv50_disp *disp)
 	}
 }
 
-int
-gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
-		int index, struct nvkm_disp **pdisp)
-{
-	u32 heads = nvkm_rd32(device, 0x022448);
-	return nv50_disp_new_(func, device, index, heads, pdisp);
-}
-
 static const struct nv50_disp_func
 gf119_disp = {
 	.intr = gf119_disp_intr,
@@ -179,7 +171,7 @@ gf119_disp = {
 	.uevent = &gf119_disp_chan_uevent,
 	.super = gf119_disp_super,
 	.root = &gf119_disp_root_oclass,
-	.head.new = gf119_head_new,
+	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .nr = 3, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gf119_sor_new },
 };
@@ -187,5 +179,5 @@ gf119_disp = {
 int
 gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return gf119_disp_new_(&gf119_disp, device, index, pdisp);
+	return nv50_disp_new_(&gf119_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index e8fe9f315d64..b3ee5b1d4a45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -33,7 +33,7 @@ gk104_disp = {
 	.uevent = &gf119_disp_chan_uevent,
 	.super = gf119_disp_super,
 	.root = &gk104_disp_root_oclass,
-	.head.new = gf119_head_new,
+	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .nr = 3, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gk104_sor_new },
 };
@@ -41,5 +41,5 @@ gk104_disp = {
 int
 gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return gf119_disp_new_(&gk104_disp, device, index, pdisp);
+	return nv50_disp_new_(&gk104_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index 769687502e7a..22533abbfb67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -33,7 +33,7 @@ gk110_disp = {
 	.uevent = &gf119_disp_chan_uevent,
 	.super = gf119_disp_super,
 	.root = &gk110_disp_root_oclass,
-	.head.new = gf119_head_new,
+	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .nr = 3, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gk104_sor_new },
 };
@@ -41,5 +41,5 @@ gk110_disp = {
 int
 gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return gf119_disp_new_(&gk110_disp, device, index, pdisp);
+	return nv50_disp_new_(&gk110_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index ede70e5d188e..85e602120abc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -33,7 +33,7 @@ gm107_disp = {
 	.uevent = &gf119_disp_chan_uevent,
 	.super = gf119_disp_super,
 	.root = &gm107_disp_root_oclass,
-	.head.new = gf119_head_new,
+	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .nr = 3, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gm107_sor_new },
 };
@@ -41,5 +41,5 @@ gm107_disp = {
 int
 gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return gf119_disp_new_(&gm107_disp, device, index, pdisp);
+	return nv50_disp_new_(&gm107_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 292d3b5f9704..48ab65d82717 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -33,7 +33,7 @@ gm200_disp = {
 	.uevent = &gf119_disp_chan_uevent,
 	.super = gf119_disp_super,
 	.root = &gm200_disp_root_oclass,
-	.head.new = gf119_head_new,
+	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .nr = 3, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gm200_sor_new },
 };
@@ -41,5 +41,5 @@ gm200_disp = {
 int
 gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return gf119_disp_new_(&gm200_disp, device, index, pdisp);
+	return nv50_disp_new_(&gm200_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 39eb98b2c3a2..190d03025fc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -33,12 +33,12 @@ gp100_disp = {
 	.uevent = &gf119_disp_chan_uevent,
 	.super = gf119_disp_super,
 	.root = &gp100_disp_root_oclass,
-	.head.new = gf119_head_new,
+	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.sor = { .nr = 4, .new = gm200_sor_new },
 };
 
 int
 gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return gf119_disp_new_(&gp100_disp, device, index, pdisp);
+	return nv50_disp_new_(&gp100_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 91d70fe18275..a059c65243a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -59,12 +59,12 @@ gp102_disp = {
 	.uevent = &gf119_disp_chan_uevent,
 	.super = gf119_disp_super,
 	.root = &gp102_disp_root_oclass,
-	.head.new = gf119_head_new,
+	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.sor = { .nr = 4, .new = gm200_sor_new },
 };
 
 int
 gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return gf119_disp_new_(&gp102_disp, device, index, pdisp);
+	return nv50_disp_new_(&gp102_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index bf00c4e3be3a..7554c732a655 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -32,7 +32,7 @@ gt200_disp = {
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
 	.root = &gt200_disp_root_oclass,
-	.head.new = nv50_head_new,
+	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .nr = 3, .new = nv50_dac_new },
 	.sor = { .nr = 2, .new = g84_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
@@ -41,5 +41,5 @@ gt200_disp = {
 int
 gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gt200_disp, device, index, 2, pdisp);
+	return nv50_disp_new_(&gt200_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 2cdd4d7a98d3..6a878a7e6af3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -32,7 +32,7 @@ gt215_disp = {
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
 	.root = &gt215_disp_root_oclass,
-	.head.new = nv50_head_new,
+	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .nr = 3, .new = nv50_dac_new },
 	.sor = { .nr = 4, .new = gt215_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
@@ -41,5 +41,5 @@ gt215_disp = {
 int
 gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&gt215_disp, device, index, 2, pdisp);
+	return nv50_disp_new_(&gt215_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
index 57030b3a4a75..4a5d7892ff54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
@@ -52,6 +52,10 @@ void nv50_head_rgpos(struct nvkm_head *, u16 *, u16 *);
 #define HEAD_DBG(h,f,a...) HEAD_MSG((h), debug, f, ##a)
 
 int nv04_head_new(struct nvkm_disp *, int id);
+
+int nv50_head_cnt(struct nvkm_disp *, unsigned long *);
 int nv50_head_new(struct nvkm_disp *, int id);
+
+int gf119_head_cnt(struct nvkm_disp *, unsigned long *);
 int gf119_head_new(struct nvkm_disp *, int id);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
index 9fd7ae331308..bcbdaaf8ba20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
@@ -92,8 +92,13 @@ gf119_head = {
 int
 gf119_head_new(struct nvkm_disp *disp, int id)
 {
-	struct nvkm_device *device = disp->engine.subdev.device;
-	if (!(nvkm_rd32(device, 0x612004) & (0x00000001 << id)))
-		return 0;
 	return nvkm_head_new_(&gf119_head, disp, id);
 }
+
+int
+gf119_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = nvkm_rd32(device, 0x612004) & 0x0000000f;
+	return nvkm_rd32(device, 0x022448);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c
index c80d06d5168f..e7d5c397cd29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c
@@ -90,3 +90,10 @@ nv50_head_new(struct nvkm_disp *disp, int id)
 {
 	return nvkm_head_new_(&nv50_head, disp, id);
 }
+
+int
+nv50_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	*pmask = 3;
+	return 2;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index d7e0fbb12bf1..9fc7507774e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -30,7 +30,7 @@ mcp77_disp = {
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
 	.root = &g94_disp_root_oclass,
-	.head.new = nv50_head_new,
+	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .nr = 3, .new = nv50_dac_new },
 	.sor = { .nr = 4, .new = mcp77_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
@@ -39,5 +39,5 @@ mcp77_disp = {
 int
 mcp77_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&mcp77_disp, device, index, 2, pdisp);
+	return nv50_disp_new_(&mcp77_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index 7b75c57c12ed..28647d365057 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -30,7 +30,7 @@ mcp89_disp = {
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
 	.root = &gt215_disp_root_oclass,
-	.head.new = nv50_head_new,
+	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .nr = 3, .new = nv50_dac_new },
 	.sor = { .nr = 4, .new = mcp89_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
@@ -39,5 +39,5 @@ mcp89_disp = {
 int
 mcp89_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&mcp89_disp, device, index, 2, pdisp);
+	return nv50_disp_new_(&mcp89_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 43373b366263..bf50b7be8826 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -64,9 +64,13 @@ nv50_disp_oneinit_(struct nvkm_disp *base)
 {
 	struct nv50_disp *disp = nv50_disp(base);
 	const struct nv50_disp_func *func = disp->func;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	int ret, i;
 
-	for (i = 0; func->head.new && i < disp->head.nr; i++) {
+	disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask);
+	nvkm_debug(subdev, "  Head(s): %d (%02lx)\n",
+		   disp->head.nr, disp->head.mask);
+	for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
 		ret = func->head.new(&disp->base, i);
 		if (ret)
 			return ret;
@@ -103,7 +107,7 @@ nv50_disp_ = {
 
 int
 nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
-	       int index, int heads, struct nvkm_disp **pdisp)
+	       int index, struct nvkm_disp **pdisp)
 {
 	struct nv50_disp *disp;
 	int ret;
@@ -122,7 +126,6 @@ nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
 		return -ENOMEM;
 
 	INIT_WORK(&disp->supervisor, func->super);
-	disp->head.nr = heads;
 
 	return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan),
 			       &disp->uevent);
@@ -633,7 +636,7 @@ nv50_disp = {
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
 	.root = &nv50_disp_root_oclass,
-	.head.new = nv50_head_new,
+	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .nr = 3, .new = nv50_dac_new },
 	.sor = { .nr = 2, .new = nv50_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
@@ -642,5 +645,5 @@ nv50_disp = {
 int
 nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp);
+	return nv50_disp_new_(&nv50_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 0ae32cda45e7..c7fe29cf3e05 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -16,6 +16,7 @@ struct nv50_disp {
 	struct nvkm_event uevent;
 
 	struct {
+		unsigned long mask;
 		int nr;
 	} head;
 
@@ -38,9 +39,7 @@ void nv50_disp_super_2_2(struct nv50_disp *, struct nvkm_head *);
 void nv50_disp_super_3_0(struct nv50_disp *, struct nvkm_head *);
 
 int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
-		   int index, int heads, struct nvkm_disp **);
-int gf119_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
-		    int index, struct nvkm_disp **);
+		   int index, struct nvkm_disp **);
 
 struct nv50_disp_func {
 	void (*intr)(struct nv50_disp *);
@@ -52,6 +51,7 @@ struct nv50_disp_func {
 	const struct nvkm_disp_oclass *root;
 
 	struct {
+		int (*cnt)(struct nvkm_disp *, unsigned long *mask);
 		int (*new)(struct nvkm_disp *, int id);
 	} head;
 

From bf5d1a6b6a4489b7887589fca6321d4024da71c8 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1014/1286] drm/nouveau/disp/nv50-: fetch mask of available
 dacs during oneinit

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c   | 11 ++++++++---
 .../gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c    | 11 ++++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c    |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c    |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h    |  3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c   | 15 ++++++++++-----
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h   |  9 ++-------
 .../gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c   |  2 +-
 18 files changed, 44 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c
index dbd032ef352a..71a94777ea2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c
@@ -58,8 +58,13 @@ gf119_dac = {
 int
 gf119_dac_new(struct nvkm_disp *disp, int id)
 {
-	struct nvkm_device *device = disp->engine.subdev.device;
-	if (!(nvkm_rd32(device, 0x612004) & (0x00000010 << id)))
-		return 0;
 	return nvkm_ior_new_(&gf119_dac, disp, DAC, id);
 }
+
+int
+gf119_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = (nvkm_rd32(device, 0x612004) & 0x000000f0) >> 4;
+	return 4;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index 85e692b12260..558012db35f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -109,8 +109,13 @@ nv50_dac = {
 int
 nv50_dac_new(struct nvkm_disp *disp, int id)
 {
-	struct nvkm_device *device = disp->engine.subdev.device;
-	if (!(nvkm_rd32(device, 0x610184) & (0x00100000 << id)))
-		return 0;
 	return nvkm_ior_new_(&nv50_dac, disp, DAC, id);
 }
+
+int
+nv50_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = (nvkm_rd32(device, 0x610184) & 0x00700000) >> 20;
+	return 3;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index a1741a80bd05..9f31f04e4e58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -33,7 +33,7 @@ g84_disp = {
 	.super = nv50_disp_super,
 	.root = &g84_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
-	.dac = { .nr = 3, .new = nv50_dac_new },
+	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .nr = 2, .new = g84_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index b7febc4b0f85..71fe26b25835 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -33,7 +33,7 @@ g94_disp = {
 	.super = nv50_disp_super,
 	.root = &g94_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
-	.dac = { .nr = 3, .new = nv50_dac_new },
+	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .nr = 4, .new = g94_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index b580ca9b4418..c8495bcf289c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -172,7 +172,7 @@ gf119_disp = {
 	.super = gf119_disp_super,
 	.root = &gf119_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-	.dac = { .nr = 3, .new = gf119_dac_new },
+	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gf119_sor_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index b3ee5b1d4a45..b948619818cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -34,7 +34,7 @@ gk104_disp = {
 	.super = gf119_disp_super,
 	.root = &gk104_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-	.dac = { .nr = 3, .new = gf119_dac_new },
+	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gk104_sor_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index 22533abbfb67..0b4945703568 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -34,7 +34,7 @@ gk110_disp = {
 	.super = gf119_disp_super,
 	.root = &gk110_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-	.dac = { .nr = 3, .new = gf119_dac_new },
+	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gk104_sor_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 85e602120abc..35ad965ffee2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -34,7 +34,7 @@ gm107_disp = {
 	.super = gf119_disp_super,
 	.root = &gm107_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-	.dac = { .nr = 3, .new = gf119_dac_new },
+	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gm107_sor_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 48ab65d82717..b2a5d364ffaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -34,7 +34,7 @@ gm200_disp = {
 	.super = gf119_disp_super,
 	.root = &gm200_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-	.dac = { .nr = 3, .new = gf119_dac_new },
+	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
 	.sor = { .nr = 4, .new = gm200_sor_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index 7554c732a655..88b6a34e9df7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -33,7 +33,7 @@ gt200_disp = {
 	.super = nv50_disp_super,
 	.root = &gt200_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
-	.dac = { .nr = 3, .new = nv50_dac_new },
+	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .nr = 2, .new = g84_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 6a878a7e6af3..1519ca566d9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -33,7 +33,7 @@ gt215_disp = {
 	.super = nv50_disp_super,
 	.root = &gt215_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
-	.dac = { .nr = 3, .new = nv50_dac_new },
+	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .nr = 4, .new = gt215_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 4548c031b937..89d9ab154c4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -153,7 +153,10 @@ void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
 #define IOR_WARN(i,f,a...) IOR_MSG((i), warn, f, ##a)
 #define IOR_DBG(i,f,a...) IOR_MSG((i), debug, f, ##a)
 
+int nv50_dac_cnt(struct nvkm_disp *, unsigned long *);
 int nv50_dac_new(struct nvkm_disp *, int);
+
+int gf119_dac_cnt(struct nvkm_disp *, unsigned long *);
 int gf119_dac_new(struct nvkm_disp *, int);
 
 int nv50_pior_new(struct nvkm_disp *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index 9fc7507774e1..fb188546f5f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -31,7 +31,7 @@ mcp77_disp = {
 	.super = nv50_disp_super,
 	.root = &g94_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
-	.dac = { .nr = 3, .new = nv50_dac_new },
+	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .nr = 4, .new = mcp77_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index 28647d365057..ae7fb9625674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -31,7 +31,7 @@ mcp89_disp = {
 	.super = nv50_disp_super,
 	.root = &gt215_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
-	.dac = { .nr = 3, .new = nv50_dac_new },
+	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .nr = 4, .new = mcp89_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index bf50b7be8826..500ef8da696a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -76,10 +76,15 @@ nv50_disp_oneinit_(struct nvkm_disp *base)
 			return ret;
 	}
 
-	for (i = 0; func->dac.new && i < func->dac.nr; i++) {
-		ret = func->dac.new(&disp->base, i);
-		if (ret)
-			return ret;
+	if (func->dac.cnt) {
+		disp->dac.nr = func->dac.cnt(&disp->base, &disp->dac.mask);
+		nvkm_debug(subdev, "   DAC(s): %d (%02lx)\n",
+			   disp->dac.nr, disp->dac.mask);
+		for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
+			ret = func->dac.new(&disp->base, i);
+			if (ret)
+				return ret;
+		}
 	}
 
 	for (i = 0; func->pior.new && i < func->pior.nr; i++) {
@@ -637,7 +642,7 @@ nv50_disp = {
 	.super = nv50_disp_super,
 	.root = &nv50_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
-	.dac = { .nr = 3, .new = nv50_dac_new },
+	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .nr = 2, .new = nv50_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index c7fe29cf3e05..3415a5258492 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -18,7 +18,7 @@ struct nv50_disp {
 	struct {
 		unsigned long mask;
 		int nr;
-	} head;
+	} head, dac;
 
 	struct {
 		u32 lvdsconf;
@@ -53,12 +53,7 @@ struct nv50_disp_func {
 	struct {
 		int (*cnt)(struct nvkm_disp *, unsigned long *mask);
 		int (*new)(struct nvkm_disp *, int id);
-	} head;
-
-	struct {
-		int nr;
-		int (*new)(struct nvkm_disp *, int id);
-	} dac;
+	} head, dac;
 
 	struct {
 		int nr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index 333c8424b413..7f22b875b9b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -64,7 +64,7 @@ gf119_disp_root_init(struct nv50_disp_root *root)
 	}
 
 	/* ... DAC caps */
-	for (i = 0; i < disp->func->dac.nr; i++) {
+	for (i = 0; i < disp->dac.nr; i++) {
 		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
 		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 1208524aae14..3fbaccd9569b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -427,7 +427,7 @@ nv50_disp_root_init(struct nv50_disp_root *root)
 	}
 
 	/* ... DAC caps */
-	for (i = 0; i < disp->func->dac.nr; i++) {
+	for (i = 0; i < disp->dac.nr; i++) {
 		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
 		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
 	}

From 9fe4e177045f4b5af25d25859e30450ff1f18be9 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1015/1286] drm/nouveau/disp/nv50-: fetch mask of available
 sors during oneinit

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/g84.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/g94.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gf119.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gk104.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gk110.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gm107.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gm200.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gp100.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gp102.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gt200.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/gt215.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/ior.h    | 10 ++++++++--
 .../gpu/drm/nouveau/nvkm/engine/disp/mcp77.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/mcp89.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.c   |  7 +++++--
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.h   |  9 +++------
 .../drm/nouveau/nvkm/engine/disp/rootgf119.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.c   |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/sorg84.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/sorg94.c | 10 +++++++++-
 .../drm/nouveau/nvkm/engine/disp/sorgf119.c   | 19 +++++++++----------
 .../drm/nouveau/nvkm/engine/disp/sorgk104.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/sorgm107.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/sorgm200.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/sorgt215.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/sormcp77.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/sormcp89.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/sornv50.c    | 19 +++++++++----------
 28 files changed, 65 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 9f31f04e4e58..287ac5edc88a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -34,7 +34,7 @@ g84_disp = {
 	.root = &g84_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
-	.sor = { .nr = 2, .new = g84_sor_new },
+	.sor = { .cnt = nv50_sor_cnt, .new = g84_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 71fe26b25835..e21f76d4e28e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -34,7 +34,7 @@ g94_disp = {
 	.root = &g94_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
-	.sor = { .nr = 4, .new = g94_sor_new },
+	.sor = { .cnt = g94_sor_cnt, .new = g94_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index c8495bcf289c..0139d143c733 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -173,7 +173,7 @@ gf119_disp = {
 	.root = &gf119_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
-	.sor = { .nr = 4, .new = gf119_sor_new },
+	.sor = { .cnt = gf119_sor_cnt, .new = gf119_sor_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index b948619818cc..6a59a52468c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -35,7 +35,7 @@ gk104_disp = {
 	.root = &gk104_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
-	.sor = { .nr = 4, .new = gk104_sor_new },
+	.sor = { .cnt = gf119_sor_cnt, .new = gk104_sor_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index 0b4945703568..f3b10dc4e673 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -35,7 +35,7 @@ gk110_disp = {
 	.root = &gk110_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
-	.sor = { .nr = 4, .new = gk104_sor_new },
+	.sor = { .cnt = gf119_sor_cnt, .new = gk104_sor_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 35ad965ffee2..068c5951efe3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -35,7 +35,7 @@ gm107_disp = {
 	.root = &gm107_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
-	.sor = { .nr = 4, .new = gm107_sor_new },
+	.sor = { .cnt = gf119_sor_cnt, .new = gm107_sor_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index b2a5d364ffaf..1c27dbe6ccec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -35,7 +35,7 @@ gm200_disp = {
 	.root = &gm200_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
 	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
-	.sor = { .nr = 4, .new = gm200_sor_new },
+	.sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 190d03025fc1..84933b6119f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -34,7 +34,7 @@ gp100_disp = {
 	.super = gf119_disp_super,
 	.root = &gp100_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-	.sor = { .nr = 4, .new = gm200_sor_new },
+	.sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index a059c65243a9..b36d926f0264 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -60,7 +60,7 @@ gp102_disp = {
 	.super = gf119_disp_super,
 	.root = &gp102_disp_root_oclass,
 	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
-	.sor = { .nr = 4, .new = gm200_sor_new },
+	.sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index 88b6a34e9df7..2ca92a23a62a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -34,7 +34,7 @@ gt200_disp = {
 	.root = &gt200_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
-	.sor = { .nr = 2, .new = g84_sor_new },
+	.sor = { .cnt = nv50_sor_cnt, .new = g84_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 1519ca566d9f..73b039069660 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -34,7 +34,7 @@ gt215_disp = {
 	.root = &gt215_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
-	.sor = { .nr = 4, .new = gt215_sor_new },
+	.sor = { .cnt = g94_sor_cnt, .new = gt215_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 89d9ab154c4b..6432e0611dee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -106,7 +106,6 @@ nv50_sor_link(struct nvkm_ior *ior)
 	return nv50_ior_base(ior) + ((ior->asy.link == 2) * 0x80);
 }
 
-int nv50_sor_new_(const struct nvkm_ior_func *, struct nvkm_disp *, int id);
 void nv50_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
 void nv50_sor_power(struct nvkm_ior *, bool, bool, bool, bool, bool);
 void nv50_sor_clock(struct nvkm_ior *);
@@ -122,7 +121,6 @@ void g94_sor_dp_watermark(struct nvkm_ior *, int, u8);
 
 void gt215_sor_dp_audio(struct nvkm_ior *, int, bool);
 
-int gf119_sor_new_(const struct nvkm_ior_func *, struct nvkm_disp *, int id);
 void gf119_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
 void gf119_sor_clock(struct nvkm_ior *);
 int gf119_sor_dp_links(struct nvkm_ior *, struct nvkm_i2c_aux *);
@@ -161,13 +159,21 @@ int gf119_dac_new(struct nvkm_disp *, int);
 
 int nv50_pior_new(struct nvkm_disp *, int);
 
+int nv50_sor_cnt(struct nvkm_disp *, unsigned long *);
 int nv50_sor_new(struct nvkm_disp *, int);
+
 int g84_sor_new(struct nvkm_disp *, int);
+
+int g94_sor_cnt(struct nvkm_disp *, unsigned long *);
 int g94_sor_new(struct nvkm_disp *, int);
+
 int mcp77_sor_new(struct nvkm_disp *, int);
 int gt215_sor_new(struct nvkm_disp *, int);
 int mcp89_sor_new(struct nvkm_disp *, int);
+
+int gf119_sor_cnt(struct nvkm_disp *, unsigned long *);
 int gf119_sor_new(struct nvkm_disp *, int);
+
 int gk104_sor_new(struct nvkm_disp *, int);
 int gm107_sor_new(struct nvkm_disp *, int);
 int gm200_sor_new(struct nvkm_disp *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index fb188546f5f8..c3f6504ea67d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -32,7 +32,7 @@ mcp77_disp = {
 	.root = &g94_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
-	.sor = { .nr = 4, .new = mcp77_sor_new },
+	.sor = { .cnt = g94_sor_cnt, .new = mcp77_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index ae7fb9625674..2d1d9218d253 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -32,7 +32,7 @@ mcp89_disp = {
 	.root = &gt215_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
-	.sor = { .nr = 4, .new = mcp89_sor_new },
+	.sor = { .cnt = g94_sor_cnt, .new = mcp89_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 500ef8da696a..1e32814488bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -93,7 +93,10 @@ nv50_disp_oneinit_(struct nvkm_disp *base)
 			return ret;
 	}
 
-	for (i = 0; func->sor.new && i < func->sor.nr; i++) {
+	disp->sor.nr = func->sor.cnt(&disp->base, &disp->sor.mask);
+	nvkm_debug(subdev, "   SOR(s): %d (%02lx)\n",
+		   disp->sor.nr, disp->sor.mask);
+	for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
 		ret = func->sor.new(&disp->base, i);
 		if (ret)
 			return ret;
@@ -643,7 +646,7 @@ nv50_disp = {
 	.root = &nv50_disp_root_oclass,
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
-	.sor = { .nr = 2, .new = nv50_sor_new },
+	.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
 	.pior = { .nr = 3, .new = nv50_pior_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 3415a5258492..ee2968b7aef1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -21,6 +21,8 @@ struct nv50_disp {
 	} head, dac;
 
 	struct {
+		unsigned long mask;
+		int nr;
 		u32 lvdsconf;
 	} sor;
 
@@ -53,12 +55,7 @@ struct nv50_disp_func {
 	struct {
 		int (*cnt)(struct nvkm_disp *, unsigned long *mask);
 		int (*new)(struct nvkm_disp *, int id);
-	} head, dac;
-
-	struct {
-		int nr;
-		int (*new)(struct nvkm_disp *, int id);
-	} sor;
+	} head, dac, sor;
 
 	struct {
 		int nr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index 7f22b875b9b3..4ba2d80db52b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -70,7 +70,7 @@ gf119_disp_root_init(struct nv50_disp_root *root)
 	}
 
 	/* ... SOR caps */
-	for (i = 0; i < disp->func->sor.nr; i++) {
+	for (i = 0; i < disp->sor.nr; i++) {
 		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
 		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 3fbaccd9569b..9d0b5b71d38a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -433,7 +433,7 @@ nv50_disp_root_init(struct nv50_disp_root *root)
 	}
 
 	/* ... SOR caps */
-	for (i = 0; i < disp->func->sor.nr; i++) {
+	for (i = 0; i < disp->sor.nr; i++) {
 		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
 		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c
index f40b909b4ca2..ec3a7db08118 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg84.c
@@ -34,5 +34,5 @@ g84_sor = {
 int
 g84_sor_new(struct nvkm_disp *disp, int id)
 {
-	return nv50_sor_new_(&g84_sor, disp, id);
+	return nvkm_ior_new_(&g84_sor, disp, SOR, id);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 49aeafde0031..4d59d02525d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -279,5 +279,13 @@ g94_sor = {
 int
 g94_sor_new(struct nvkm_disp *disp, int id)
 {
-	return nv50_sor_new_(&g94_sor, disp, id);
+	return nvkm_ior_new_(&g94_sor, disp, SOR, id);
+}
+
+int
+g94_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = (nvkm_rd32(device, 0x610184) & 0x0f000000) >> 24;
+	return 4;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 700fc754f28a..e6e6dfbb1283 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -152,15 +152,6 @@ gf119_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
 	state->head = ctrl & 0x0000000f;
 }
 
-int
-gf119_sor_new_(const struct nvkm_ior_func *func, struct nvkm_disp *disp, int id)
-{
-	struct nvkm_device *device = disp->engine.subdev.device;
-	if (!(nvkm_rd32(device, 0x612004) & (0x00000100 << id)))
-		return 0;
-	return nvkm_ior_new_(func, disp, SOR, id);
-}
-
 static const struct nvkm_ior_func
 gf119_sor = {
 	.state = gf119_sor_state,
@@ -189,5 +180,13 @@ gf119_sor = {
 int
 gf119_sor_new(struct nvkm_disp *disp, int id)
 {
-	return gf119_sor_new_(&gf119_sor, disp, id);
+	return nvkm_ior_new_(&gf119_sor, disp, SOR, id);
+}
+
+int
+gf119_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = (nvkm_rd32(device, 0x612004) & 0x0000ff00) >> 8;
+	return 8;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
index a1547bdf490b..b94090edaebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
@@ -49,5 +49,5 @@ gk104_sor = {
 int
 gk104_sor_new(struct nvkm_disp *disp, int id)
 {
-	return gf119_sor_new_(&gk104_sor, disp, id);
+	return nvkm_ior_new_(&gk104_sor, disp, SOR, id);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index 60230957d82b..e6965dec09c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -63,5 +63,5 @@ gm107_sor = {
 int
 gm107_sor_new(struct nvkm_disp *disp, int id)
 {
-	return gf119_sor_new_(&gm107_sor, disp, id);
+	return nvkm_ior_new_(&gm107_sor, disp, SOR, id);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index f9b8107aa2a2..8bc019b6ffab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -120,5 +120,5 @@ gm200_sor = {
 int
 gm200_sor_new(struct nvkm_disp *disp, int id)
 {
-	return gf119_sor_new_(&gm200_sor, disp, id);
+	return nvkm_ior_new_(&gm200_sor, disp, SOR, id);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c
index da228b54b43e..54d134d4ca1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c
@@ -65,5 +65,5 @@ gt215_sor = {
 int
 gt215_sor_new(struct nvkm_disp *disp, int id)
 {
-	return nv50_sor_new_(&gt215_sor, disp, id);
+	return nvkm_ior_new_(&gt215_sor, disp, SOR, id);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c
index c0179ccb956d..8a70dd25b13a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp77.c
@@ -44,5 +44,5 @@ mcp77_sor = {
 int
 mcp77_sor_new(struct nvkm_disp *disp, int id)
 {
-	return nv50_sor_new_(&mcp77_sor, disp, id);
+	return nvkm_ior_new_(&mcp77_sor, disp, SOR, id);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c
index 9bb01cd96697..eac9c5be9166 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sormcp89.c
@@ -49,5 +49,5 @@ mcp89_sor = {
 int
 mcp89_sor_new(struct nvkm_disp *disp, int id)
 {
-	return nv50_sor_new_(&mcp89_sor, disp, id);
+	return nvkm_ior_new_(&mcp89_sor, disp, SOR, id);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index f3ebd0c22e7d..b4729f8798af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -84,15 +84,6 @@ nv50_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
 	state->head = ctrl & 0x00000003;
 }
 
-int
-nv50_sor_new_(const struct nvkm_ior_func *func, struct nvkm_disp *disp, int id)
-{
-	struct nvkm_device *device = disp->engine.subdev.device;
-	if (!(nvkm_rd32(device, 0x610184) & (0x01000000 << id)))
-		return 0;
-	return nvkm_ior_new_(func, disp, SOR, id);
-}
-
 static const struct nvkm_ior_func
 nv50_sor = {
 	.state = nv50_sor_state,
@@ -103,5 +94,13 @@ nv50_sor = {
 int
 nv50_sor_new(struct nvkm_disp *disp, int id)
 {
-	return nv50_sor_new_(&nv50_sor, disp, id);
+	return nvkm_ior_new_(&nv50_sor, disp, SOR, id);
+}
+
+int
+nv50_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = (nvkm_rd32(device, 0x610184) & 0x03000000) >> 24;
+	return 2;
 }

From f5e088d6f0b4d969b2e7d8f931af082ba2527a56 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1016/1286] drm/nouveau/disp/nv50-: fetch mask of available
 piors during oneinit

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c    |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c    |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h    |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c   | 15 ++++++++++-----
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h   |  9 +++------
 .../gpu/drm/nouveau/nvkm/engine/disp/piornv50.c   | 11 ++++++++---
 .../gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c   |  2 +-
 11 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 287ac5edc88a..1ec81f3e5d0a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -35,7 +35,7 @@ g84_disp = {
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .cnt = nv50_sor_cnt, .new = g84_sor_new },
-	.pior = { .nr = 3, .new = nv50_pior_new },
+	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index e21f76d4e28e..791c2cd157dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -35,7 +35,7 @@ g94_disp = {
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .cnt = g94_sor_cnt, .new = g94_sor_new },
-	.pior = { .nr = 3, .new = nv50_pior_new },
+	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index 2ca92a23a62a..62e721d5963a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -35,7 +35,7 @@ gt200_disp = {
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .cnt = nv50_sor_cnt, .new = g84_sor_new },
-	.pior = { .nr = 3, .new = nv50_pior_new },
+	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 73b039069660..a5b1b1416740 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -35,7 +35,7 @@ gt215_disp = {
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .cnt = g94_sor_cnt, .new = gt215_sor_new },
-	.pior = { .nr = 3, .new = nv50_pior_new },
+	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 6432e0611dee..9d43ab23f4d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -157,6 +157,7 @@ int nv50_dac_new(struct nvkm_disp *, int);
 int gf119_dac_cnt(struct nvkm_disp *, unsigned long *);
 int gf119_dac_new(struct nvkm_disp *, int);
 
+int nv50_pior_cnt(struct nvkm_disp *, unsigned long *);
 int nv50_pior_new(struct nvkm_disp *, int);
 
 int nv50_sor_cnt(struct nvkm_disp *, unsigned long *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index c3f6504ea67d..ff49040a5819 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -33,7 +33,7 @@ mcp77_disp = {
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .cnt = g94_sor_cnt, .new = mcp77_sor_new },
-	.pior = { .nr = 3, .new = nv50_pior_new },
+	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index 2d1d9218d253..0cf968d58fca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -33,7 +33,7 @@ mcp89_disp = {
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .cnt = g94_sor_cnt, .new = mcp89_sor_new },
-	.pior = { .nr = 3, .new = nv50_pior_new },
+	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 1e32814488bf..c0faa3908a00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -87,10 +87,15 @@ nv50_disp_oneinit_(struct nvkm_disp *base)
 		}
 	}
 
-	for (i = 0; func->pior.new && i < func->pior.nr; i++) {
-		ret = func->pior.new(&disp->base, i);
-		if (ret)
-			return ret;
+	if (func->pior.cnt) {
+		disp->pior.nr = func->pior.cnt(&disp->base, &disp->pior.mask);
+		nvkm_debug(subdev, "  PIOR(s): %d (%02lx)\n",
+			   disp->pior.nr, disp->pior.mask);
+		for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
+			ret = func->pior.new(&disp->base, i);
+			if (ret)
+				return ret;
+		}
 	}
 
 	disp->sor.nr = func->sor.cnt(&disp->base, &disp->sor.mask);
@@ -647,7 +652,7 @@ nv50_disp = {
 	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
 	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
 	.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
-	.pior = { .nr = 3, .new = nv50_pior_new },
+	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index ee2968b7aef1..a29bcf73ce6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -27,6 +27,8 @@ struct nv50_disp {
 	} sor;
 
 	struct {
+		unsigned long mask;
+		int nr;
 		u8 type[3];
 	} pior;
 
@@ -55,12 +57,7 @@ struct nv50_disp_func {
 	struct {
 		int (*cnt)(struct nvkm_disp *, unsigned long *mask);
 		int (*new)(struct nvkm_disp *, int id);
-	} head, dac, sor;
-
-	struct {
-		int nr;
-		int (*new)(struct nvkm_disp *, int id);
-	} pior;
+	} head, dac, sor, pior;
 };
 
 void nv50_disp_intr(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index 99b3b9050635..e997a207f546 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -127,8 +127,13 @@ nv50_pior = {
 int
 nv50_pior_new(struct nvkm_disp *disp, int id)
 {
-	struct nvkm_device *device = disp->engine.subdev.device;
-	if (!(nvkm_rd32(device, 0x610184) & (0x10000000 << id)))
-		return 0;
 	return nvkm_ior_new_(&nv50_pior, disp, PIOR, id);
 }
+
+int
+nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = (nvkm_rd32(device, 0x610184) & 0x70000000) >> 28;
+	return 3;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 9d0b5b71d38a..c8379bf37a6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -439,7 +439,7 @@ nv50_disp_root_init(struct nv50_disp_root *root)
 	}
 
 	/* ... PIOR caps */
-	for (i = 0; i < disp->func->pior.nr; i++) {
+	for (i = 0; i < disp->pior.nr; i++) {
 		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
 		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
 	}

From bb3b0a422074606400e6aff216300bb4f012b22a Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1017/1286] drm/nouveau/disp/nv50-: initialise from the engine,
 rather than the user object

Engines are initialised on an as-needed basis, so moving the display
initialisation out of the user (root) object and into the engine itself
results in the same behaviour, whilst allowing us to simplify things a bit.
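
The interesting part is the new init/fini hooks on the disp function table:
engine init now calls the per-chipset hook and then forces every output
resource back to its fully-enabled 'normal' state. As a rough sketch of the
resulting flow (mirroring the base.c hunk below, not additional code in this
patch):

	/* illustrative only, see nvkm_disp_init() in the diff */
	if (disp->func->init) {
		int ret = disp->func->init(disp);
		if (ret)
			return ret;
	}
	list_for_each_entry(ior, &disp->ior, head)
		ior->func->power(ior, true, true, true, true, true);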

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/base.c   |  17 +++
 .../drm/nouveau/nvkm/engine/disp/channv50.h   |   1 +
 .../drm/nouveau/nvkm/engine/disp/dmacgf119.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.c   |  12 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/g84.c    |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/g94.c    |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gf119.c  |  82 ++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/disp/gk104.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gk110.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gm107.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gm200.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gp100.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gp102.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gt200.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gt215.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/mcp77.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/mcp89.c  |   2 +
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.c   | 107 +++++++++++++++-
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.h   |   9 ++
 .../gpu/drm/nouveau/nvkm/engine/disp/priv.h   |   2 +
 .../drm/nouveau/nvkm/engine/disp/rootg84.c    |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootg94.c    |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootgf119.c  |  84 ------------
 .../drm/nouveau/nvkm/engine/disp/rootgk104.c  |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootgk110.c  |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootgm107.c  |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootgm200.c  |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootgp100.c  |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootgp102.c  |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootgt200.c  |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootgt215.c  |   2 -
 .../drm/nouveau/nvkm/engine/disp/rootnv50.c   | 120 ------------------
 .../drm/nouveau/nvkm/engine/disp/rootnv50.h   |  10 --
 33 files changed, 247 insertions(+), 243 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 5b9d9c632aeb..32fa94a9773f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -220,6 +220,9 @@ nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
 	struct nvkm_conn *conn;
 	struct nvkm_outp *outp;
 
+	if (disp->func->fini)
+		disp->func->fini(disp);
+
 	list_for_each_entry(outp, &disp->outp, head) {
 		nvkm_outp_fini(outp);
 	}
@@ -237,6 +240,7 @@ nvkm_disp_init(struct nvkm_engine *engine)
 	struct nvkm_disp *disp = nvkm_disp(engine);
 	struct nvkm_conn *conn;
 	struct nvkm_outp *outp;
+	struct nvkm_ior *ior;
 
 	list_for_each_entry(conn, &disp->conn, head) {
 		nvkm_conn_init(conn);
@@ -246,6 +250,19 @@ nvkm_disp_init(struct nvkm_engine *engine)
 		nvkm_outp_init(outp);
 	}
 
+	if (disp->func->init) {
+		int ret = disp->func->init(disp);
+		if (ret)
+			return ret;
+	}
+
+	/* Set 'normal' (ie. when it's attached to a head) state for
+	 * each output resource to 'fully enabled'.
+	 */
+	list_for_each_entry(ior, &disp->ior, head) {
+		ior->func->power(ior, true, true, true, true, true);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 40681db91a02..b5185853b7d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -4,6 +4,7 @@
 #define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
 #include <core/object.h>
 #include "nv50.h"
+struct nv50_disp_root;
 
 struct nv50_disp_chan {
 	const struct nv50_disp_chan_func *func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index ce7cd74fbd5d..6680ff8bf029 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -31,7 +31,7 @@ int
 gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
 		     struct nvkm_object *object, u32 handle)
 {
-	return nvkm_ramht_insert(chan->base.root->ramht, object,
+	return nvkm_ramht_insert(chan->base.root->disp->ramht, object,
 				 chan->base.chid.user, -9, handle,
 				 chan->base.chid.user << 27 | 0x00000001);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 070ec5e18fdb..c80d0479c79a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -33,7 +33,7 @@
 
 struct nv50_disp_dmac_object {
 	struct nvkm_oproxy oproxy;
-	struct nv50_disp_root *root;
+	struct nv50_disp *disp;
 	int hash;
 };
 
@@ -42,7 +42,7 @@ nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
 {
 	struct nv50_disp_dmac_object *object =
 		container_of(base, typeof(*object), oproxy);
-	nvkm_ramht_remove(object->root->ramht, object->hash);
+	nvkm_ramht_remove(object->disp->ramht, object->hash);
 }
 
 static const struct nvkm_oproxy_func
@@ -56,8 +56,8 @@ nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
 			  void *data, u32 size, struct nvkm_object **pobject)
 {
 	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
-	struct nv50_disp_root *root = chan->base.root;
-	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const struct nvkm_device_oclass *sclass = oclass->priv;
 	struct nv50_disp_dmac_object *object;
 	int ret;
@@ -65,7 +65,7 @@ nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
 	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
-	object->root = root;
+	object->disp = disp;
 	*pobject = &object->oproxy.base;
 
 	ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
@@ -177,7 +177,7 @@ int
 nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
 		    struct nvkm_object *object, u32 handle)
 {
-	return nvkm_ramht_insert(chan->base.root->ramht, object,
+	return nvkm_ramht_insert(chan->base.root->disp->ramht, object,
 				 chan->base.chid.user, -10, handle,
 				 chan->base.chid.user << 28 |
 				 chan->base.chid.user);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 1ec81f3e5d0a..731f188fc1ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 g84_disp = {
+	.init = nv50_disp_init,
+	.fini = nv50_disp_fini,
 	.intr = nv50_disp_intr,
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 791c2cd157dc..def54fe1951e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 g94_disp = {
+	.init = nv50_disp_init,
+	.fini = nv50_disp_fini,
 	.intr = nv50_disp_intr,
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 0139d143c733..382e6a6a6ff2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -26,6 +26,9 @@
 #include "ior.h"
 #include "rootnv50.h"
 
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
 void
 gf119_disp_super(struct work_struct *work)
 {
@@ -164,8 +167,87 @@ gf119_disp_intr(struct nv50_disp *disp)
 	}
 }
 
+void
+gf119_disp_fini(struct nv50_disp *disp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	/* disable all interrupts */
+	nvkm_wr32(device, 0x6100b0, 0x00000000);
+}
+
+int
+gf119_disp_init(struct nv50_disp *disp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_head *head;
+	u32 tmp;
+	int i;
+
+	/* The below segments of code copying values from one register to
+	 * another appear to inform EVO of the display capabilities or
+	 * something similar.
+	 */
+
+	/* ... CRTC caps */
+	list_for_each_entry(head, &disp->base.head, head) {
+		const u32 hoff = head->id * 0x800;
+		tmp = nvkm_rd32(device, 0x616104 + hoff);
+		nvkm_wr32(device, 0x6101b4 + hoff, tmp);
+		tmp = nvkm_rd32(device, 0x616108 + hoff);
+		nvkm_wr32(device, 0x6101b8 + hoff, tmp);
+		tmp = nvkm_rd32(device, 0x61610c + hoff);
+		nvkm_wr32(device, 0x6101bc + hoff, tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < disp->dac.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < disp->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
+		nvkm_wr32(device, 0x6100ac, 0x00000100);
+		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nvkm_wr32(device, 0x610090, 0x00000000);
+	nvkm_wr32(device, 0x6100a0, 0x00000000);
+	nvkm_wr32(device, 0x6100b0, 0x00000307);
+
+	/* disable underflow reporting, preventing an intermittent issue
+	 * on some gk104 boards where the production vbios left this
+	 * setting enabled by default.
+	 *
+	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+	 */
+	list_for_each_entry(head, &disp->base.head, head) {
+		const u32 hoff = head->id * 0x800;
+		nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
+	}
+
+	return 0;
+}
+
 static const struct nv50_disp_func
 gf119_disp = {
+	.init = gf119_disp_init,
+	.fini = gf119_disp_fini,
 	.intr = gf119_disp_intr,
 	.intr_error = gf119_disp_intr_error,
 	.uevent = &gf119_disp_chan_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 6a59a52468c1..4c3439b1a62d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 gk104_disp = {
+	.init = gf119_disp_init,
+	.fini = gf119_disp_fini,
 	.intr = gf119_disp_intr,
 	.intr_error = gf119_disp_intr_error,
 	.uevent = &gf119_disp_chan_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index f3b10dc4e673..bc6f4750c942 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 gk110_disp = {
+	.init = gf119_disp_init,
+	.fini = gf119_disp_fini,
 	.intr = gf119_disp_intr,
 	.intr_error = gf119_disp_intr_error,
 	.uevent = &gf119_disp_chan_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 068c5951efe3..031cf6b03a76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 gm107_disp = {
+	.init = gf119_disp_init,
+	.fini = gf119_disp_fini,
 	.intr = gf119_disp_intr,
 	.intr_error = gf119_disp_intr_error,
 	.uevent = &gf119_disp_chan_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 1c27dbe6ccec..ec9c33a5162d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 gm200_disp = {
+	.init = gf119_disp_init,
+	.fini = gf119_disp_fini,
 	.intr = gf119_disp_intr,
 	.intr_error = gf119_disp_intr_error,
 	.uevent = &gf119_disp_chan_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 84933b6119f2..fd6216684f6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 gp100_disp = {
+	.init = gf119_disp_init,
+	.fini = gf119_disp_fini,
 	.intr = gf119_disp_intr,
 	.intr_error = gf119_disp_intr_error,
 	.uevent = &gf119_disp_chan_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index b36d926f0264..0a2c5b5f87eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -54,6 +54,8 @@ gp102_disp_intr_error(struct nv50_disp *disp, int chid)
 
 static const struct nv50_disp_func
 gp102_disp = {
+	.init = gf119_disp_init,
+	.fini = gf119_disp_fini,
 	.intr = gf119_disp_intr,
 	.intr_error = gp102_disp_intr_error,
 	.uevent = &gf119_disp_chan_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index 62e721d5963a..f80183701f44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 gt200_disp = {
+	.init = nv50_disp_init,
+	.fini = nv50_disp_fini,
 	.intr = nv50_disp_intr,
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index a5b1b1416740..7581efc1357e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -28,6 +28,8 @@
 
 static const struct nv50_disp_func
 gt215_disp = {
+	.init = nv50_disp_init,
+	.fini = nv50_disp_fini,
 	.intr = nv50_disp_intr,
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index ff49040a5819..cfdce23ab83a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -26,6 +26,8 @@
 
 static const struct nv50_disp_func
 mcp77_disp = {
+	.init = nv50_disp_init,
+	.fini = nv50_disp_fini,
 	.intr = nv50_disp_intr,
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index 0cf968d58fca..85d9329cfa0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -26,6 +26,8 @@
 
 static const struct nv50_disp_func
 mcp89_disp = {
+	.init = nv50_disp_init,
+	.fini = nv50_disp_fini,
 	.intr = nv50_disp_intr,
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index c0faa3908a00..1d2280ab3194 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -28,7 +28,7 @@
 
 #include <core/client.h>
 #include <core/enum.h>
-#include <core/gpuobj.h>
+#include <core/ramht.h>
 #include <subdev/bios.h>
 #include <subdev/bios/disp.h>
 #include <subdev/bios/init.h>
@@ -49,13 +49,32 @@ nv50_disp_intr_(struct nvkm_disp *base)
 	disp->func->intr(disp);
 }
 
+static void
+nv50_disp_fini_(struct nvkm_disp *base)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->fini(disp);
+}
+
+static int
+nv50_disp_init_(struct nvkm_disp *base)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->init(disp);
+}
+
 static void *
 nv50_disp_dtor_(struct nvkm_disp *base)
 {
 	struct nv50_disp *disp = nv50_disp(base);
+
+	nvkm_ramht_del(&disp->ramht);
+	nvkm_gpuobj_del(&disp->inst);
+
 	nvkm_event_fini(&disp->uevent);
 	if (disp->wq)
 		destroy_workqueue(disp->wq);
+
 	return disp;
 }
 
@@ -65,6 +84,7 @@ nv50_disp_oneinit_(struct nvkm_disp *base)
 	struct nv50_disp *disp = nv50_disp(base);
 	const struct nv50_disp_func *func = disp->func;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int ret, i;
 
 	disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask);
@@ -107,13 +127,20 @@ nv50_disp_oneinit_(struct nvkm_disp *base)
 			return ret;
 	}
 
-	return 0;
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL,
+			      &disp->inst);
+	if (ret)
+		return ret;
+
+	return nvkm_ramht_new(device, 0x1000, 0, disp->inst, &disp->ramht);
 }
 
 static const struct nvkm_disp_func
 nv50_disp_ = {
 	.dtor = nv50_disp_dtor_,
 	.oneinit = nv50_disp_oneinit_,
+	.init = nv50_disp_init_,
+	.fini = nv50_disp_fini_,
 	.intr = nv50_disp_intr_,
 	.root = nv50_disp_root_,
 };
@@ -643,8 +670,84 @@ nv50_disp_intr(struct nv50_disp *disp)
 	}
 }
 
+void
+nv50_disp_fini(struct nv50_disp *disp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	/* disable all interrupts */
+	nvkm_wr32(device, 0x610024, 0x00000000);
+	nvkm_wr32(device, 0x610020, 0x00000000);
+}
+
+int
+nv50_disp_init(struct nv50_disp *disp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_head *head;
+	u32 tmp;
+	int i;
+
+	/* The below segments of code copying values from one register to
+	 * another appear to inform EVO of the display capabilities or
+	 * something similar.  NFI what the 0x614004 caps are for..
+	 */
+	tmp = nvkm_rd32(device, 0x614004);
+	nvkm_wr32(device, 0x610184, tmp);
+
+	/* ... CRTC caps */
+	list_for_each_entry(head, &disp->base.head, head) {
+		tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
+		nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
+		nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
+		nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
+		nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < disp->dac.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < disp->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
+	}
+
+	/* ... PIOR caps */
+	for (i = 0; i < disp->pior.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
+		nvkm_wr32(device, 0x610024, 0x00000100);
+		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nvkm_wr32(device, 0x61002c, 0x00000370);
+	nvkm_wr32(device, 0x610028, 0x00000000);
+	return 0;
+}
+
 static const struct nv50_disp_func
 nv50_disp = {
+	.init = nv50_disp_init,
+	.fini = nv50_disp_fini,
 	.intr = nv50_disp_intr,
 	.uevent = &nv50_disp_chan_uevent,
 	.super = nv50_disp_super,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index a29bcf73ce6f..bb622d0f6d63 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -32,6 +32,9 @@ struct nv50_disp {
 		u8 type[3];
 	} pior;
 
+	struct nvkm_gpuobj *inst;
+	struct nvkm_ramht *ramht;
+
 	struct nv50_disp_chan *chan[21];
 };
 
@@ -46,6 +49,8 @@ int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
 		   int index, struct nvkm_disp **);
 
 struct nv50_disp_func {
+	int (*init)(struct nv50_disp *);
+	void (*fini)(struct nv50_disp *);
 	void (*intr)(struct nv50_disp *);
 	void (*intr_error)(struct nv50_disp *, int chid);
 
@@ -60,9 +65,13 @@ struct nv50_disp_func {
 	} head, dac, sor, pior;
 };
 
+int nv50_disp_init(struct nv50_disp *);
+void nv50_disp_fini(struct nv50_disp *);
 void nv50_disp_intr(struct nv50_disp *);
 void nv50_disp_super(struct work_struct *);
 
+int gf119_disp_init(struct nv50_disp *);
+void gf119_disp_fini(struct nv50_disp *);
 void gf119_disp_intr(struct nv50_disp *);
 void gf119_disp_super(struct work_struct *);
 void gf119_disp_intr_error(struct nv50_disp *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index c614351f5012..ef66c5f38ad5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -13,6 +13,8 @@ void nvkm_disp_vblank(struct nvkm_disp *, int head);
 struct nvkm_disp_func {
 	void *(*dtor)(struct nvkm_disp *);
 	int (*oneinit)(struct nvkm_disp *);
+	int (*init)(struct nvkm_disp *);
+	void (*fini)(struct nvkm_disp *);
 	void (*intr)(struct nvkm_disp *);
 
 	const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index 721e4f74d1fc..36ac0d4237c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 g84_disp_root = {
-	.init = nv50_disp_root_init,
-	.fini = nv50_disp_root_fini,
 	.dmac = {
 		&g84_disp_core_oclass,
 		&g84_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
index 9493f6edf62b..18b87b3df862 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 g94_disp_root = {
-	.init = nv50_disp_root_init,
-	.fini = nv50_disp_root_fini,
 	.dmac = {
 		&g94_disp_core_oclass,
 		&gt200_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index 4ba2d80db52b..7c5701f0b496 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -22,96 +22,12 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "head.h"
 #include "dmacnv50.h"
 
-#include <core/ramht.h>
-#include <subdev/timer.h>
-
 #include <nvif/class.h>
 
-void
-gf119_disp_root_fini(struct nv50_disp_root *root)
-{
-	struct nvkm_device *device = root->disp->base.engine.subdev.device;
-	/* disable all interrupts */
-	nvkm_wr32(device, 0x6100b0, 0x00000000);
-}
-
-int
-gf119_disp_root_init(struct nv50_disp_root *root)
-{
-	struct nv50_disp *disp = root->disp;
-	struct nvkm_head *head;
-	struct nvkm_device *device = disp->base.engine.subdev.device;
-	u32 tmp;
-	int i;
-
-	/* The below segments of code copying values from one register to
-	 * another appear to inform EVO of the display capabilities or
-	 * something similar.
-	 */
-
-	/* ... CRTC caps */
-	list_for_each_entry(head, &disp->base.head, head) {
-		const u32 hoff = head->id * 0x800;
-		tmp = nvkm_rd32(device, 0x616104 + hoff);
-		nvkm_wr32(device, 0x6101b4 + hoff, tmp);
-		tmp = nvkm_rd32(device, 0x616108 + hoff);
-		nvkm_wr32(device, 0x6101b8 + hoff, tmp);
-		tmp = nvkm_rd32(device, 0x61610c + hoff);
-		nvkm_wr32(device, 0x6101bc + hoff, tmp);
-	}
-
-	/* ... DAC caps */
-	for (i = 0; i < disp->dac.nr; i++) {
-		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
-		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
-	}
-
-	/* ... SOR caps */
-	for (i = 0; i < disp->sor.nr; i++) {
-		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
-		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
-	}
-
-	/* steal display away from vbios, or something like that */
-	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
-		nvkm_wr32(device, 0x6100ac, 0x00000100);
-		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
-		if (nvkm_msec(device, 2000,
-			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
-				break;
-		) < 0)
-			return -EBUSY;
-	}
-
-	/* point at display engine memory area (hash table, objects) */
-	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
-
-	/* enable supervisor interrupts, disable everything else */
-	nvkm_wr32(device, 0x610090, 0x00000000);
-	nvkm_wr32(device, 0x6100a0, 0x00000000);
-	nvkm_wr32(device, 0x6100b0, 0x00000307);
-
-	/* disable underflow reporting, preventing an intermittent issue
-	 * on some gk104 boards where the production vbios left this
-	 * setting enabled by default.
-	 *
-	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
-	 */
-	list_for_each_entry(head, &disp->base.head, head) {
-		const u32 hoff = head->id * 0x800;
-		nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
-	}
-
-	return 0;
-}
-
 static const struct nv50_disp_root_func
 gf119_disp_root = {
-	.init = gf119_disp_root_init,
-	.fini = gf119_disp_root_fini,
 	.dmac = {
 		&gf119_disp_core_oclass,
 		&gf119_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
index 0bfdb1d1c6ab..c0946a602b71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 gk104_disp_root = {
-	.init = gf119_disp_root_init,
-	.fini = gf119_disp_root_fini,
 	.dmac = {
 		&gk104_disp_core_oclass,
 		&gk104_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
index 1e8dbed8a67c..2ebc16687b50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 gk110_disp_root = {
-	.init = gf119_disp_root_init,
-	.fini = gf119_disp_root_fini,
 	.dmac = {
 		&gk110_disp_core_oclass,
 		&gk110_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
index 44c55be69e99..5a62c9e1a2cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 gm107_disp_root = {
-	.init = gf119_disp_root_init,
-	.fini = gf119_disp_root_fini,
 	.dmac = {
 		&gm107_disp_core_oclass,
 		&gk110_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 38f5ee1dfc58..2634e06bf666 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 gm200_disp_root = {
-	.init = gf119_disp_root_init,
-	.fini = gf119_disp_root_fini,
 	.dmac = {
 		&gm200_disp_core_oclass,
 		&gk110_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
index ac8fdd728ec6..784723597c7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 gp100_disp_root = {
-	.init = gf119_disp_root_init,
-	.fini = gf119_disp_root_fini,
 	.dmac = {
 		&gp100_disp_core_oclass,
 		&gk110_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 37122ca579ad..2fdfa8df0378 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 gp102_disp_root = {
-	.init = gf119_disp_root_init,
-	.fini = gf119_disp_root_fini,
 	.dmac = {
 		&gp102_disp_core_oclass,
 		&gp102_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
index 124a0c24f92c..facad2794eb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 gt200_disp_root = {
-	.init = nv50_disp_root_init,
-	.fini = nv50_disp_root_fini,
 	.dmac = {
 		&gt200_disp_core_oclass,
 		&gt200_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
index dff52f30668b..3e93db58263f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -28,8 +28,6 @@
 
 static const struct nv50_disp_root_func
 gt215_disp_root = {
-	.init = nv50_disp_root_init,
-	.fini = nv50_disp_root_fini,
 	.dmac = {
 		&gt215_disp_core_oclass,
 		&gt215_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index c8379bf37a6d..072c8c0e7096 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -28,8 +28,6 @@
 #include "ior.h"
 
 #include <core/client.h>
-#include <core/ramht.h>
-#include <subdev/timer.h>
 
 #include <nvif/class.h>
 #include <nvif/cl5070.h>
@@ -315,49 +313,16 @@ nv50_disp_root_child_get_(struct nvkm_object *object, int index,
 	return -EINVAL;
 }
 
-static int
-nv50_disp_root_fini_(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_root *root = nv50_disp_root(object);
-	root->func->fini(root);
-	return 0;
-}
-
-static int
-nv50_disp_root_init_(struct nvkm_object *object)
-{
-	struct nv50_disp_root *root = nv50_disp_root(object);
-	struct nvkm_ior *ior;
-	int ret;
-
-	ret = root->func->init(root);
-	if (ret)
-		return ret;
-
-	/* Set 'normal' (ie. when it's attached to a head) state for
-	 * each output resource to 'fully enabled'.
-	 */
-	list_for_each_entry(ior, &root->disp->base.ior, head) {
-		ior->func->power(ior, true, true, true, true, true);
-	}
-
-	return 0;
-}
-
 static void *
 nv50_disp_root_dtor_(struct nvkm_object *object)
 {
 	struct nv50_disp_root *root = nv50_disp_root(object);
-	nvkm_ramht_del(&root->ramht);
-	nvkm_gpuobj_del(&root->instmem);
 	return root;
 }
 
 static const struct nvkm_object_func
 nv50_disp_root_ = {
 	.dtor = nv50_disp_root_dtor_,
-	.init = nv50_disp_root_init_,
-	.fini = nv50_disp_root_fini_,
 	.mthd = nv50_disp_root_mthd_,
 	.ntfy = nvkm_disp_ntfy,
 	.sclass = nv50_disp_root_child_get_,
@@ -370,8 +335,6 @@ nv50_disp_root_new_(const struct nv50_disp_root_func *func,
 {
 	struct nv50_disp *disp = nv50_disp(base);
 	struct nv50_disp_root *root;
-	struct nvkm_device *device = disp->base.engine.subdev.device;
-	int ret;
 
 	if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
 		return -ENOMEM;
@@ -380,94 +343,11 @@ nv50_disp_root_new_(const struct nv50_disp_root_func *func,
 	nvkm_object_ctor(&nv50_disp_root_, oclass, &root->object);
 	root->func = func;
 	root->disp = disp;
-
-	ret = nvkm_gpuobj_new(disp->base.engine.subdev.device, 0x10000, 0x10000,
-			      false, NULL, &root->instmem);
-	if (ret)
-		return ret;
-
-	return nvkm_ramht_new(device, 0x1000, 0, root->instmem, &root->ramht);
-}
-
-void
-nv50_disp_root_fini(struct nv50_disp_root *root)
-{
-	struct nvkm_device *device = root->disp->base.engine.subdev.device;
-	/* disable all interrupts */
-	nvkm_wr32(device, 0x610024, 0x00000000);
-	nvkm_wr32(device, 0x610020, 0x00000000);
-}
-
-int
-nv50_disp_root_init(struct nv50_disp_root *root)
-{
-	struct nv50_disp *disp = root->disp;
-	struct nvkm_head *head;
-	struct nvkm_device *device = disp->base.engine.subdev.device;
-	u32 tmp;
-	int i;
-
-	/* The below segments of code copying values from one register to
-	 * another appear to inform EVO of the display capabilities or
-	 * something similar.  NFI what the 0x614004 caps are for..
-	 */
-	tmp = nvkm_rd32(device, 0x614004);
-	nvkm_wr32(device, 0x610184, tmp);
-
-	/* ... CRTC caps */
-	list_for_each_entry(head, &disp->base.head, head) {
-		tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
-		nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
-		tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
-		nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
-		tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
-		nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
-		tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
-		nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
-	}
-
-	/* ... DAC caps */
-	for (i = 0; i < disp->dac.nr; i++) {
-		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
-		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
-	}
-
-	/* ... SOR caps */
-	for (i = 0; i < disp->sor.nr; i++) {
-		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
-		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
-	}
-
-	/* ... PIOR caps */
-	for (i = 0; i < disp->pior.nr; i++) {
-		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
-		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
-	}
-
-	/* steal display away from vbios, or something like that */
-	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
-		nvkm_wr32(device, 0x610024, 0x00000100);
-		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
-		if (nvkm_msec(device, 2000,
-			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
-				break;
-		) < 0)
-			return -EBUSY;
-	}
-
-	/* point at display engine memory area (hash table, objects) */
-	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
-
-	/* enable supervisor interrupts, disable everything else */
-	nvkm_wr32(device, 0x61002c, 0x00000370);
-	nvkm_wr32(device, 0x610028, 0x00000000);
 	return 0;
 }
 
 static const struct nv50_disp_root_func
 nv50_disp_root = {
-	.init = nv50_disp_root_init,
-	.fini = nv50_disp_root_fini,
 	.dmac = {
 		&nv50_disp_core_oclass,
 		&nv50_disp_base_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 4818fa69ae6c..06b554b212bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -10,14 +10,9 @@ struct nv50_disp_root {
 	const struct nv50_disp_root_func *func;
 	struct nv50_disp *disp;
 	struct nvkm_object object;
-
-	struct nvkm_gpuobj *instmem;
-	struct nvkm_ramht *ramht;
 };
 
 struct nv50_disp_root_func {
-	int (*init)(struct nv50_disp_root *);
-	void (*fini)(struct nv50_disp_root *);
 	const struct nv50_disp_dmac_oclass *dmac[3];
 	const struct nv50_disp_pioc_oclass *pioc[2];
 };
@@ -25,11 +20,6 @@ struct nv50_disp_root_func {
 int  nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
 			 const struct nvkm_oclass *, void *data, u32 size,
 			 struct nvkm_object **);
-int  nv50_disp_root_init(struct nv50_disp_root *);
-void nv50_disp_root_fini(struct nv50_disp_root *);
-
-int  gf119_disp_root_init(struct nv50_disp_root *);
-void gf119_disp_root_fini(struct nv50_disp_root *);
 
 extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
 extern const struct nvkm_disp_oclass g84_disp_root_oclass;

From abc1d4379bafc504b05039db2336b3955b17ffdb Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1018/1286] drm/nouveau/disp/nv50-: replace user object with
 engine pointer in channels

More simplification: channels now reference the display engine (struct
nv50_disp) directly, rather than reaching it through the root user object.
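
A minimal illustrative sketch of the effect on channel code (not itself part
of the patch): the pointer chase through the root object becomes a direct
dereference.

    /* before */
    struct nv50_disp *disp = chan->root->disp;

    /* after */
    struct nv50_disp *disp = chan->disp;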

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../drm/nouveau/nvkm/engine/disp/basenv50.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/channv50.c   | 22 +++++++++----------
 .../drm/nouveau/nvkm/engine/disp/channv50.h   |  6 ++---
 .../drm/nouveau/nvkm/engine/disp/coregf119.c  |  4 ++--
 .../drm/nouveau/nvkm/engine/disp/coregp102.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/corenv50.c   |  6 ++---
 .../drm/nouveau/nvkm/engine/disp/cursnv50.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/dmacgf119.c  |  6 ++---
 .../drm/nouveau/nvkm/engine/disp/dmacgp102.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.c   | 14 ++++++------
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.h   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/oimmnv50.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/ovlynv50.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/piocgf119.c  |  4 ++--
 .../drm/nouveau/nvkm/engine/disp/piocnv50.c   |  4 ++--
 15 files changed, 39 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
index f1d6b820d482..418741a61f11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -58,7 +58,7 @@ nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
 	} else
 		return ret;
 
-	return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+	return nv50_disp_dmac_new_(func, mthd, disp, chid + head,
 				   head, push, oclass, pobject);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 723dcbde2ac2..96b732d4b9ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -65,7 +65,7 @@ nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
 void
 nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
 {
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	const struct nv50_disp_chan_mthd *mthd = chan->mthd;
 	const struct nv50_disp_mthd_list *list;
@@ -158,7 +158,7 @@ static int
 nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	*data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
 	return 0;
@@ -168,7 +168,7 @@ static int
 nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
 	return 0;
@@ -179,7 +179,7 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
 		    struct nvkm_event **pevent)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	switch (type) {
 	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
 		*pevent = &disp->uevent;
@@ -195,7 +195,7 @@ nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
 		   enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	*type = NVKM_OBJECT_MAP_IO;
 	*addr = device->func->resource_addr(device, 0) +
@@ -245,7 +245,7 @@ static void *
 nv50_disp_chan_dtor(struct nvkm_object *object)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	if (chan->chid.user >= 0)
 		disp->chan[chan->chid.user] = NULL;
 	return chan->func->dtor ? chan->func->dtor(chan) : chan;
@@ -266,16 +266,14 @@ nv50_disp_chan = {
 int
 nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int ctrl, int user, int head,
+		    struct nv50_disp *disp, int ctrl, int user, int head,
 		    const struct nvkm_oclass *oclass,
 		    struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = root->disp;
-
 	nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
 	chan->func = func;
 	chan->mthd = mthd;
-	chan->root = root;
+	chan->disp = disp;
 	chan->chid.ctrl = ctrl;
 	chan->chid.user = user;
 	chan->head = head;
@@ -291,7 +289,7 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
 int
 nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int ctrl, int user, int head,
+		    struct nv50_disp *disp, int ctrl, int user, int head,
 		    const struct nvkm_oclass *oclass,
 		    struct nvkm_object **pobject)
 {
@@ -301,6 +299,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
 		return -ENOMEM;
 	*pobject = &chan->object;
 
-	return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
+	return nv50_disp_chan_ctor(func, mthd, disp, ctrl, user,
 				   head, oclass, chan);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index b5185853b7d8..c9dc58ce47dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -9,7 +9,7 @@ struct nv50_disp_root;
 struct nv50_disp_chan {
 	const struct nv50_disp_chan_func *func;
 	const struct nv50_disp_chan_mthd *mthd;
-	struct nv50_disp_root *root;
+	struct nv50_disp *disp;
 
 	struct {
 		int ctrl;
@@ -32,11 +32,11 @@ struct nv50_disp_chan_func {
 
 int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int ctrl, int user, int head,
+			struct nv50_disp *, int ctrl, int user, int head,
 			const struct nvkm_oclass *, struct nv50_disp_chan *);
 int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int ctrl, int user, int head,
+			struct nv50_disp *, int ctrl, int user, int head,
 			const struct nvkm_oclass *, struct nvkm_object **);
 
 extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 21fbf89b6319..200dd90e016b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -174,7 +174,7 @@ gf119_disp_core_chan_mthd = {
 void
 gf119_disp_core_fini(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
@@ -197,7 +197,7 @@ gf119_disp_core_fini(struct nv50_disp_dmac *chan)
 static int
 gf119_disp_core_init(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index b0df4b752b8c..6ad5f2fb2ac1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -31,7 +31,7 @@
 static int
 gp102_disp_core_init(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
index b547c8b833ca..839cbc83428c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -54,7 +54,7 @@ nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
 	} else
 		return ret;
 
-	return nv50_disp_dmac_new_(func, mthd, root, chid, 0,
+	return nv50_disp_dmac_new_(func, mthd, root->disp, chid, 0,
 				   push, oclass, pobject);
 }
 
@@ -168,7 +168,7 @@ nv50_disp_core_chan_mthd = {
 static void
 nv50_disp_core_fini(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
@@ -190,7 +190,7 @@ nv50_disp_core_fini(struct nv50_disp_dmac *chan)
 static int
 nv50_disp_core_init(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index ab51121b7982..1f9a6c31ab3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -55,7 +55,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
 	} else
 		return ret;
 
-	return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
+	return nv50_disp_chan_new_(func, mthd, disp, ctrl + head, user + head,
 				   head, oclass, pobject);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index 6680ff8bf029..b73bcc38a259 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -31,7 +31,7 @@ int
 gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
 		     struct nvkm_object *object, u32 handle)
 {
-	return nvkm_ramht_insert(chan->base.root->disp->ramht, object,
+	return nvkm_ramht_insert(chan->base.disp->ramht, object,
 				 chan->base.chid.user, -9, handle,
 				 chan->base.chid.user << 27 | 0x00000001);
 }
@@ -39,7 +39,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
 void
 gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->base.chid.ctrl;
@@ -64,7 +64,7 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
 static int
 gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->base.chid.ctrl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index cdead9500343..62e9b8430791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -29,7 +29,7 @@
 static int
 gp102_disp_dmac_init(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->base.chid.ctrl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index c80d0479c79a..d081947d0689 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -56,7 +56,7 @@ nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
 			  void *data, u32 size, struct nvkm_object **pobject)
 {
 	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const struct nvkm_device_oclass *sclass = oclass->priv;
 	struct nv50_disp_dmac_object *object;
@@ -85,7 +85,7 @@ nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
 			  struct nvkm_oclass *sclass)
 {
 	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const struct nvkm_device_oclass *oclass = NULL;
 
@@ -133,7 +133,7 @@ nv50_disp_dmac_func_ = {
 int
 nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int chid, int head, u64 push,
+		    struct nv50_disp *disp, int chid, int head, u64 push,
 		    const struct nvkm_oclass *oclass,
 		    struct nvkm_object **pobject)
 {
@@ -147,7 +147,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
 	*pobject = &chan->base.object;
 	chan->func = func;
 
-	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
+	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, disp,
 				  chid, chid, head, oclass, &chan->base);
 	if (ret)
 		return ret;
@@ -177,7 +177,7 @@ int
 nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
 		    struct nvkm_object *object, u32 handle)
 {
-	return nvkm_ramht_insert(chan->base.root->disp->ramht, object,
+	return nvkm_ramht_insert(chan->base.disp->ramht, object,
 				 chan->base.chid.user, -10, handle,
 				 chan->base.chid.user << 28 |
 				 chan->base.chid.user);
@@ -186,7 +186,7 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
 static void
 nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->base.chid.ctrl;
@@ -210,7 +210,7 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
 static int
 nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
 {
-	struct nv50_disp *disp = chan->base.root->disp;
+	struct nv50_disp *disp = chan->base.disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->base.chid.ctrl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index f9b98211da6a..ca323864ebfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -18,7 +18,7 @@ struct nv50_disp_dmac_func {
 
 int nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *,
 			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int chid, int head, u64 push,
+			struct nv50_disp *, int chid, int head, u64 push,
 			const struct nvkm_oclass *, struct nvkm_object **);
 
 extern const struct nv50_disp_dmac_func nv50_disp_dmac_func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index f3b0fa2c5924..7e013cdf6f5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -55,7 +55,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
 	} else
 		return ret;
 
-	return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
+	return nv50_disp_chan_new_(func, mthd, disp, ctrl + head, user + head,
 				   head, oclass, pobject);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
index 9ebaaa6e9e33..d0e554601cc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -58,7 +58,7 @@ nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
 	} else
 		return ret;
 
-	return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+	return nv50_disp_dmac_new_(func, mthd, disp, chid + head,
 				   head, push, oclass, pobject);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index 0abaa6431943..7b1e9bf75abd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -29,7 +29,7 @@
 static void
 gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->chid.ctrl;
@@ -52,7 +52,7 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
 static int
 gf119_disp_pioc_init(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->chid.ctrl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 0211e0e8a35f..60c20123d84f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -29,7 +29,7 @@
 static void
 nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->chid.ctrl;
@@ -48,7 +48,7 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
 static int
 nv50_disp_pioc_init(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->root->disp;
+	struct nv50_disp *disp = chan->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	int ctrl = chan->chid.ctrl;

From 46f74a8ad79c4da47190df8492f0534fe8c02652 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1019/1286] drm/nouveau/disp/nv50-: simplify definition of
 overlay channels

Introduces a new method of defining the channels available from the display,
common to all channel types, allowing more flexibility in the available
channel types/counts and reducing the amount of boilerplate required.

This will be required to support Volta.
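
For reference, the constructors added for each channel type now share a common
shape; an abridged excerpt from the header hunk below (overlay channels shown,
the other channel types follow the same pattern):

    int nv50_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
                           struct nv50_disp *, struct nvkm_object **);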

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/Kbuild   |  1 -
 .../drm/nouveau/nvkm/engine/disp/channv50.h   | 28 +++++++++++++-
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.h   | 12 ------
 .../drm/nouveau/nvkm/engine/disp/ovlyg84.c    | 24 +++++-------
 .../drm/nouveau/nvkm/engine/disp/ovlygf119.c  | 22 ++++-------
 .../drm/nouveau/nvkm/engine/disp/ovlygk104.c  | 22 ++++-------
 .../drm/nouveau/nvkm/engine/disp/ovlygp102.c  | 20 ++++------
 .../drm/nouveau/nvkm/engine/disp/ovlygt200.c  | 22 ++++-------
 .../drm/nouveau/nvkm/engine/disp/ovlygt215.c  | 38 -------------------
 .../drm/nouveau/nvkm/engine/disp/ovlynv50.c   | 38 ++++++++-----------
 .../drm/nouveau/nvkm/engine/disp/rootg84.c    |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootg94.c    |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgf119.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgk104.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgk110.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgm107.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgm200.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgp100.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgp102.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgt200.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootgt215.c  |  5 ++-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.c   | 23 ++++++++++-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.h   |  7 +++-
 23 files changed, 154 insertions(+), 158 deletions(-)
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 48ce6699183e..67861f2ac2a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -96,7 +96,6 @@ nvkm-y += nvkm/engine/disp/coregp102.o
 nvkm-y += nvkm/engine/disp/ovlynv50.o
 nvkm-y += nvkm/engine/disp/ovlyg84.o
 nvkm-y += nvkm/engine/disp/ovlygt200.o
-nvkm-y += nvkm/engine/disp/ovlygt215.o
 nvkm-y += nvkm/engine/disp/ovlygf119.o
 nvkm-y += nvkm/engine/disp/ovlygk104.o
 nvkm-y += nvkm/engine/disp/ovlygp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index c9dc58ce47dd..b222a1daec40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -5,6 +5,7 @@
 #include <core/object.h>
 #include "nv50.h"
 struct nv50_disp_root;
+struct nv50_disp_dmac_func;
 
 struct nv50_disp_chan {
 	const struct nv50_disp_chan_func *func;
@@ -49,6 +50,30 @@ void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
 
 extern const struct nvkm_event_func gf119_disp_chan_uevent;
 
+int nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp *, int chid,
+			const struct nvkm_oclass *, void *argv, u32 argc,
+			struct nvkm_object **);
+
+int nv50_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
+		       struct nv50_disp *, struct nvkm_object **);
+
+int g84_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
+		      struct nv50_disp *, struct nvkm_object **);
+
+int gt200_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
+
+int gf119_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
+
+int gk104_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
+
+int gp102_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
+
 struct nv50_disp_mthd_list {
 	u32 mthd;
 	u32 addr;
@@ -81,7 +106,6 @@ extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd;
 extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
 extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
 extern const struct nv50_disp_chan_mthd g84_disp_base_chan_mthd;
-extern const struct nv50_disp_chan_mthd g84_disp_ovly_chan_mthd;
 
 extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd;
 
@@ -92,7 +116,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
 extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
 
 extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
-extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
+extern const struct nv50_disp_chan_mthd gk104_disp_ovly_mthd;
 
 struct nv50_disp_pioc_oclass {
 	int (*ctor)(const struct nv50_disp_chan_func *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index ca323864ebfe..45caaee86744 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -55,37 +55,26 @@ int nv50_disp_base_new(const struct nv50_disp_dmac_func *,
 		       struct nv50_disp_root *, int chid,
 		       const struct nvkm_oclass *oclass, void *data, u32 size,
 		       struct nvkm_object **);
-int nv50_disp_ovly_new(const struct nv50_disp_dmac_func *,
-		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
-		       const struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **);
 
 extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass nv50_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass nv50_disp_ovly_oclass;
 
 extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass g84_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass g84_disp_ovly_oclass;
 
 extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
 
 extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass gt200_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass gt200_disp_ovly_oclass;
 
 extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass gt215_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass gt215_disp_ovly_oclass;
 
 extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass gf119_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass gf119_disp_ovly_oclass;
 
 extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass gk104_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass gk104_disp_ovly_oclass;
 
 extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
@@ -98,5 +87,4 @@ extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
 
 extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass gp102_disp_base_oclass;
-extern const struct nv50_disp_dmac_oclass gp102_disp_ovly_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
index db6234eebc61..6b55cf483fe2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 static const struct nv50_disp_mthd_list
 g84_disp_ovly_mthd_base = {
@@ -54,8 +51,8 @@ g84_disp_ovly_mthd_base = {
 	}
 };
 
-const struct nv50_disp_chan_mthd
-g84_disp_ovly_chan_mthd = {
+static const struct nv50_disp_chan_mthd
+g84_disp_ovly_mthd = {
 	.name = "Overlay",
 	.addr = 0x000540,
 	.prev = 0x000004,
@@ -65,13 +62,10 @@ g84_disp_ovly_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-g84_disp_ovly_oclass = {
-	.base.oclass = G82_DISP_OVERLAY_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_ovly_new,
-	.func = &nv50_disp_dmac_func,
-	.mthd = &g84_disp_ovly_chan_mthd,
-	.chid = 3,
-};
+int
+g84_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		  struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_ovly_new_(&nv50_disp_dmac_func, &g84_disp_ovly_mthd,
+				   disp, 3, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
index 5985879abd23..30901caf75dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 static const struct nv50_disp_mthd_list
 gf119_disp_ovly_mthd_base = {
@@ -79,7 +76,7 @@ gf119_disp_ovly_mthd_base = {
 };
 
 static const struct nv50_disp_chan_mthd
-gf119_disp_ovly_chan_mthd = {
+gf119_disp_ovly_mthd = {
 	.name = "Overlay",
 	.addr = 0x001000,
 	.prev = -0x020000,
@@ -89,13 +86,10 @@ gf119_disp_ovly_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-gf119_disp_ovly_oclass = {
-	.base.oclass = GF110_DISP_OVERLAY_CONTROL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_ovly_new,
-	.func = &gf119_disp_dmac_func,
-	.mthd = &gf119_disp_ovly_chan_mthd,
-	.chid = 5,
-};
+int
+gf119_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_ovly_new_(&gf119_disp_dmac_func, &gf119_disp_ovly_mthd,
+				   disp, 5, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
index 2f0220b39f34..682c146c39d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 static const struct nv50_disp_mthd_list
 gk104_disp_ovly_mthd_base = {
@@ -81,7 +78,7 @@ gk104_disp_ovly_mthd_base = {
 };
 
 const struct nv50_disp_chan_mthd
-gk104_disp_ovly_chan_mthd = {
+gk104_disp_ovly_mthd = {
 	.name = "Overlay",
 	.addr = 0x001000,
 	.prev = -0x020000,
@@ -91,13 +88,10 @@ gk104_disp_ovly_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-gk104_disp_ovly_oclass = {
-	.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_ovly_new,
-	.func = &gf119_disp_dmac_func,
-	.mthd = &gk104_disp_ovly_chan_mthd,
-	.chid = 5,
-};
+int
+gk104_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_ovly_new_(&gf119_disp_dmac_func, &gk104_disp_ovly_mthd,
+				   disp, 5, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
index 589bd2f12b41..bcc5ac40f6f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
@@ -22,17 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gp102_disp_ovly_oclass = {
-	.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_ovly_new,
-	.func = &gp102_disp_dmac_func,
-	.mthd = &gk104_disp_ovly_chan_mthd,
-	.chid = 5,
-};
+int
+gp102_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_ovly_new_(&gp102_disp_dmac_func, &gk104_disp_ovly_mthd,
+				   disp, 5, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
index f858053db83d..655deb0d2fa0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 static const struct nv50_disp_mthd_list
 gt200_disp_ovly_mthd_base = {
@@ -58,7 +55,7 @@ gt200_disp_ovly_mthd_base = {
 };
 
 static const struct nv50_disp_chan_mthd
-gt200_disp_ovly_chan_mthd = {
+gt200_disp_ovly_mthd = {
 	.name = "Overlay",
 	.addr = 0x000540,
 	.prev = 0x000004,
@@ -68,13 +65,10 @@ gt200_disp_ovly_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-gt200_disp_ovly_oclass = {
-	.base.oclass = GT200_DISP_OVERLAY_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_ovly_new,
-	.func = &nv50_disp_dmac_func,
-	.mthd = &gt200_disp_ovly_chan_mthd,
-	.chid = 3,
-};
+int
+gt200_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_ovly_new_(&nv50_disp_dmac_func, &gt200_disp_ovly_mthd,
+				   disp, 3, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
deleted file mode 100644
index c947e1e16a37..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gt215_disp_ovly_oclass = {
-	.base.oclass = GT214_DISP_OVERLAY_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_ovly_new,
-	.func = &nv50_disp_dmac_func,
-	.mthd = &g84_disp_ovly_chan_mthd,
-	.chid = 3,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
index d0e554601cc1..46f5df0b3a1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -23,31 +23,28 @@
  */
 #include "dmacnv50.h"
 #include "head.h"
-#include "rootnv50.h"
 
 #include <core/client.h>
 
-#include <nvif/class.h>
 #include <nvif/cl507e.h>
 #include <nvif/unpack.h>
 
 int
-nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
-		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int chid,
-		   const struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
+nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp *disp, int chid,
+		    const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nvkm_object **pobject)
 {
 	union {
 		struct nv50_disp_overlay_channel_dma_v0 v0;
-	} *args = data;
+	} *args = argv;
 	struct nvkm_object *parent = oclass->parent;
-	struct nv50_disp *disp = root->disp;
 	int head, ret = -ENOSYS;
 	u64 push;
 
-	nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	nvif_ioctl(parent, "create disp overlay channel dma size %d\n", argc);
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
 		nvif_ioctl(parent, "create disp overlay channel dma vers %d "
 				   "pushbuf %016llx head %d\n",
 			   args->v0.version, args->v0.pushbuf, args->v0.head);
@@ -91,7 +88,7 @@ nv50_disp_ovly_mthd_base = {
 };
 
 static const struct nv50_disp_chan_mthd
-nv50_disp_ovly_chan_mthd = {
+nv50_disp_ovly_mthd = {
 	.name = "Overlay",
 	.addr = 0x000540,
 	.prev = 0x000004,
@@ -101,13 +98,10 @@ nv50_disp_ovly_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-nv50_disp_ovly_oclass = {
-	.base.oclass = NV50_DISP_OVERLAY_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_ovly_new,
-	.func = &nv50_disp_dmac_func,
-	.mthd = &nv50_disp_ovly_chan_mthd,
-	.chid = 3,
-};
+int
+nv50_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		   struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_ovly_new_(&nv50_disp_dmac_func, &nv50_disp_ovly_mthd,
+				   disp, 3, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index 36ac0d4237c7..ebb0803fd1b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -31,12 +31,15 @@ g84_disp_root = {
 	.dmac = {
 		&g84_disp_core_oclass,
 		&g84_disp_base_oclass,
-		&g84_disp_ovly_oclass,
 	},
 	.pioc = {
 		&g84_disp_oimm_oclass,
 		&g84_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,G82_DISP_OVERLAY_CHANNEL_DMA},  g84_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
index 18b87b3df862..6949cf0d9825 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -31,12 +31,15 @@ g94_disp_root = {
 	.dmac = {
 		&g94_disp_core_oclass,
 		&gt200_disp_base_oclass,
-		&gt200_disp_ovly_oclass,
 	},
 	.pioc = {
 		&g84_disp_oimm_oclass,
 		&g84_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index 7c5701f0b496..acc897f943fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -31,12 +31,15 @@ gf119_disp_root = {
 	.dmac = {
 		&gf119_disp_core_oclass,
 		&gf119_disp_base_oclass,
-		&gf119_disp_ovly_oclass,
 	},
 	.pioc = {
 		&gf119_disp_oimm_oclass,
 		&gf119_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, gf119_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
index c0946a602b71..2d19a0613a08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -31,12 +31,15 @@ gk104_disp_root = {
 	.dmac = {
 		&gk104_disp_core_oclass,
 		&gk104_disp_base_oclass,
-		&gk104_disp_ovly_oclass,
 	},
 	.pioc = {
 		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
index 2ebc16687b50..d7e224962e14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -31,12 +31,15 @@ gk110_disp_root = {
 	.dmac = {
 		&gk110_disp_core_oclass,
 		&gk110_disp_base_oclass,
-		&gk104_disp_ovly_oclass,
 	},
 	.pioc = {
 		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
index 5a62c9e1a2cf..de87b2743b22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -31,12 +31,15 @@ gm107_disp_root = {
 	.dmac = {
 		&gm107_disp_core_oclass,
 		&gk110_disp_base_oclass,
-		&gk104_disp_ovly_oclass,
 	},
 	.pioc = {
 		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 2634e06bf666..9e97c1e6548e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -31,12 +31,15 @@ gm200_disp_root = {
 	.dmac = {
 		&gm200_disp_core_oclass,
 		&gk110_disp_base_oclass,
-		&gk104_disp_ovly_oclass,
 	},
 	.pioc = {
 		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
index 784723597c7e..af7031ff4085 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -31,12 +31,15 @@ gp100_disp_root = {
 	.dmac = {
 		&gp100_disp_core_oclass,
 		&gk110_disp_base_oclass,
-		&gk104_disp_ovly_oclass,
 	},
 	.pioc = {
 		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 2fdfa8df0378..7603a4131581 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -31,12 +31,15 @@ gp102_disp_root = {
 	.dmac = {
 		&gp102_disp_core_oclass,
 		&gp102_disp_base_oclass,
-		&gp102_disp_ovly_oclass,
 	},
 	.pioc = {
 		&gp102_disp_oimm_oclass,
 		&gp102_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gp102_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
index facad2794eb6..8ef149d954cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -31,12 +31,15 @@ gt200_disp_root = {
 	.dmac = {
 		&gt200_disp_core_oclass,
 		&gt200_disp_base_oclass,
-		&gt200_disp_ovly_oclass,
 	},
 	.pioc = {
 		&g84_disp_oimm_oclass,
 		&g84_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
index 3e93db58263f..07c8013eddb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -31,12 +31,15 @@ gt215_disp_root = {
 	.dmac = {
 		&gt215_disp_core_oclass,
 		&gt215_disp_base_oclass,
-		&gt215_disp_ovly_oclass,
 	},
 	.pioc = {
 		&gt215_disp_oimm_oclass,
 		&gt215_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,GT214_DISP_OVERLAY_CHANNEL_DMA},   g84_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 072c8c0e7096..f02368ffa1c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -288,6 +288,15 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
 			    sclass->chid.user, oclass, data, size, pobject);
 }
 
+static int
+nv50_disp_root_child_new_(const struct nvkm_oclass *oclass,
+			  void *argv, u32 argc, struct nvkm_object **pobject)
+{
+	struct nv50_disp *disp = nv50_disp_root(oclass->parent)->disp;
+	const struct nv50_disp_user *user = oclass->priv;
+	return user->ctor(oclass, argv, argc, disp, pobject);
+}
+
 static int
 nv50_disp_root_child_get_(struct nvkm_object *object, int index,
 			  struct nvkm_oclass *sclass)
@@ -310,6 +319,15 @@ nv50_disp_root_child_get_(struct nvkm_object *object, int index,
 		return 0;
 	}
 
+	index -= ARRAY_SIZE(root->func->pioc);
+
+	if (root->func->user[index].ctor) {
+		sclass->base = root->func->user[index].base;
+		sclass->priv = root->func->user + index;
+		sclass->ctor = nv50_disp_root_child_new_;
+		return 0;
+	}
+
 	return -EINVAL;
 }
 
@@ -351,12 +369,15 @@ nv50_disp_root = {
 	.dmac = {
 		&nv50_disp_core_oclass,
 		&nv50_disp_base_oclass,
-		&nv50_disp_ovly_oclass,
 	},
 	.pioc = {
 		&nv50_disp_oimm_oclass,
 		&nv50_disp_curs_oclass,
 	},
+	.user = {
+		{{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nv50_disp_ovly_new },
+		{}
+	},
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 06b554b212bd..1c4985a059cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -13,8 +13,13 @@ struct nv50_disp_root {
 };
 
 struct nv50_disp_root_func {
-	const struct nv50_disp_dmac_oclass *dmac[3];
+	const struct nv50_disp_dmac_oclass *dmac[2];
 	const struct nv50_disp_pioc_oclass *pioc[2];
+	struct nv50_disp_user {
+		struct nvkm_sclass base;
+		int (*ctor)(const struct nvkm_oclass *, void *argv, u32 argc,
+			    struct nv50_disp *, struct nvkm_object **);
+	} user[];
 };
 
 int  nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,

From c2c3a00310df71e1f92d99ec3d5818d152f12bc8 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1020/1286] drm/nouveau/disp/nv50-: simplify definition of
 overlay immediate channels

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/Kbuild   |  3 --
 .../drm/nouveau/nvkm/engine/disp/channv50.h   | 21 +++++------
 .../drm/nouveau/nvkm/engine/disp/oimmg84.c    | 37 -------------------
 .../drm/nouveau/nvkm/engine/disp/oimmgf119.c  | 19 ++++------
 .../drm/nouveau/nvkm/engine/disp/oimmgk104.c  | 37 -------------------
 .../drm/nouveau/nvkm/engine/disp/oimmgp102.c  | 19 ++++------
 .../drm/nouveau/nvkm/engine/disp/oimmgt215.c  | 37 -------------------
 .../drm/nouveau/nvkm/engine/disp/oimmnv50.c   | 36 ++++++++----------
 .../drm/nouveau/nvkm/engine/disp/rootg84.c    |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootg94.c    |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgf119.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk104.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk110.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm107.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm200.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp100.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp102.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt200.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt215.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.h   |  2 +-
 21 files changed, 52 insertions(+), 183 deletions(-)
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 67861f2ac2a4..b53a0e2cfee3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -111,8 +111,5 @@ nvkm-y += nvkm/engine/disp/cursgk104.o
 nvkm-y += nvkm/engine/disp/cursgp102.o
 
 nvkm-y += nvkm/engine/disp/oimmnv50.o
-nvkm-y += nvkm/engine/disp/oimmg84.o
-nvkm-y += nvkm/engine/disp/oimmgt215.o
 nvkm-y += nvkm/engine/disp/oimmgf119.o
-nvkm-y += nvkm/engine/disp/oimmgk104.o
 nvkm-y += nvkm/engine/disp/oimmgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index b222a1daec40..ba30766fe342 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -50,12 +50,18 @@ void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
 
 extern const struct nvkm_event_func gf119_disp_chan_uevent;
 
+int nv50_disp_oimm_new_(const struct nv50_disp_chan_func *,
+			struct nv50_disp *, int ctrl, int user,
+			const struct nvkm_oclass *, void *argv, u32 argc,
+			struct nvkm_object **);
 int nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *,
 			const struct nv50_disp_chan_mthd *,
 			struct nv50_disp *, int chid,
 			const struct nvkm_oclass *, void *argv, u32 argc,
 			struct nvkm_object **);
 
+int nv50_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
+		       struct nv50_disp *, struct nvkm_object **);
 int nv50_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 		       struct nv50_disp *, struct nvkm_object **);
 
@@ -65,12 +71,16 @@ int g84_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 int gt200_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
+int gf119_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gf119_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
 int gk104_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
+int gp102_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gp102_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
@@ -133,22 +143,16 @@ struct nv50_disp_pioc_oclass {
 	} chid;
 };
 
-extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass nv50_disp_curs_oclass;
 
-extern const struct nv50_disp_pioc_oclass g84_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass g84_disp_curs_oclass;
 
-extern const struct nv50_disp_pioc_oclass gt215_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass gt215_disp_curs_oclass;
 
-extern const struct nv50_disp_pioc_oclass gf119_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
 
-extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
 
-extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
 
 int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
@@ -156,9 +160,4 @@ int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
 		       struct nv50_disp_root *, int ctrl, int user,
 		       const struct nvkm_oclass *, void *data, u32 size,
 		       struct nvkm_object **);
-int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
-		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int ctrl, int user,
-		       const struct nvkm_oclass *, void *data, u32 size,
-		       struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
deleted file mode 100644
index 5ad5d0f5db05..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-g84_disp_oimm_oclass = {
-	.base.oclass = G82_DISP_OVERLAY,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_oimm_new,
-	.func = &nv50_disp_pioc_func,
-	.chid = { 5, 5 },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
index 1f9fd3403f07..1ae0bcfc89b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -22,16 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "channv50.h"
-#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-gf119_disp_oimm_oclass = {
-	.base.oclass = GF110_DISP_OVERLAY,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_oimm_new,
-	.func = &gf119_disp_pioc_func,
-	.chid = { 9, 9 },
-};
+int
+gf119_disp_oimm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_oimm_new_(&gf119_disp_pioc_func, disp, 9, 9,
+				   oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
deleted file mode 100644
index 0c09fe85e952..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-gk104_disp_oimm_oclass = {
-	.base.oclass = GK104_DISP_OVERLAY,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_oimm_new,
-	.func = &gf119_disp_pioc_func,
-	.chid = { 9, 9 },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
index abf82365c671..30ffb1008505 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
@@ -22,16 +22,11 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "channv50.h"
-#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-gp102_disp_oimm_oclass = {
-	.base.oclass = GK104_DISP_OVERLAY,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_oimm_new,
-	.func = &gf119_disp_pioc_func,
-	.chid = { 9, 13 },
-};
+int
+gp102_disp_oimm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_oimm_new_(&gf119_disp_pioc_func, disp, 9, 13,
+				   oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
deleted file mode 100644
index 1281db28aebd..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-gt215_disp_oimm_oclass = {
-	.base.oclass = GT214_DISP_OVERLAY,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_oimm_new,
-	.func = &nv50_disp_pioc_func,
-	.chid = { 5, 5 },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index 7e013cdf6f5c..0db99bfe9db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -23,30 +23,26 @@
  */
 #include "channv50.h"
 #include "head.h"
-#include "rootnv50.h"
 
 #include <core/client.h>
 
-#include <nvif/class.h>
 #include <nvif/cl507b.h>
 #include <nvif/unpack.h>
 
 int
-nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
-		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int ctrl, int user,
-		   const struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
+nv50_disp_oimm_new_(const struct nv50_disp_chan_func *func,
+		    struct nv50_disp *disp, int ctrl, int user,
+		    const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nvkm_object **pobject)
 {
 	union {
 		struct nv50_disp_overlay_v0 v0;
-	} *args = data;
+	} *args = argv;
 	struct nvkm_object *parent = oclass->parent;
-	struct nv50_disp *disp = root->disp;
 	int head, ret = -ENOSYS;
 
-	nvif_ioctl(parent, "create disp overlay size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	nvif_ioctl(parent, "create disp overlay size %d\n", argc);
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
 		nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
 			   args->v0.version, args->v0.head);
 		if (!nvkm_head_find(&disp->base, args->v0.head))
@@ -55,16 +51,14 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
 	} else
 		return ret;
 
-	return nv50_disp_chan_new_(func, mthd, disp, ctrl + head, user + head,
+	return nv50_disp_chan_new_(func, NULL, disp, ctrl + head, user + head,
 				   head, oclass, pobject);
 }
 
-const struct nv50_disp_pioc_oclass
-nv50_disp_oimm_oclass = {
-	.base.oclass = NV50_DISP_OVERLAY,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_oimm_new,
-	.func = &nv50_disp_pioc_func,
-	.chid = { 5, 5 },
-};
+int
+nv50_disp_oimm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		   struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_oimm_new_(&nv50_disp_pioc_func, disp, 5, 5,
+				   oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index ebb0803fd1b6..650ed0df1f3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -33,10 +33,10 @@ g84_disp_root = {
 		&g84_disp_base_oclass,
 	},
 	.pioc = {
-		&g84_disp_oimm_oclass,
 		&g84_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,G82_DISP_OVERLAY            }, nv50_disp_oimm_new },
 		{{0,0,G82_DISP_OVERLAY_CHANNEL_DMA},  g84_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
index 6949cf0d9825..19d23e0e2d28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -33,10 +33,10 @@ g94_disp_root = {
 		&gt200_disp_base_oclass,
 	},
 	.pioc = {
-		&g84_disp_oimm_oclass,
 		&g84_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,  G82_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index acc897f943fc..ef8be6a06b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -33,10 +33,10 @@ gf119_disp_root = {
 		&gf119_disp_base_oclass,
 	},
 	.pioc = {
-		&gf119_disp_oimm_oclass,
 		&gf119_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,GF110_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, gf119_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
index 2d19a0613a08..67002c02015d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -33,10 +33,10 @@ gk104_disp_root = {
 		&gk104_disp_base_oclass,
 	},
 	.pioc = {
-		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
index d7e224962e14..f82cf9c7bc87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -33,10 +33,10 @@ gk110_disp_root = {
 		&gk110_disp_base_oclass,
 	},
 	.pioc = {
-		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
index de87b2743b22..170961e61da1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -33,10 +33,10 @@ gm107_disp_root = {
 		&gk110_disp_base_oclass,
 	},
 	.pioc = {
-		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 9e97c1e6548e..3f77682aa017 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -33,10 +33,10 @@ gm200_disp_root = {
 		&gk110_disp_base_oclass,
 	},
 	.pioc = {
-		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
index af7031ff4085..c87b1d238098 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -33,10 +33,10 @@ gp100_disp_root = {
 		&gk110_disp_base_oclass,
 	},
 	.pioc = {
-		&gk104_disp_oimm_oclass,
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 7603a4131581..16516a46cc91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -33,10 +33,10 @@ gp102_disp_root = {
 		&gp102_disp_base_oclass,
 	},
 	.pioc = {
-		&gp102_disp_oimm_oclass,
 		&gp102_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,GK104_DISP_OVERLAY            }, gp102_disp_oimm_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gp102_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
index 8ef149d954cb..6d46bf6cfdb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -33,10 +33,10 @@ gt200_disp_root = {
 		&gt200_disp_base_oclass,
 	},
 	.pioc = {
-		&g84_disp_oimm_oclass,
 		&g84_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,  G82_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
index 07c8013eddb9..6863c94ec22d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -33,10 +33,10 @@ gt215_disp_root = {
 		&gt215_disp_base_oclass,
 	},
 	.pioc = {
-		&gt215_disp_oimm_oclass,
 		&gt215_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,GT214_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT214_DISP_OVERLAY_CHANNEL_DMA},   g84_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index f02368ffa1c9..41219c2d487c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -371,10 +371,10 @@ nv50_disp_root = {
 		&nv50_disp_base_oclass,
 	},
 	.pioc = {
-		&nv50_disp_oimm_oclass,
 		&nv50_disp_curs_oclass,
 	},
 	.user = {
+		{{0,0,NV50_DISP_OVERLAY            }, nv50_disp_oimm_new },
 		{{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nv50_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 1c4985a059cd..67f951864977 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -14,7 +14,7 @@ struct nv50_disp_root {
 
 struct nv50_disp_root_func {
 	const struct nv50_disp_dmac_oclass *dmac[2];
-	const struct nv50_disp_pioc_oclass *pioc[2];
+	const struct nv50_disp_pioc_oclass *pioc[1];
 	struct nv50_disp_user {
 		struct nvkm_sclass base;
 		int (*ctor)(const struct nvkm_oclass *, void *argv, u32 argc,

From 3ceeef9c03fc9ed6adbb0646b4b89096ca568670 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1021/1286] drm/nouveau/disp/nv50-: simplify definition of base
 channels

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/Kbuild   |  4 --
 .../drm/nouveau/nvkm/engine/disp/baseg84.c    | 24 +++++-------
 .../drm/nouveau/nvkm/engine/disp/basegf119.c  | 22 ++++-------
 .../drm/nouveau/nvkm/engine/disp/basegk104.c  | 38 -------------------
 .../drm/nouveau/nvkm/engine/disp/basegk110.c  | 38 -------------------
 .../drm/nouveau/nvkm/engine/disp/basegp102.c  | 20 ++++------
 .../drm/nouveau/nvkm/engine/disp/basegt200.c  | 38 -------------------
 .../drm/nouveau/nvkm/engine/disp/basegt215.c  | 38 -------------------
 .../drm/nouveau/nvkm/engine/disp/basenv50.c   | 38 ++++++++-----------
 .../drm/nouveau/nvkm/engine/disp/channv50.h   | 16 +++++++-
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.h   | 13 -------
 .../drm/nouveau/nvkm/engine/disp/rootg84.c    |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootg94.c    |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgf119.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk104.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk110.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm107.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm200.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp100.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp102.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt200.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt215.c  |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.c   |  2 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.h   |  2 +-
 24 files changed, 67 insertions(+), 248 deletions(-)
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index b53a0e2cfee3..c2d56bb5a452 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -73,11 +73,7 @@ nvkm-y += nvkm/engine/disp/dmacgp102.o
 
 nvkm-y += nvkm/engine/disp/basenv50.o
 nvkm-y += nvkm/engine/disp/baseg84.o
-nvkm-y += nvkm/engine/disp/basegt200.o
-nvkm-y += nvkm/engine/disp/basegt215.o
 nvkm-y += nvkm/engine/disp/basegf119.o
-nvkm-y += nvkm/engine/disp/basegk104.o
-nvkm-y += nvkm/engine/disp/basegk110.o
 nvkm-y += nvkm/engine/disp/basegp102.o
 
 nvkm-y += nvkm/engine/disp/corenv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
index 6d17630a3dee..03ec508d19f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 static const struct nv50_disp_mthd_list
 g84_disp_base_mthd_base = {
@@ -56,8 +53,8 @@ g84_disp_base_mthd_base = {
 	}
 };
 
-const struct nv50_disp_chan_mthd
-g84_disp_base_chan_mthd = {
+static const struct nv50_disp_chan_mthd
+g84_disp_base_mthd = {
 	.name = "Base",
 	.addr = 0x000540,
 	.prev = 0x000004,
@@ -68,13 +65,10 @@ g84_disp_base_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-g84_disp_base_oclass = {
-	.base.oclass = G82_DISP_BASE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_base_new,
-	.func = &nv50_disp_dmac_func,
-	.mthd = &g84_disp_base_chan_mthd,
-	.chid = 1,
-};
+int
+g84_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		  struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_base_new_(&nv50_disp_dmac_func, &g84_disp_base_mthd,
+				   disp, 1, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
index ebcb925e9d90..4c372dc6a128 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 static const struct nv50_disp_mthd_list
 gf119_disp_base_mthd_base = {
@@ -91,7 +88,7 @@ gf119_disp_base_mthd_image = {
 };
 
 const struct nv50_disp_chan_mthd
-gf119_disp_base_chan_mthd = {
+gf119_disp_base_mthd = {
 	.name = "Base",
 	.addr = 0x001000,
 	.prev = -0x020000,
@@ -102,13 +99,10 @@ gf119_disp_base_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-gf119_disp_base_oclass = {
-	.base.oclass = GF110_DISP_BASE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_base_new,
-	.func = &gf119_disp_dmac_func,
-	.mthd = &gf119_disp_base_chan_mthd,
-	.chid = 1,
-};
+int
+gf119_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_base_new_(&gf119_disp_dmac_func, &gf119_disp_base_mthd,
+				   disp, 1, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
deleted file mode 100644
index 780a1d973634..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gk104_disp_base_oclass = {
-	.base.oclass = GK104_DISP_BASE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_base_new,
-	.func = &gf119_disp_dmac_func,
-	.mthd = &gf119_disp_base_chan_mthd,
-	.chid = 1,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
deleted file mode 100644
index d8bdd246c8ed..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gk110_disp_base_oclass = {
-	.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_base_new,
-	.func = &gf119_disp_dmac_func,
-	.mthd = &gf119_disp_base_chan_mthd,
-	.chid = 1,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
index 8a3cdeef8d2c..3a25259de057 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
@@ -22,17 +22,11 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gp102_disp_base_oclass = {
-	.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_base_new,
-	.func = &gp102_disp_dmac_func,
-	.mthd = &gf119_disp_base_chan_mthd,
-	.chid = 1,
-};
+int
+gp102_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_base_new_(&gp102_disp_dmac_func, &gf119_disp_base_mthd,
+				   disp, 1, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
deleted file mode 100644
index 93451e46570c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gt200_disp_base_oclass = {
-	.base.oclass = GT200_DISP_BASE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_base_new,
-	.func = &nv50_disp_dmac_func,
-	.mthd = &g84_disp_base_chan_mthd,
-	.chid = 1,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
deleted file mode 100644
index 08e2b1fa3806..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gt215_disp_base_oclass = {
-	.base.oclass = GT214_DISP_BASE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_base_new,
-	.func = &nv50_disp_dmac_func,
-	.mthd = &g84_disp_base_chan_mthd,
-	.chid = 1,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
index 418741a61f11..11639e2a792f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -23,31 +23,28 @@
  */
 #include "dmacnv50.h"
 #include "head.h"
-#include "rootnv50.h"
 
 #include <core/client.h>
 
-#include <nvif/class.h>
 #include <nvif/cl507c.h>
 #include <nvif/unpack.h>
 
 int
-nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
-		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int chid,
-		   const struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
+nv50_disp_base_new_(const struct nv50_disp_dmac_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp *disp, int chid,
+		    const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nvkm_object **pobject)
 {
 	union {
 		struct nv50_disp_base_channel_dma_v0 v0;
-	} *args = data;
+	} *args = argv;
 	struct nvkm_object *parent = oclass->parent;
-	struct nv50_disp *disp = root->disp;
 	int head, ret = -ENOSYS;
 	u64 push;
 
-	nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	nvif_ioctl(parent, "create disp base channel dma size %d\n", argc);
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
 		nvif_ioctl(parent, "create disp base channel dma vers %d "
 				   "pushbuf %016llx head %d\n",
 			   args->v0.version, args->v0.pushbuf, args->v0.head);
@@ -102,7 +99,7 @@ nv50_disp_base_mthd_image = {
 };
 
 static const struct nv50_disp_chan_mthd
-nv50_disp_base_chan_mthd = {
+nv50_disp_base_mthd = {
 	.name = "Base",
 	.addr = 0x000540,
 	.prev = 0x000004,
@@ -113,13 +110,10 @@ nv50_disp_base_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-nv50_disp_base_oclass = {
-	.base.oclass = NV50_DISP_BASE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_base_new,
-	.func = &nv50_disp_dmac_func,
-	.mthd = &nv50_disp_base_chan_mthd,
-	.chid = 1,
-};
+int
+nv50_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		   struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_base_new_(&nv50_disp_dmac_func, &nv50_disp_base_mthd,
+				   disp, 1, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index ba30766fe342..5d162775de19 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -54,6 +54,11 @@ int nv50_disp_oimm_new_(const struct nv50_disp_chan_func *,
 			struct nv50_disp *, int ctrl, int user,
 			const struct nvkm_oclass *, void *argv, u32 argc,
 			struct nvkm_object **);
+int nv50_disp_base_new_(const struct nv50_disp_dmac_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp *, int chid,
+			const struct nvkm_oclass *, void *argv, u32 argc,
+			struct nvkm_object **);
 int nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *,
 			const struct nv50_disp_chan_mthd *,
 			struct nv50_disp *, int chid,
@@ -62,9 +67,13 @@ int nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *,
 
 int nv50_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 		       struct nv50_disp *, struct nvkm_object **);
+int nv50_disp_base_new(const struct nvkm_oclass *, void *, u32,
+		       struct nv50_disp *, struct nvkm_object **);
 int nv50_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 		       struct nv50_disp *, struct nvkm_object **);
 
+int g84_disp_base_new(const struct nvkm_oclass *, void *, u32,
+		      struct nv50_disp *, struct nvkm_object **);
 int g84_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 		      struct nv50_disp *, struct nvkm_object **);
 
@@ -73,6 +82,8 @@ int gt200_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 
 int gf119_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
+int gf119_disp_base_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gf119_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
@@ -81,6 +92,8 @@ int gk104_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 
 int gp102_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
+int gp102_disp_base_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gp102_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
@@ -115,7 +128,6 @@ extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
 extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd;
 extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
 extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
-extern const struct nv50_disp_chan_mthd g84_disp_base_chan_mthd;
 
 extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd;
 
@@ -123,7 +135,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base;
 extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac;
 extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor;
 extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
-extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
+extern const struct nv50_disp_chan_mthd gf119_disp_base_mthd;
 
 extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
 extern const struct nv50_disp_chan_mthd gk104_disp_ovly_mthd;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index 45caaee86744..ae094c7c47f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -50,34 +50,22 @@ int nv50_disp_core_new(const struct nv50_disp_dmac_func *,
 		       struct nv50_disp_root *, int chid,
 		       const struct nvkm_oclass *oclass, void *data, u32 size,
 		       struct nvkm_object **);
-int nv50_disp_base_new(const struct nv50_disp_dmac_func *,
-		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
-		       const struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **);
 
 extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass nv50_disp_base_oclass;
 
 extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass g84_disp_base_oclass;
 
 extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
 
 extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gt200_disp_base_oclass;
 
 extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gt215_disp_base_oclass;
 
 extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gf119_disp_base_oclass;
 
 extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gk104_disp_base_oclass;
 
 extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
 
 extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
 
@@ -86,5 +74,4 @@ extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
 extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
 
 extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gp102_disp_base_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index 650ed0df1f3e..fa9076ba0a75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 g84_disp_root = {
 	.dmac = {
 		&g84_disp_core_oclass,
-		&g84_disp_base_oclass,
 	},
 	.pioc = {
 		&g84_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,G82_DISP_OVERLAY            }, nv50_disp_oimm_new },
+		{{0,0,G82_DISP_BASE_CHANNEL_DMA   },  g84_disp_base_new },
 		{{0,0,G82_DISP_OVERLAY_CHANNEL_DMA},  g84_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
index 19d23e0e2d28..6aeab0f0278b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 g94_disp_root = {
 	.dmac = {
 		&g94_disp_core_oclass,
-		&gt200_disp_base_oclass,
 	},
 	.pioc = {
 		&g84_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,  G82_DISP_OVERLAY            },  nv50_disp_oimm_new },
+		{{0,0,GT200_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
 		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index ef8be6a06b59..b44b14505949 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gf119_disp_root = {
 	.dmac = {
 		&gf119_disp_core_oclass,
-		&gf119_disp_base_oclass,
 	},
 	.pioc = {
 		&gf119_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,GF110_DISP_OVERLAY            }, gf119_disp_oimm_new },
+		{{0,0,GF110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, gf119_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
index 67002c02015d..c43eab97a393 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gk104_disp_root = {
 	.dmac = {
 		&gk104_disp_core_oclass,
-		&gk104_disp_base_oclass,
 	},
 	.pioc = {
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
+		{{0,0,GK104_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
index f82cf9c7bc87..2d48e73597b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gk110_disp_root = {
 	.dmac = {
 		&gk110_disp_core_oclass,
-		&gk110_disp_base_oclass,
 	},
 	.pioc = {
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
+		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
index 170961e61da1..904125e8199f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gm107_disp_root = {
 	.dmac = {
 		&gm107_disp_core_oclass,
-		&gk110_disp_base_oclass,
 	},
 	.pioc = {
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
+		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 3f77682aa017..52e9ccac93fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gm200_disp_root = {
 	.dmac = {
 		&gm200_disp_core_oclass,
-		&gk110_disp_base_oclass,
 	},
 	.pioc = {
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
+		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
index c87b1d238098..57f40d4930f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gp100_disp_root = {
 	.dmac = {
 		&gp100_disp_core_oclass,
-		&gk110_disp_base_oclass,
 	},
 	.pioc = {
 		&gk104_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
+		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 16516a46cc91..f66d7fced3de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gp102_disp_root = {
 	.dmac = {
 		&gp102_disp_core_oclass,
-		&gp102_disp_base_oclass,
 	},
 	.pioc = {
 		&gp102_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,GK104_DISP_OVERLAY            }, gp102_disp_oimm_new },
+		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gp102_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gp102_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
index 6d46bf6cfdb7..c28017998bc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gt200_disp_root = {
 	.dmac = {
 		&gt200_disp_core_oclass,
-		&gt200_disp_base_oclass,
 	},
 	.pioc = {
 		&g84_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,  G82_DISP_OVERLAY            },  nv50_disp_oimm_new },
+		{{0,0,GT200_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
 		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
index 6863c94ec22d..b9a6a32de82f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -30,13 +30,13 @@ static const struct nv50_disp_root_func
 gt215_disp_root = {
 	.dmac = {
 		&gt215_disp_core_oclass,
-		&gt215_disp_base_oclass,
 	},
 	.pioc = {
 		&gt215_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,GT214_DISP_OVERLAY            },  nv50_disp_oimm_new },
+		{{0,0,GT214_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
 		{{0,0,GT214_DISP_OVERLAY_CHANNEL_DMA},   g84_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 41219c2d487c..a87531d9a3b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -368,13 +368,13 @@ static const struct nv50_disp_root_func
 nv50_disp_root = {
 	.dmac = {
 		&nv50_disp_core_oclass,
-		&nv50_disp_base_oclass,
 	},
 	.pioc = {
 		&nv50_disp_curs_oclass,
 	},
 	.user = {
 		{{0,0,NV50_DISP_OVERLAY            }, nv50_disp_oimm_new },
+		{{0,0,NV50_DISP_BASE_CHANNEL_DMA   }, nv50_disp_base_new },
 		{{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nv50_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 67f951864977..371c6ee32313 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -13,7 +13,7 @@ struct nv50_disp_root {
 };
 
 struct nv50_disp_root_func {
-	const struct nv50_disp_dmac_oclass *dmac[2];
+	const struct nv50_disp_dmac_oclass *dmac[1];
 	const struct nv50_disp_pioc_oclass *pioc[1];
 	struct nv50_disp_user {
 		struct nvkm_sclass base;

From 6d41a7536f8cff35be9b3c4ccb94e55f1553a7a1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1022/1286] drm/nouveau/disp/nv50-: simplify definition of
 cursor channels
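
Instead of a per-chipset nv50_disp_pioc_oclass descriptor looked up through
the root's pioc[] array, each chipset now provides a plain constructor
wrapper around nv50_disp_curs_new_(), and the root's user[] table maps the
cursor class id straight to that constructor. A minimal standalone sketch of
the pattern (hypothetical names and placeholder class ids, not the actual
nouveau definitions):

/*
 * Standalone model of the change: the per-chipset descriptor structs and
 * the root's pioc[] array are replaced by thin constructor wrappers
 * dispatched from the root's user[] class table.
 */
#include <stdio.h>

struct disp;                          /* stand-in for struct nv50_disp */

/* shared worker, in the same spirit as nv50_disp_curs_new_() */
static int
curs_new_(const char *func, struct disp *disp, int ctrl, int user)
{
	(void)disp;
	printf("cursor channel: func=%s ctrl=%d user=%d\n", func, ctrl, user);
	return 0;
}

/* thin per-chipset wrappers replace the old *_disp_curs_oclass structs */
static int
nv50_curs_new(struct disp *disp)
{
	return curs_new_("nv50_pioc", disp, 7, 7);
}

static int
gf119_curs_new(struct disp *disp)
{
	return curs_new_("gf119_pioc", disp, 13, 13);
}

/* a root's user[] table now maps class ids straight to constructors */
struct user_sclass {
	unsigned oclass;              /* placeholder class id */
	int (*ctor)(struct disp *);
};

static const struct user_sclass nv50_user[]  = {{ 0x507a, nv50_curs_new },  {0}};
static const struct user_sclass gf119_user[] = {{ 0x907a, gf119_curs_new }, {0}};

int
main(void)
{
	return nv50_user[0].ctor(NULL) || gf119_user[0].ctor(NULL);
}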

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/Kbuild   |  3 --
 .../drm/nouveau/nvkm/engine/disp/channv50.h   | 43 +++++--------------
 .../drm/nouveau/nvkm/engine/disp/cursg84.c    | 37 ----------------
 .../drm/nouveau/nvkm/engine/disp/cursgf119.c  | 19 +++-----
 .../drm/nouveau/nvkm/engine/disp/cursgk104.c  | 37 ----------------
 .../drm/nouveau/nvkm/engine/disp/cursgp102.c  | 19 +++-----
 .../drm/nouveau/nvkm/engine/disp/cursgt215.c  | 37 ----------------
 .../drm/nouveau/nvkm/engine/disp/cursnv50.c   | 36 +++++++---------
 .../drm/nouveau/nvkm/engine/disp/rootg84.c    |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootg94.c    |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgf119.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk104.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk110.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm107.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm200.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp100.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp102.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt200.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt215.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.c   | 23 +---------
 .../drm/nouveau/nvkm/engine/disp/rootnv50.h   |  2 -
 21 files changed, 51 insertions(+), 249 deletions(-)
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index c2d56bb5a452..ee41fb75c94d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -100,10 +100,7 @@ nvkm-y += nvkm/engine/disp/piocnv50.o
 nvkm-y += nvkm/engine/disp/piocgf119.o
 
 nvkm-y += nvkm/engine/disp/cursnv50.o
-nvkm-y += nvkm/engine/disp/cursg84.o
-nvkm-y += nvkm/engine/disp/cursgt215.o
 nvkm-y += nvkm/engine/disp/cursgf119.o
-nvkm-y += nvkm/engine/disp/cursgk104.o
 nvkm-y += nvkm/engine/disp/cursgp102.o
 
 nvkm-y += nvkm/engine/disp/oimmnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 5d162775de19..d52420f410ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -50,6 +50,10 @@ void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
 
 extern const struct nvkm_event_func gf119_disp_chan_uevent;
 
+int nv50_disp_curs_new_(const struct nv50_disp_chan_func *,
+			struct nv50_disp *, int ctrl, int user,
+			const struct nvkm_oclass *, void *argv, u32 argc,
+			struct nvkm_object **);
 int nv50_disp_oimm_new_(const struct nv50_disp_chan_func *,
 			struct nv50_disp *, int ctrl, int user,
 			const struct nvkm_oclass *, void *argv, u32 argc,
@@ -65,6 +69,8 @@ int nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *,
 			const struct nvkm_oclass *, void *argv, u32 argc,
 			struct nvkm_object **);
 
+int nv50_disp_curs_new(const struct nvkm_oclass *, void *, u32,
+		       struct nv50_disp *, struct nvkm_object **);
 int nv50_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 		       struct nv50_disp *, struct nvkm_object **);
 int nv50_disp_base_new(const struct nvkm_oclass *, void *, u32,
@@ -80,6 +86,8 @@ int g84_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 int gt200_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
+int gf119_disp_curs_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gf119_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 int gf119_disp_base_new(const struct nvkm_oclass *, void *, u32,
@@ -90,6 +98,8 @@ int gf119_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 int gk104_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
+int gp102_disp_curs_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gp102_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 int gp102_disp_base_new(const struct nvkm_oclass *, void *, u32,
@@ -139,37 +149,4 @@ extern const struct nv50_disp_chan_mthd gf119_disp_base_mthd;
 
 extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
 extern const struct nv50_disp_chan_mthd gk104_disp_ovly_mthd;
-
-struct nv50_disp_pioc_oclass {
-	int (*ctor)(const struct nv50_disp_chan_func *,
-		    const struct nv50_disp_chan_mthd *,
-		    struct nv50_disp_root *, int ctrl, int user,
-		    const struct nvkm_oclass *, void *data, u32 size,
-		    struct nvkm_object **);
-	struct nvkm_sclass base;
-	const struct nv50_disp_chan_func *func;
-	const struct nv50_disp_chan_mthd *mthd;
-	struct {
-		int ctrl;
-		int user;
-	} chid;
-};
-
-extern const struct nv50_disp_pioc_oclass nv50_disp_curs_oclass;
-
-extern const struct nv50_disp_pioc_oclass g84_disp_curs_oclass;
-
-extern const struct nv50_disp_pioc_oclass gt215_disp_curs_oclass;
-
-extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
-
-extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
-
-extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
-
-int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
-		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int ctrl, int user,
-		       const struct nvkm_oclass *, void *data, u32 size,
-		       struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
deleted file mode 100644
index fa781b5a7e07..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "channv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-g84_disp_curs_oclass = {
-	.base.oclass = G82_DISP_CURSOR,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_curs_new,
-	.func = &nv50_disp_pioc_func,
-	.chid = { 7, 7 },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index 2be6fb052c65..cdda3658dcb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -22,16 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "channv50.h"
-#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-gf119_disp_curs_oclass = {
-	.base.oclass = GF110_DISP_CURSOR,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_curs_new,
-	.func = &gf119_disp_pioc_func,
-	.chid = { 13, 13 },
-};
+int
+gf119_disp_curs_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_curs_new_(&gf119_disp_pioc_func, disp, 13, 13,
+				   oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
deleted file mode 100644
index 2a99db4bf8f8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-gk104_disp_curs_oclass = {
-	.base.oclass = GK104_DISP_CURSOR,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_curs_new,
-	.func = &gf119_disp_pioc_func,
-	.chid = { 13, 13 },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
index e958210d8105..1a4601f975e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
@@ -22,16 +22,11 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "channv50.h"
-#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-gp102_disp_curs_oclass = {
-	.base.oclass = GK104_DISP_CURSOR,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_curs_new,
-	.func = &gf119_disp_pioc_func,
-	.chid = { 13, 17 },
-};
+int
+gp102_disp_curs_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_curs_new_(&gf119_disp_pioc_func, disp, 13, 17,
+				   oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
deleted file mode 100644
index 00a7f3564450..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "channv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_pioc_oclass
-gt215_disp_curs_oclass = {
-	.base.oclass = GT214_DISP_CURSOR,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_curs_new,
-	.func = &nv50_disp_pioc_func,
-	.chid = { 7, 7 },
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index 1f9a6c31ab3b..d29758504a5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -23,30 +23,26 @@
  */
 #include "channv50.h"
 #include "head.h"
-#include "rootnv50.h"
 
 #include <core/client.h>
 
-#include <nvif/class.h>
 #include <nvif/cl507a.h>
 #include <nvif/unpack.h>
 
 int
-nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
-		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int ctrl, int user,
-		   const struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
+nv50_disp_curs_new_(const struct nv50_disp_chan_func *func,
+		    struct nv50_disp *disp, int ctrl, int user,
+		    const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nvkm_object **pobject)
 {
 	union {
 		struct nv50_disp_cursor_v0 v0;
-	} *args = data;
+	} *args = argv;
 	struct nvkm_object *parent = oclass->parent;
-	struct nv50_disp *disp = root->disp;
 	int head, ret = -ENOSYS;
 
-	nvif_ioctl(parent, "create disp cursor size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	nvif_ioctl(parent, "create disp cursor size %d\n", argc);
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
 		nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
 			   args->v0.version, args->v0.head);
 		if (!nvkm_head_find(&disp->base, args->v0.head))
@@ -55,16 +51,14 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
 	} else
 		return ret;
 
-	return nv50_disp_chan_new_(func, mthd, disp, ctrl + head, user + head,
+	return nv50_disp_chan_new_(func, NULL, disp, ctrl + head, user + head,
 				   head, oclass, pobject);
 }
 
-const struct nv50_disp_pioc_oclass
-nv50_disp_curs_oclass = {
-	.base.oclass = NV50_DISP_CURSOR,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_curs_new,
-	.func = &nv50_disp_pioc_func,
-	.chid = { 7, 7 },
-};
+int
+nv50_disp_curs_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		   struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_curs_new_(&nv50_disp_pioc_func, disp, 7, 7,
+				   oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index fa9076ba0a75..0f7e662cd175 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -31,10 +31,8 @@ g84_disp_root = {
 	.dmac = {
 		&g84_disp_core_oclass,
 	},
-	.pioc = {
-		&g84_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,G82_DISP_CURSOR             }, nv50_disp_curs_new },
 		{{0,0,G82_DISP_OVERLAY            }, nv50_disp_oimm_new },
 		{{0,0,G82_DISP_BASE_CHANNEL_DMA   },  g84_disp_base_new },
 		{{0,0,G82_DISP_OVERLAY_CHANNEL_DMA},  g84_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
index 6aeab0f0278b..20709d6e8a8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -31,10 +31,8 @@ g94_disp_root = {
 	.dmac = {
 		&g94_disp_core_oclass,
 	},
-	.pioc = {
-		&g84_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,  G82_DISP_CURSOR             },  nv50_disp_curs_new },
 		{{0,0,  G82_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT200_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
 		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index b44b14505949..1161698dbb4e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -31,10 +31,8 @@ gf119_disp_root = {
 	.dmac = {
 		&gf119_disp_core_oclass,
 	},
-	.pioc = {
-		&gf119_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,GF110_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GF110_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GF110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, gf119_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
index c43eab97a393..15379ff5ebf4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -31,10 +31,8 @@ gk104_disp_root = {
 	.dmac = {
 		&gk104_disp_core_oclass,
 	},
-	.pioc = {
-		&gk104_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK104_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
index 2d48e73597b0..0a47674f6541 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -31,10 +31,8 @@ gk110_disp_root = {
 	.dmac = {
 		&gk110_disp_core_oclass,
 	},
-	.pioc = {
-		&gk104_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
index 904125e8199f..819521e25717 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -31,10 +31,8 @@ gm107_disp_root = {
 	.dmac = {
 		&gm107_disp_core_oclass,
 	},
-	.pioc = {
-		&gk104_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 52e9ccac93fa..0dca1772bf33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -31,10 +31,8 @@ gm200_disp_root = {
 	.dmac = {
 		&gm200_disp_core_oclass,
 	},
-	.pioc = {
-		&gk104_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
index 57f40d4930f7..6fcf8583b819 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -31,10 +31,8 @@ gp100_disp_root = {
 	.dmac = {
 		&gp100_disp_core_oclass,
 	},
-	.pioc = {
-		&gk104_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index f66d7fced3de..bf5cbc32120d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -31,10 +31,8 @@ gp102_disp_root = {
 	.dmac = {
 		&gp102_disp_core_oclass,
 	},
-	.pioc = {
-		&gp102_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,GK104_DISP_CURSOR             }, gp102_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gp102_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gp102_disp_base_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gp102_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
index c28017998bc6..2bd3f36475e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -31,10 +31,8 @@ gt200_disp_root = {
 	.dmac = {
 		&gt200_disp_core_oclass,
 	},
-	.pioc = {
-		&g84_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,  G82_DISP_CURSOR             },  nv50_disp_curs_new },
 		{{0,0,  G82_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT200_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
 		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
index b9a6a32de82f..aa4c52355e88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -31,10 +31,8 @@ gt215_disp_root = {
 	.dmac = {
 		&gt215_disp_core_oclass,
 	},
-	.pioc = {
-		&gt215_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,GT214_DISP_CURSOR             },  nv50_disp_curs_new },
 		{{0,0,GT214_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT214_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
 		{{0,0,GT214_DISP_OVERLAY_CHANNEL_DMA},   g84_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index a87531d9a3b8..24e8b418ac65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -278,16 +278,6 @@ nv50_disp_root_dmac_new_(const struct nvkm_oclass *oclass,
 			    oclass, data, size, pobject);
 }
 
-static int
-nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
-			 void *data, u32 size, struct nvkm_object **pobject)
-{
-	const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
-	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
-	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
-			    sclass->chid.user, oclass, data, size, pobject);
-}
-
 static int
 nv50_disp_root_child_new_(const struct nvkm_oclass *oclass,
 			  void *argv, u32 argc, struct nvkm_object **pobject)
@@ -312,15 +302,6 @@ nv50_disp_root_child_get_(struct nvkm_object *object, int index,
 
 	index -= ARRAY_SIZE(root->func->dmac);
 
-	if (index < ARRAY_SIZE(root->func->pioc)) {
-		sclass->base = root->func->pioc[index]->base;
-		sclass->priv = root->func->pioc[index];
-		sclass->ctor = nv50_disp_root_pioc_new_;
-		return 0;
-	}
-
-	index -= ARRAY_SIZE(root->func->pioc);
-
 	if (root->func->user[index].ctor) {
 		sclass->base = root->func->user[index].base;
 		sclass->priv = root->func->user + index;
@@ -369,10 +350,8 @@ nv50_disp_root = {
 	.dmac = {
 		&nv50_disp_core_oclass,
 	},
-	.pioc = {
-		&nv50_disp_curs_oclass,
-	},
 	.user = {
+		{{0,0,NV50_DISP_CURSOR             }, nv50_disp_curs_new },
 		{{0,0,NV50_DISP_OVERLAY            }, nv50_disp_oimm_new },
 		{{0,0,NV50_DISP_BASE_CHANNEL_DMA   }, nv50_disp_base_new },
 		{{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nv50_disp_ovly_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 371c6ee32313..484868af6597 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -3,7 +3,6 @@
 #define __NV50_DISP_ROOT_H__
 #define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
 #include "nv50.h"
-#include "channv50.h"
 #include "dmacnv50.h"
 
 struct nv50_disp_root {
@@ -14,7 +13,6 @@ struct nv50_disp_root {
 
 struct nv50_disp_root_func {
 	const struct nv50_disp_dmac_oclass *dmac[1];
-	const struct nv50_disp_pioc_oclass *pioc[1];
 	struct nv50_disp_user {
 		struct nvkm_sclass base;
 		int (*ctor)(const struct nvkm_oclass *, void *argv, u32 argc,

From 9b096283bf78f659e1286ef9b783b27ecf5a9977 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1023/1286] drm/nouveau/disp/nv50-: simplify definition of core
 channels

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/Kbuild   |  6 ---
 .../drm/nouveau/nvkm/engine/disp/channv50.h   | 24 +++++++++--
 .../drm/nouveau/nvkm/engine/disp/coreg84.c    | 22 ++++------
 .../drm/nouveau/nvkm/engine/disp/coreg94.c    | 22 ++++------
 .../drm/nouveau/nvkm/engine/disp/coregf119.c  | 24 ++++-------
 .../drm/nouveau/nvkm/engine/disp/coregk104.c  | 22 ++++------
 .../drm/nouveau/nvkm/engine/disp/coregk110.c  | 38 -----------------
 .../drm/nouveau/nvkm/engine/disp/coregm107.c  | 38 -----------------
 .../drm/nouveau/nvkm/engine/disp/coregm200.c  | 38 -----------------
 .../drm/nouveau/nvkm/engine/disp/coregp100.c  | 38 -----------------
 .../drm/nouveau/nvkm/engine/disp/coregp102.c  | 20 ++++-----
 .../drm/nouveau/nvkm/engine/disp/coregt200.c  | 38 -----------------
 .../drm/nouveau/nvkm/engine/disp/coregt215.c  | 38 -----------------
 .../drm/nouveau/nvkm/engine/disp/corenv50.c   | 39 ++++++++---------
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.h   | 42 -------------------
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.c   |  1 +
 .../drm/nouveau/nvkm/engine/disp/rootg84.c    |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootg94.c    |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgf119.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk104.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk110.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm107.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm200.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp100.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp102.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt200.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt215.c  |  4 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.c   | 23 +---------
 .../drm/nouveau/nvkm/engine/disp/rootnv50.h   |  2 +-
 29 files changed, 91 insertions(+), 428 deletions(-)
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index ee41fb75c94d..b580581ef5b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -79,14 +79,8 @@ nvkm-y += nvkm/engine/disp/basegp102.o
 nvkm-y += nvkm/engine/disp/corenv50.o
 nvkm-y += nvkm/engine/disp/coreg84.o
 nvkm-y += nvkm/engine/disp/coreg94.o
-nvkm-y += nvkm/engine/disp/coregt200.o
-nvkm-y += nvkm/engine/disp/coregt215.o
 nvkm-y += nvkm/engine/disp/coregf119.o
 nvkm-y += nvkm/engine/disp/coregk104.o
-nvkm-y += nvkm/engine/disp/coregk110.o
-nvkm-y += nvkm/engine/disp/coregm107.o
-nvkm-y += nvkm/engine/disp/coregm200.o
-nvkm-y += nvkm/engine/disp/coregp100.o
 nvkm-y += nvkm/engine/disp/coregp102.o
 
 nvkm-y += nvkm/engine/disp/ovlynv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index d52420f410ed..10ce217cc081 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -63,6 +63,11 @@ int nv50_disp_base_new_(const struct nv50_disp_dmac_func *,
 			struct nv50_disp *, int chid,
 			const struct nvkm_oclass *, void *argv, u32 argc,
 			struct nvkm_object **);
+int nv50_disp_core_new_(const struct nv50_disp_dmac_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp *, int chid,
+			const struct nvkm_oclass *oclass, void *argv, u32 argc,
+			struct nvkm_object **);
 int nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *,
 			const struct nv50_disp_chan_mthd *,
 			struct nv50_disp *, int chid,
@@ -75,14 +80,21 @@ int nv50_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 		       struct nv50_disp *, struct nvkm_object **);
 int nv50_disp_base_new(const struct nvkm_oclass *, void *, u32,
 		       struct nv50_disp *, struct nvkm_object **);
+int nv50_disp_core_new(const struct nvkm_oclass *, void *, u32,
+		       struct nv50_disp *, struct nvkm_object **);
 int nv50_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 		       struct nv50_disp *, struct nvkm_object **);
 
 int g84_disp_base_new(const struct nvkm_oclass *, void *, u32,
 		      struct nv50_disp *, struct nvkm_object **);
+int g84_disp_core_new(const struct nvkm_oclass *, void *, u32,
+		      struct nv50_disp *, struct nvkm_object **);
 int g84_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 		      struct nv50_disp *, struct nvkm_object **);
 
+int g94_disp_core_new(const struct nvkm_oclass *, void *, u32,
+		      struct nv50_disp *, struct nvkm_object **);
+
 int gt200_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
@@ -92,9 +104,13 @@ int gf119_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 int gf119_disp_base_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
+int gf119_disp_core_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gf119_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
+int gk104_disp_core_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gk104_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
@@ -104,6 +120,8 @@ int gp102_disp_oimm_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 int gp102_disp_base_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
+int gp102_disp_core_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
 int gp102_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
@@ -135,11 +153,11 @@ extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
 extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
 extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
 
-extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd;
+extern const struct nv50_disp_chan_mthd g84_disp_core_mthd;
 extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
 extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
 
-extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd;
+extern const struct nv50_disp_chan_mthd g94_disp_core_mthd;
 
 extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base;
 extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac;
@@ -147,6 +165,6 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor;
 extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
 extern const struct nv50_disp_chan_mthd gf119_disp_base_mthd;
 
-extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+extern const struct nv50_disp_chan_mthd gk104_disp_core_mthd;
 extern const struct nv50_disp_chan_mthd gk104_disp_ovly_mthd;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
index 1baa5c34b327..b16857f468ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 const struct nv50_disp_mthd_list
 g84_disp_core_mthd_dac = {
@@ -91,7 +88,7 @@ g84_disp_core_mthd_head = {
 };
 
 const struct nv50_disp_chan_mthd
-g84_disp_core_chan_mthd = {
+g84_disp_core_mthd = {
 	.name = "Core",
 	.addr = 0x000000,
 	.prev = 0x000004,
@@ -105,13 +102,10 @@ g84_disp_core_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-g84_disp_core_oclass = {
-	.base.oclass = G82_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &nv50_disp_core_func,
-	.mthd = &g84_disp_core_chan_mthd,
-	.chid = 0,
-};
+int
+g84_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		  struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_core_new_(&nv50_disp_core_func, &g84_disp_core_mthd,
+				   disp, 0, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
index c65c9f3ff69f..ea5f48912c77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 static const struct nv50_disp_mthd_list
 g94_disp_core_mthd_sor = {
@@ -37,7 +34,7 @@ g94_disp_core_mthd_sor = {
 };
 
 const struct nv50_disp_chan_mthd
-g94_disp_core_chan_mthd = {
+g94_disp_core_mthd = {
 	.name = "Core",
 	.addr = 0x000000,
 	.prev = 0x000004,
@@ -51,13 +48,10 @@ g94_disp_core_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-g94_disp_core_oclass = {
-	.base.oclass = GT206_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &nv50_disp_core_func,
-	.mthd = &g94_disp_core_chan_mthd,
-	.chid = 0,
-};
+int
+g94_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		  struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_core_new_(&nv50_disp_core_func, &g94_disp_core_mthd,
+				   disp, 0, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 200dd90e016b..9e48cc3625b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -22,14 +22,9 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
 
-#include <core/client.h>
 #include <subdev/timer.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
 const struct nv50_disp_mthd_list
 gf119_disp_core_mthd_base = {
 	.mthd = 0x0000,
@@ -157,7 +152,7 @@ gf119_disp_core_mthd_head = {
 };
 
 static const struct nv50_disp_chan_mthd
-gf119_disp_core_chan_mthd = {
+gf119_disp_core_mthd = {
 	.name = "Core",
 	.addr = 0x000000,
 	.prev = -0x020000,
@@ -232,13 +227,10 @@ gf119_disp_core_func = {
 	.bind = gf119_disp_dmac_bind,
 };
 
-const struct nv50_disp_dmac_oclass
-gf119_disp_core_oclass = {
-	.base.oclass = GF110_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &gf119_disp_core_func,
-	.mthd = &gf119_disp_core_chan_mthd,
-	.chid = 0,
-};
+int
+gf119_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_core_new_(&gf119_disp_core_func, &gf119_disp_core_mthd,
+				   disp, 0, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
index 088ab222e823..ca095958efdf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
@@ -22,9 +22,6 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
 
 static const struct nv50_disp_mthd_list
 gk104_disp_core_mthd_head = {
@@ -106,7 +103,7 @@ gk104_disp_core_mthd_head = {
 };
 
 const struct nv50_disp_chan_mthd
-gk104_disp_core_chan_mthd = {
+gk104_disp_core_mthd = {
 	.name = "Core",
 	.addr = 0x000000,
 	.prev = -0x020000,
@@ -120,13 +117,10 @@ gk104_disp_core_chan_mthd = {
 	}
 };
 
-const struct nv50_disp_dmac_oclass
-gk104_disp_core_oclass = {
-	.base.oclass = GK104_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &gf119_disp_core_func,
-	.mthd = &gk104_disp_core_chan_mthd,
-	.chid = 0,
-};
+int
+gk104_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_core_new_(&gf119_disp_core_func, &gk104_disp_core_mthd,
+				   disp, 0, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
deleted file mode 100644
index df0f45c20108..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gk110_disp_core_oclass = {
-	.base.oclass = GK110_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &gf119_disp_core_func,
-	.mthd = &gk104_disp_core_chan_mthd,
-	.chid = 0,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
deleted file mode 100644
index 9e27f8fd98b6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gm107_disp_core_oclass = {
-	.base.oclass = GM107_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &gf119_disp_core_func,
-	.mthd = &gk104_disp_core_chan_mthd,
-	.chid = 0,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
deleted file mode 100644
index bb23a8658ac0..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gm200_disp_core_oclass = {
-	.base.oclass = GM200_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &gf119_disp_core_func,
-	.mthd = &gk104_disp_core_chan_mthd,
-	.chid = 0,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
deleted file mode 100644
index d5dff6619d4d..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gp100_disp_core_oclass = {
-	.base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &gf119_disp_core_func,
-	.mthd = &gk104_disp_core_chan_mthd,
-	.chid = 0,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index 6ad5f2fb2ac1..3ec353e90b3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -22,12 +22,9 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
 
 #include <subdev/timer.h>
 
-#include <nvif/class.h>
-
 static int
 gp102_disp_core_init(struct nv50_disp_dmac *chan)
 {
@@ -66,13 +63,10 @@ gp102_disp_core_func = {
 	.bind = gf119_disp_dmac_bind,
 };
 
-const struct nv50_disp_dmac_oclass
-gp102_disp_core_oclass = {
-	.base.oclass = GP102_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &gp102_disp_core_func,
-	.mthd = &gk104_disp_core_chan_mthd,
-	.chid = 0,
-};
+int
+gp102_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_core_new_(&gp102_disp_core_func, &gk104_disp_core_mthd,
+				   disp, 0, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
deleted file mode 100644
index b234547708fc..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gt200_disp_core_oclass = {
-	.base.oclass = GT200_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &nv50_disp_core_func,
-	.mthd = &g84_disp_core_chan_mthd,
-	.chid = 0,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
deleted file mode 100644
index 8f5ba2018975..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "dmacnv50.h"
-#include "rootnv50.h"
-
-#include <nvif/class.h>
-
-const struct nv50_disp_dmac_oclass
-gt215_disp_core_oclass = {
-	.base.oclass = GT214_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &nv50_disp_core_func,
-	.mthd = &g94_disp_core_chan_mthd,
-	.chid = 0,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
index 839cbc83428c..8cdcf5b590e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -22,31 +22,29 @@
  * Authors: Ben Skeggs
  */
 #include "dmacnv50.h"
-#include "rootnv50.h"
 
 #include <core/client.h>
 #include <subdev/timer.h>
 
-#include <nvif/class.h>
 #include <nvif/cl507d.h>
 #include <nvif/unpack.h>
 
 int
-nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
-		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int chid,
-		   const struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
+nv50_disp_core_new_(const struct nv50_disp_dmac_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp *disp, int chid,
+		    const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nvkm_object **pobject)
 {
 	union {
 		struct nv50_disp_core_channel_dma_v0 v0;
-	} *args = data;
+	} *args = argv;
 	struct nvkm_object *parent = oclass->parent;
 	u64 push;
 	int ret = -ENOSYS;
 
-	nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	nvif_ioctl(parent, "create disp core channel dma size %d\n", argc);
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
 		nvif_ioctl(parent, "create disp core channel dma vers %d "
 				   "pushbuf %016llx\n",
 			   args->v0.version, args->v0.pushbuf);
@@ -54,7 +52,7 @@ nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
 	} else
 		return ret;
 
-	return nv50_disp_dmac_new_(func, mthd, root->disp, chid, 0,
+	return nv50_disp_dmac_new_(func, mthd, disp, chid, 0,
 				   push, oclass, pobject);
 }
 
@@ -151,7 +149,7 @@ nv50_disp_core_mthd_head = {
 };
 
 static const struct nv50_disp_chan_mthd
-nv50_disp_core_chan_mthd = {
+nv50_disp_core_mthd = {
 	.name = "Core",
 	.addr = 0x000000,
 	.prev = 0x000004,
@@ -231,13 +229,10 @@ nv50_disp_core_func = {
 	.bind = nv50_disp_dmac_bind,
 };
 
-const struct nv50_disp_dmac_oclass
-nv50_disp_core_oclass = {
-	.base.oclass = NV50_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &nv50_disp_core_func,
-	.mthd = &nv50_disp_core_chan_mthd,
-	.chid = 0,
-};
+int
+nv50_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		   struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_core_new_(&nv50_disp_core_func, &nv50_disp_core_mthd,
+				   disp, 0, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index ae094c7c47f5..feeb5882dc91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -32,46 +32,4 @@ extern const struct nv50_disp_dmac_func gf119_disp_core_func;
 void gf119_disp_core_fini(struct nv50_disp_dmac *);
 
 extern const struct nv50_disp_dmac_func gp102_disp_dmac_func;
-
-struct nv50_disp_dmac_oclass {
-	int (*ctor)(const struct nv50_disp_dmac_func *,
-		    const struct nv50_disp_chan_mthd *,
-		    struct nv50_disp_root *, int chid,
-		    const struct nvkm_oclass *, void *data, u32 size,
-		    struct nvkm_object **);
-	struct nvkm_sclass base;
-	const struct nv50_disp_dmac_func *func;
-	const struct nv50_disp_chan_mthd *mthd;
-	int chid;
-};
-
-int nv50_disp_core_new(const struct nv50_disp_dmac_func *,
-		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
-		       const struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **);
-
-extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
-
-extern const struct nv50_disp_dmac_oclass gp102_disp_core_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 1d2280ab3194..4a37c44fcbed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -24,6 +24,7 @@
 #include "nv50.h"
 #include "head.h"
 #include "ior.h"
+#include "channv50.h"
 #include "rootnv50.h"
 
 #include <core/client.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index 0f7e662cd175..e51f89f32507 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 g84_disp_root = {
-	.dmac = {
-		&g84_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,G82_DISP_CURSOR             }, nv50_disp_curs_new },
 		{{0,0,G82_DISP_OVERLAY            }, nv50_disp_oimm_new },
 		{{0,0,G82_DISP_BASE_CHANNEL_DMA   },  g84_disp_base_new },
+		{{0,0,G82_DISP_CORE_CHANNEL_DMA   },  g84_disp_core_new },
 		{{0,0,G82_DISP_OVERLAY_CHANNEL_DMA},  g84_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
index 20709d6e8a8b..ed7838eedb2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 g94_disp_root = {
-	.dmac = {
-		&g94_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,  G82_DISP_CURSOR             },  nv50_disp_curs_new },
 		{{0,0,  G82_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT200_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
+		{{0,0,GT206_DISP_CORE_CHANNEL_DMA   },   g94_disp_core_new },
 		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index 1161698dbb4e..ac92e65131c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gf119_disp_root = {
-	.dmac = {
-		&gf119_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,GF110_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GF110_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GF110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
+		{{0,0,GF110_DISP_CORE_CHANNEL_DMA   }, gf119_disp_core_new },
 		{{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, gf119_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
index 15379ff5ebf4..3bb6d601aed2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gk104_disp_root = {
-	.dmac = {
-		&gk104_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK104_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
+		{{0,0,GK104_DISP_CORE_CHANNEL_DMA   }, gk104_disp_core_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
index 0a47674f6541..336419815d98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gk110_disp_root = {
-	.dmac = {
-		&gk110_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
+		{{0,0,GK110_DISP_CORE_CHANNEL_DMA   }, gk104_disp_core_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
index 819521e25717..c53e71ee69e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gm107_disp_root = {
-	.dmac = {
-		&gm107_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
+		{{0,0,GM107_DISP_CORE_CHANNEL_DMA   }, gk104_disp_core_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 0dca1772bf33..85409d1bc7bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gm200_disp_root = {
-	.dmac = {
-		&gm200_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
+		{{0,0,GM200_DISP_CORE_CHANNEL_DMA   }, gk104_disp_core_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
index 6fcf8583b819..ebfd245c573a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gp100_disp_root = {
-	.dmac = {
-		&gp100_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,GK104_DISP_CURSOR             }, gf119_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gf119_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gf119_disp_base_new },
+		{{0,0,GP100_DISP_CORE_CHANNEL_DMA   }, gk104_disp_core_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gk104_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index bf5cbc32120d..54b5fda99208 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gp102_disp_root = {
-	.dmac = {
-		&gp102_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,GK104_DISP_CURSOR             }, gp102_disp_curs_new },
 		{{0,0,GK104_DISP_OVERLAY            }, gp102_disp_oimm_new },
 		{{0,0,GK110_DISP_BASE_CHANNEL_DMA   }, gp102_disp_base_new },
+		{{0,0,GP102_DISP_CORE_CHANNEL_DMA   }, gp102_disp_core_new },
 		{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, gp102_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
index 2bd3f36475e3..14ac83bf3693 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gt200_disp_root = {
-	.dmac = {
-		&gt200_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,  G82_DISP_CURSOR             },  nv50_disp_curs_new },
 		{{0,0,  G82_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT200_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
+		{{0,0,GT200_DISP_CORE_CHANNEL_DMA   },   g84_disp_core_new },
 		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, gt200_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
index aa4c52355e88..cb1a208e68bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -28,13 +28,11 @@
 
 static const struct nv50_disp_root_func
 gt215_disp_root = {
-	.dmac = {
-		&gt215_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,GT214_DISP_CURSOR             },  nv50_disp_curs_new },
 		{{0,0,GT214_DISP_OVERLAY            },  nv50_disp_oimm_new },
 		{{0,0,GT214_DISP_BASE_CHANNEL_DMA   },   g84_disp_base_new },
+		{{0,0,GT214_DISP_CORE_CHANNEL_DMA   },   g94_disp_core_new },
 		{{0,0,GT214_DISP_OVERLAY_CHANNEL_DMA},   g84_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 24e8b418ac65..9d716ee7621e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -268,16 +268,6 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 	return -EINVAL;
 }
 
-static int
-nv50_disp_root_dmac_new_(const struct nvkm_oclass *oclass,
-			 void *data, u32 size, struct nvkm_object **pobject)
-{
-	const struct nv50_disp_dmac_oclass *sclass = oclass->priv;
-	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
-	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
-			    oclass, data, size, pobject);
-}
-
 static int
 nv50_disp_root_child_new_(const struct nvkm_oclass *oclass,
 			  void *argv, u32 argc, struct nvkm_object **pobject)
@@ -293,15 +283,6 @@ nv50_disp_root_child_get_(struct nvkm_object *object, int index,
 {
 	struct nv50_disp_root *root = nv50_disp_root(object);
 
-	if (index < ARRAY_SIZE(root->func->dmac)) {
-		sclass->base = root->func->dmac[index]->base;
-		sclass->priv = root->func->dmac[index];
-		sclass->ctor = nv50_disp_root_dmac_new_;
-		return 0;
-	}
-
-	index -= ARRAY_SIZE(root->func->dmac);
-
 	if (root->func->user[index].ctor) {
 		sclass->base = root->func->user[index].base;
 		sclass->priv = root->func->user + index;
@@ -347,13 +328,11 @@ nv50_disp_root_new_(const struct nv50_disp_root_func *func,
 
 static const struct nv50_disp_root_func
 nv50_disp_root = {
-	.dmac = {
-		&nv50_disp_core_oclass,
-	},
 	.user = {
 		{{0,0,NV50_DISP_CURSOR             }, nv50_disp_curs_new },
 		{{0,0,NV50_DISP_OVERLAY            }, nv50_disp_oimm_new },
 		{{0,0,NV50_DISP_BASE_CHANNEL_DMA   }, nv50_disp_base_new },
+		{{0,0,NV50_DISP_CORE_CHANNEL_DMA   }, nv50_disp_core_new },
 		{{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nv50_disp_ovly_new },
 		{}
 	},
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 484868af6597..12fc13b397ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -12,7 +12,7 @@ struct nv50_disp_root {
 };
 
 struct nv50_disp_root_func {
-	const struct nv50_disp_dmac_oclass *dmac[1];
+	int blah;
 	struct nv50_disp_user {
 		struct nvkm_sclass base;
 		int (*ctor)(const struct nvkm_oclass *, void *argv, u32 argc,

From 8531f57027136fa63ddae91821ca89b32b571fe2 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1024/1286] drm/nouveau/disp/nv50-: merge handling of pio and
 dma channels

The current split between PIO and DMA channel handling is unnecessarily
complicated, and a barrier to cleanly supporting Volta.
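
With the two function tables merged, every display channel is driven
through the single nv50_disp_chan_func introduced by the channv50.h
changes below (sketch of the merged interface; the pushbuf address now
lives directly on nv50_disp_chan as "u64 push"):

  struct nv50_disp_chan_func {
          int (*init)(struct nv50_disp_chan *);
          void (*fini)(struct nv50_disp_chan *);
          /* NULL for PIO channels; DMA channels use this to insert
           * their DMA objects into RAMHT. */
          int (*bind)(struct nv50_disp_chan *, struct nvkm_object *,
                      u32 handle);
  };

Whether a channel accepts child DMA objects is now decided by checking
chan->func->bind, rather than via per-type child_get/child_new hooks.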

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../drm/nouveau/nvkm/engine/disp/baseg84.c    |   2 +-
 .../drm/nouveau/nvkm/engine/disp/basegf119.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/basegp102.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/basenv50.c   |   4 +-
 .../drm/nouveau/nvkm/engine/disp/channv50.c   |  97 ++++++++----
 .../drm/nouveau/nvkm/engine/disp/channv50.h   |  39 ++---
 .../drm/nouveau/nvkm/engine/disp/coreg84.c    |   2 +-
 .../drm/nouveau/nvkm/engine/disp/coreg94.c    |   2 +-
 .../drm/nouveau/nvkm/engine/disp/coregf119.c  |  14 +-
 .../drm/nouveau/nvkm/engine/disp/coregk104.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/coregp102.c  |   9 +-
 .../drm/nouveau/nvkm/engine/disp/corenv50.c   |  16 +-
 .../drm/nouveau/nvkm/engine/disp/dmacgf119.c  |  31 ++--
 .../drm/nouveau/nvkm/engine/disp/dmacgp102.c  |  14 +-
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.c   | 147 +++---------------
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.h   |  35 -----
 .../gpu/drm/nouveau/nvkm/engine/disp/gf119.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/disp/gp102.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.h   |   7 +
 .../drm/nouveau/nvkm/engine/disp/ovlyg84.c    |   2 +-
 .../drm/nouveau/nvkm/engine/disp/ovlygf119.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/ovlygk104.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/ovlygp102.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/ovlygt200.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/ovlynv50.c   |   4 +-
 .../drm/nouveau/nvkm/engine/disp/rootg84.c    |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootg94.c    |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgf119.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk104.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgk110.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm107.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgm200.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp100.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgp102.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt200.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootgt215.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.c   |   2 +-
 .../drm/nouveau/nvkm/engine/disp/rootnv50.h   |   2 +-
 38 files changed, 183 insertions(+), 284 deletions(-)
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
index 03ec508d19f0..01253f4a9946 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 static const struct nv50_disp_mthd_list
 g84_disp_base_mthd_base = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
index 4c372dc6a128..389e19dfc514 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 static const struct nv50_disp_mthd_list
 gf119_disp_base_mthd_base = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
index 3a25259de057..0cb23d673aa0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp102.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 int
 gp102_disp_base_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
index 11639e2a792f..19eb7dde01f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 #include "head.h"
 
 #include <core/client.h>
@@ -30,7 +30,7 @@
 #include <nvif/unpack.h>
 
 int
-nv50_disp_base_new_(const struct nv50_disp_dmac_func *func,
+nv50_disp_base_new_(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
 		    struct nv50_disp *disp, int chid,
 		    const struct nvkm_oclass *oclass, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 96b732d4b9ab..91a2f0c64731 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -26,6 +26,7 @@
 
 #include <core/client.h>
 #include <core/notify.h>
+#include <core/oproxy.h>
 #include <core/ramht.h>
 #include <engine/dma.h>
 
@@ -204,25 +205,76 @@ nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
 	return 0;
 }
 
+struct nv50_disp_chan_object {
+	struct nvkm_oproxy oproxy;
+	struct nv50_disp *disp;
+	int hash;
+};
+
+static void
+nv50_disp_chan_child_del_(struct nvkm_oproxy *base)
+{
+	struct nv50_disp_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	nvkm_ramht_remove(object->disp->ramht, object->hash);
+}
+
+static const struct nvkm_oproxy_func
+nv50_disp_chan_child_func_ = {
+	.dtor[0] = nv50_disp_chan_child_del_,
+};
+
 static int
 nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
-			 void *data, u32 size, struct nvkm_object **pobject)
+			 void *argv, u32 argc, struct nvkm_object **pobject)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
-	return chan->func->child_new(chan, oclass, data, size, pobject);
+	struct nv50_disp *disp = chan->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const struct nvkm_device_oclass *sclass = oclass->priv;
+	struct nv50_disp_chan_object *object;
+	int ret;
+
+	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(&nv50_disp_chan_child_func_, oclass, &object->oproxy);
+	object->disp = disp;
+	*pobject = &object->oproxy.base;
+
+	ret = sclass->ctor(device, oclass, argv, argc, &object->oproxy.object);
+	if (ret)
+		return ret;
+
+	object->hash = chan->func->bind(chan, object->oproxy.object,
+					      oclass->handle);
+	if (object->hash < 0)
+		return object->hash;
+
+	return 0;
 }
 
 static int
 nv50_disp_chan_child_get(struct nvkm_object *object, int index,
-			 struct nvkm_oclass *oclass)
+			 struct nvkm_oclass *sclass)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	if (chan->func->child_get) {
-		int ret = chan->func->child_get(chan, index, oclass);
-		if (ret == 0)
-			oclass->ctor = nv50_disp_chan_child_new;
-		return ret;
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const struct nvkm_device_oclass *oclass = NULL;
+
+	if (chan->func->bind)
+		sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
+	else
+		sclass->engine = NULL;
+
+	if (sclass->engine && sclass->engine->func->base.sclass) {
+		sclass->engine->func->base.sclass(sclass, index, &oclass);
+		if (oclass) {
+			sclass->ctor = nv50_disp_chan_child_new,
+			sclass->priv = oclass;
+			return 0;
+		}
 	}
+
 	return -EINVAL;
 }
 
@@ -248,7 +300,7 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
 	struct nv50_disp *disp = chan->disp;
 	if (chan->chid.user >= 0)
 		disp->chan[chan->chid.user] = NULL;
-	return chan->func->dtor ? chan->func->dtor(chan) : chan;
+	return chan;
 }
 
 static const struct nvkm_object_func
@@ -264,12 +316,18 @@ nv50_disp_chan = {
 };
 
 int
-nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
+nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
 		    struct nv50_disp *disp, int ctrl, int user, int head,
 		    const struct nvkm_oclass *oclass,
-		    struct nv50_disp_chan *chan)
+		    struct nvkm_object **pobject)
 {
+	struct nv50_disp_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->object;
+
 	nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
 	chan->func = func;
 	chan->mthd = mthd;
@@ -285,20 +343,3 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
 	disp->chan[chan->chid.user] = chan;
 	return 0;
 }
-
-int
-nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
-		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp *disp, int ctrl, int user, int head,
-		    const struct nvkm_oclass *oclass,
-		    struct nvkm_object **pobject)
-{
-	struct nv50_disp_chan *chan;
-
-	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-		return -ENOMEM;
-	*pobject = &chan->object;
-
-	return nv50_disp_chan_ctor(func, mthd, disp, ctrl, user,
-				   head, oclass, chan);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 10ce217cc081..feb8a56fcc85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -5,7 +5,6 @@
 #include <core/object.h>
 #include "nv50.h"
 struct nv50_disp_root;
-struct nv50_disp_dmac_func;
 
 struct nv50_disp_chan {
 	const struct nv50_disp_chan_func *func;
@@ -19,36 +18,38 @@ struct nv50_disp_chan {
 	int head;
 
 	struct nvkm_object object;
+
+	u64 push;
 };
 
 struct nv50_disp_chan_func {
-	void *(*dtor)(struct nv50_disp_chan *);
 	int (*init)(struct nv50_disp_chan *);
 	void (*fini)(struct nv50_disp_chan *);
-	int (*child_get)(struct nv50_disp_chan *, int index,
-			 struct nvkm_oclass *);
-	int (*child_new)(struct nv50_disp_chan *, const struct nvkm_oclass *,
-			 void *data, u32 size, struct nvkm_object **);
+	int (*bind)(struct nv50_disp_chan *, struct nvkm_object *, u32 handle);
 };
 
-int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
-			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp *, int ctrl, int user, int head,
-			const struct nvkm_oclass *, struct nv50_disp_chan *);
 int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
 			struct nv50_disp *, int ctrl, int user, int head,
 			const struct nvkm_oclass *, struct nvkm_object **);
+int nv50_disp_dmac_new_(const struct nv50_disp_chan_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp *, int chid, int head, u64 push,
+			const struct nvkm_oclass *, struct nvkm_object **);
 
 extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
+extern const struct nv50_disp_chan_func nv50_disp_dmac_func;
+int nv50_disp_dmac_bind(struct nv50_disp_chan *, struct nvkm_object *, u32);
+extern const struct nv50_disp_chan_func nv50_disp_core_func;
+
 extern const struct nv50_disp_chan_func gf119_disp_pioc_func;
+extern const struct nv50_disp_chan_func gf119_disp_dmac_func;
+void gf119_disp_dmac_fini(struct nv50_disp_chan *);
+int gf119_disp_dmac_bind(struct nv50_disp_chan *, struct nvkm_object *, u32);
+extern const struct nv50_disp_chan_func gf119_disp_core_func;
+void gf119_disp_core_fini(struct nv50_disp_chan *);
 
-extern const struct nvkm_event_func nv50_disp_chan_uevent;
-int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
-				struct nvkm_notify *);
-void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
-
-extern const struct nvkm_event_func gf119_disp_chan_uevent;
+extern const struct nv50_disp_chan_func gp102_disp_dmac_func;
 
 int nv50_disp_curs_new_(const struct nv50_disp_chan_func *,
 			struct nv50_disp *, int ctrl, int user,
@@ -58,17 +59,17 @@ int nv50_disp_oimm_new_(const struct nv50_disp_chan_func *,
 			struct nv50_disp *, int ctrl, int user,
 			const struct nvkm_oclass *, void *argv, u32 argc,
 			struct nvkm_object **);
-int nv50_disp_base_new_(const struct nv50_disp_dmac_func *,
+int nv50_disp_base_new_(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
 			struct nv50_disp *, int chid,
 			const struct nvkm_oclass *, void *argv, u32 argc,
 			struct nvkm_object **);
-int nv50_disp_core_new_(const struct nv50_disp_dmac_func *,
+int nv50_disp_core_new_(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
 			struct nv50_disp *, int chid,
 			const struct nvkm_oclass *oclass, void *argv, u32 argc,
 			struct nvkm_object **);
-int nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *,
+int nv50_disp_ovly_new_(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
 			struct nv50_disp *, int chid,
 			const struct nvkm_oclass *, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
index b16857f468ee..cfc54aad3e7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 const struct nv50_disp_mthd_list
 g84_disp_core_mthd_dac = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
index ea5f48912c77..e911925f1182 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 static const struct nv50_disp_mthd_list
 g94_disp_core_mthd_sor = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 9e48cc3625b5..17c66162417b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <subdev/timer.h>
 
@@ -167,10 +167,9 @@ gf119_disp_core_mthd = {
 };
 
 void
-gf119_disp_core_fini(struct nv50_disp_dmac *chan)
+gf119_disp_core_fini(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
 	/* deactivate channel */
@@ -190,10 +189,9 @@ gf119_disp_core_fini(struct nv50_disp_dmac *chan)
 }
 
 static int
-gf119_disp_core_init(struct nv50_disp_dmac *chan)
+gf119_disp_core_init(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
 	/* enable error reporting */
@@ -220,7 +218,7 @@ gf119_disp_core_init(struct nv50_disp_dmac *chan)
 	return 0;
 }
 
-const struct nv50_disp_dmac_func
+const struct nv50_disp_chan_func
 gf119_disp_core_func = {
 	.init = gf119_disp_core_init,
 	.fini = gf119_disp_core_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
index ca095958efdf..5c800174e079 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 static const struct nv50_disp_mthd_list
 gk104_disp_core_mthd_head = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index 3ec353e90b3e..57d26051bc95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -21,15 +21,14 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <subdev/timer.h>
 
 static int
-gp102_disp_core_init(struct nv50_disp_dmac *chan)
+gp102_disp_core_init(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
 	/* enable error reporting */
@@ -56,7 +55,7 @@ gp102_disp_core_init(struct nv50_disp_dmac *chan)
 	return 0;
 }
 
-static const struct nv50_disp_dmac_func
+static const struct nv50_disp_chan_func
 gp102_disp_core_func = {
 	.init = gp102_disp_core_init,
 	.fini = gf119_disp_core_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
index 8cdcf5b590e7..d648c4d7b55c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <core/client.h>
 #include <subdev/timer.h>
@@ -30,7 +30,7 @@
 #include <nvif/unpack.h>
 
 int
-nv50_disp_core_new_(const struct nv50_disp_dmac_func *func,
+nv50_disp_core_new_(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
 		    struct nv50_disp *disp, int chid,
 		    const struct nvkm_oclass *oclass, void *argv, u32 argc,
@@ -164,10 +164,9 @@ nv50_disp_core_mthd = {
 };
 
 static void
-nv50_disp_core_fini(struct nv50_disp_dmac *chan)
+nv50_disp_core_fini(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
 	/* deactivate channel */
@@ -186,10 +185,9 @@ nv50_disp_core_fini(struct nv50_disp_dmac *chan)
 }
 
 static int
-nv50_disp_core_init(struct nv50_disp_dmac *chan)
+nv50_disp_core_init(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
 	/* enable error reporting */
@@ -222,7 +220,7 @@ nv50_disp_core_init(struct nv50_disp_dmac *chan)
 	return 0;
 }
 
-const struct nv50_disp_dmac_func
+const struct nv50_disp_chan_func
 nv50_disp_core_func = {
 	.init = nv50_disp_core_init,
 	.fini = nv50_disp_core_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index b73bcc38a259..f69749a29df8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -21,29 +21,27 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
-#include "rootnv50.h"
+#include "channv50.h"
 
 #include <core/ramht.h>
 #include <subdev/timer.h>
 
 int
-gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
+gf119_disp_dmac_bind(struct nv50_disp_chan *chan,
 		     struct nvkm_object *object, u32 handle)
 {
-	return nvkm_ramht_insert(chan->base.disp->ramht, object,
-				 chan->base.chid.user, -9, handle,
-				 chan->base.chid.user << 27 | 0x00000001);
+	return nvkm_ramht_insert(chan->disp->ramht, object,
+				 chan->chid.user, -9, handle,
+				 chan->chid.user << 27 | 0x00000001);
 }
 
 void
-gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
+gf119_disp_dmac_fini(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int ctrl = chan->base.chid.ctrl;
-	int user = chan->base.chid.user;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
 	/* deactivate channel */
 	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
@@ -62,13 +60,12 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
 }
 
 static int
-gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
+gf119_disp_dmac_init(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int ctrl = chan->base.chid.ctrl;
-	int user = chan->base.chid.user;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
 	/* enable error reporting */
 	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
@@ -94,7 +91,7 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
 	return 0;
 }
 
-const struct nv50_disp_dmac_func
+const struct nv50_disp_chan_func
 gf119_disp_dmac_func = {
 	.init = gf119_disp_dmac_init,
 	.fini = gf119_disp_dmac_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index 62e9b8430791..22fa5925644a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -21,19 +21,17 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "dmacnv50.h"
-#include "rootnv50.h"
+#include "channv50.h"
 
 #include <subdev/timer.h>
 
 static int
-gp102_disp_dmac_init(struct nv50_disp_dmac *chan)
+gp102_disp_dmac_init(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int ctrl = chan->base.chid.ctrl;
-	int user = chan->base.chid.user;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
 	/* enable error reporting */
 	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
@@ -59,7 +57,7 @@ gp102_disp_dmac_init(struct nv50_disp_dmac *chan)
 	return 0;
 }
 
-const struct nv50_disp_dmac_func
+const struct nv50_disp_chan_func
 gp102_disp_dmac_func = {
 	.init = gp102_disp_dmac_init,
 	.fini = gf119_disp_dmac_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index d081947d0689..5cd08cad2c26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -21,117 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
-#include "rootnv50.h"
+#include "channv50.h"
 
 #include <core/client.h>
-#include <core/oproxy.h>
 #include <core/ramht.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 #include <engine/dma.h>
 
-struct nv50_disp_dmac_object {
-	struct nvkm_oproxy oproxy;
-	struct nv50_disp *disp;
-	int hash;
-};
-
-static void
-nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
-{
-	struct nv50_disp_dmac_object *object =
-		container_of(base, typeof(*object), oproxy);
-	nvkm_ramht_remove(object->disp->ramht, object->hash);
-}
-
-static const struct nvkm_oproxy_func
-nv50_disp_dmac_child_func_ = {
-	.dtor[0] = nv50_disp_dmac_child_del_,
-};
-
-static int
-nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
-			  const struct nvkm_oclass *oclass,
-			  void *data, u32 size, struct nvkm_object **pobject)
-{
-	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_device *device = disp->base.engine.subdev.device;
-	const struct nvkm_device_oclass *sclass = oclass->priv;
-	struct nv50_disp_dmac_object *object;
-	int ret;
-
-	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
-		return -ENOMEM;
-	nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
-	object->disp = disp;
-	*pobject = &object->oproxy.base;
-
-	ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
-	if (ret)
-		return ret;
-
-	object->hash = chan->func->bind(chan, object->oproxy.object,
-					      oclass->handle);
-	if (object->hash < 0)
-		return object->hash;
-
-	return 0;
-}
-
-static int
-nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
-			  struct nvkm_oclass *sclass)
-{
-	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_device *device = disp->base.engine.subdev.device;
-	const struct nvkm_device_oclass *oclass = NULL;
-
-	sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
-	if (sclass->engine && sclass->engine->func->base.sclass) {
-		sclass->engine->func->base.sclass(sclass, index, &oclass);
-		if (oclass) {
-			sclass->priv = oclass;
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
-static void
-nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
-{
-	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
-	chan->func->fini(chan);
-}
-
-static int
-nv50_disp_dmac_init_(struct nv50_disp_chan *base)
-{
-	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
-	return chan->func->init(chan);
-}
-
-static void *
-nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
-{
-	return nv50_disp_dmac(base);
-}
-
-static const struct nv50_disp_chan_func
-nv50_disp_dmac_func_ = {
-	.dtor = nv50_disp_dmac_dtor_,
-	.init = nv50_disp_dmac_init_,
-	.fini = nv50_disp_dmac_fini_,
-	.child_get = nv50_disp_dmac_child_get_,
-	.child_new = nv50_disp_dmac_child_new_,
-};
-
 int
-nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
+nv50_disp_dmac_new_(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
 		    struct nv50_disp *disp, int chid, int head, u64 push,
 		    const struct nvkm_oclass *oclass,
@@ -139,16 +38,12 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
 {
 	struct nvkm_client *client = oclass->client;
 	struct nvkm_dmaobj *dmaobj;
-	struct nv50_disp_dmac *chan;
+	struct nv50_disp_chan *chan;
 	int ret;
 
-	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-		return -ENOMEM;
-	*pobject = &chan->base.object;
-	chan->func = func;
-
-	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, disp,
-				  chid, chid, head, oclass, &chan->base);
+	ret = nv50_disp_chan_new_(func, mthd, disp, chid, chid, head, oclass,
+				  pobject);
+	chan = nv50_disp_chan(*pobject);
 	if (ret)
 		return ret;
 
@@ -174,23 +69,22 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
 }
 
 int
-nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
+nv50_disp_dmac_bind(struct nv50_disp_chan *chan,
 		    struct nvkm_object *object, u32 handle)
 {
-	return nvkm_ramht_insert(chan->base.disp->ramht, object,
-				 chan->base.chid.user, -10, handle,
-				 chan->base.chid.user << 28 |
-				 chan->base.chid.user);
+	return nvkm_ramht_insert(chan->disp->ramht, object,
+				 chan->chid.user, -10, handle,
+				 chan->chid.user << 28 |
+				 chan->chid.user);
 }
 
 static void
-nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
+nv50_disp_dmac_fini(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int ctrl = chan->base.chid.ctrl;
-	int user = chan->base.chid.user;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
 	/* deactivate channel */
 	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
@@ -208,13 +102,12 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
 }
 
 static int
-nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
+nv50_disp_dmac_init(struct nv50_disp_chan *chan)
 {
-	struct nv50_disp *disp = chan->base.disp;
-	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int ctrl = chan->base.chid.ctrl;
-	int user = chan->base.chid.user;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
 	/* enable error reporting */
 	nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
@@ -240,7 +133,7 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
 	return 0;
 }
 
-const struct nv50_disp_dmac_func
+const struct nv50_disp_chan_func
 nv50_disp_dmac_func = {
 	.init = nv50_disp_dmac_init,
 	.fini = nv50_disp_dmac_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
deleted file mode 100644
index feeb5882dc91..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NV50_DISP_DMAC_H__
-#define __NV50_DISP_DMAC_H__
-#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
-#include "channv50.h"
-
-struct nv50_disp_dmac {
-	const struct nv50_disp_dmac_func *func;
-	struct nv50_disp_chan base;
-	u32 push;
-};
-
-struct nv50_disp_dmac_func {
-	int  (*init)(struct nv50_disp_dmac *);
-	void (*fini)(struct nv50_disp_dmac *);
-	int  (*bind)(struct nv50_disp_dmac *, struct nvkm_object *, u32 handle);
-};
-
-int nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *,
-			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp *, int chid, int head, u64 push,
-			const struct nvkm_oclass *, struct nvkm_object **);
-
-extern const struct nv50_disp_dmac_func nv50_disp_dmac_func;
-int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
-extern const struct nv50_disp_dmac_func nv50_disp_core_func;
-
-extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
-void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
-int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
-extern const struct nv50_disp_dmac_func gf119_disp_core_func;
-void gf119_disp_core_fini(struct nv50_disp_dmac *);
-
-extern const struct nv50_disp_dmac_func gp102_disp_dmac_func;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 382e6a6a6ff2..794e90982641 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -24,6 +24,7 @@
 #include "nv50.h"
 #include "head.h"
 #include "ior.h"
+#include "channv50.h"
 #include "rootnv50.h"
 
 #include <core/ramht.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 0a2c5b5f87eb..3468ddec1270 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -24,6 +24,7 @@
 #include "nv50.h"
 #include "head.h"
 #include "ior.h"
+#include "channv50.h"
 #include "rootnv50.h"
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index bb622d0f6d63..77aa2c8cfcd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -79,4 +79,11 @@ void gf119_disp_intr_error(struct nv50_disp *, int);
 void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_update_sppll1(struct nv50_disp *);
+
+extern const struct nvkm_event_func nv50_disp_chan_uevent;
+int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
+				struct nvkm_notify *);
+void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
+
+extern const struct nvkm_event_func gf119_disp_chan_uevent;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
index 6b55cf483fe2..31b915d48699 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 static const struct nv50_disp_mthd_list
 g84_disp_ovly_mthd_base = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
index 30901caf75dc..83fd534c44da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 static const struct nv50_disp_mthd_list
 gf119_disp_ovly_mthd_base = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
index 682c146c39d4..a7acacbc92c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 static const struct nv50_disp_mthd_list
 gk104_disp_ovly_mthd_base = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
index bcc5ac40f6f9..e0eca6ea914c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp102.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 int
 gp102_disp_ovly_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
index 655deb0d2fa0..dc60cd00dc16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 
 static const struct nv50_disp_mthd_list
 gt200_disp_ovly_mthd_base = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
index 46f5df0b3a1e..6974c12c4518 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "dmacnv50.h"
+#include "channv50.h"
 #include "head.h"
 
 #include <core/client.h>
@@ -30,7 +30,7 @@
 #include <nvif/unpack.h>
 
 int
-nv50_disp_ovly_new_(const struct nv50_disp_dmac_func *func,
+nv50_disp_ovly_new_(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
 		    struct nv50_disp *disp, int chid,
 		    const struct nvkm_oclass *oclass, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index e51f89f32507..1ed371fd7ddf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
index ed7838eedb2c..ef579eb00238 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
index ac92e65131c9..fe011165dc02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
index 3bb6d601aed2..9e8ffd348b50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
index 336419815d98..dc85cc1c9490 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
index c53e71ee69e7..e0181ca08840 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 85409d1bc7bc..e5e590e19f62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
index ebfd245c573a..762a1a922e05 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
index 54b5fda99208..c7f00946c9af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp102.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
index 14ac83bf3693..a6963654087c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
index cb1a208e68bd..4fe0a3ae8891 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 
 #include <nvif/class.h>
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 9d716ee7621e..3aa5a2879239 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "rootnv50.h"
-#include "dmacnv50.h"
+#include "channv50.h"
 #include "dp.h"
 #include "head.h"
 #include "ior.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 12fc13b397ed..9983a424d30d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -2,8 +2,8 @@
 #ifndef __NV50_DISP_ROOT_H__
 #define __NV50_DISP_ROOT_H__
 #define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
+#include <core/object.h>
 #include "nv50.h"
-#include "dmacnv50.h"
 
 struct nv50_disp_root {
 	const struct nv50_disp_root_func *func;

From 4a8621a24a8f68ecba6e59dccad2b252fa90ba59 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1025/1286] drm/nouveau/disp/nv50-: add channel interfaces to
 determine the user area

This will be required to support Volta.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../drm/nouveau/nvkm/engine/disp/channv50.c   | 27 +++++++++++--------
 .../drm/nouveau/nvkm/engine/disp/channv50.h   |  2 ++
 .../drm/nouveau/nvkm/engine/disp/coregf119.c  |  1 +
 .../drm/nouveau/nvkm/engine/disp/coregp102.c  |  1 +
 .../drm/nouveau/nvkm/engine/disp/corenv50.c   |  1 +
 .../drm/nouveau/nvkm/engine/disp/dmacgf119.c  |  1 +
 .../drm/nouveau/nvkm/engine/disp/dmacgp102.c  |  1 +
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.c   |  1 +
 .../drm/nouveau/nvkm/engine/disp/piocgf119.c  |  1 +
 .../drm/nouveau/nvkm/engine/disp/piocnv50.c   |  1 +
 10 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 91a2f0c64731..8e79aa5f52e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -155,13 +155,20 @@ nv50_disp_chan_uevent = {
 	.fini = nv50_disp_chan_uevent_fini,
 };
 
+u64
+nv50_disp_chan_user(struct nv50_disp_chan *chan, u64 *psize)
+{
+	*psize = 0x1000;
+	return 0x640000 + (chan->chid.user * 0x1000);
+}
+
 static int
 nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	struct nv50_disp *disp = chan->disp;
-	struct nvkm_device *device = disp->base.engine.subdev.device;
-	*data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	u64 size, base = chan->func->user(chan, &size);
+	*data = nvkm_rd32(device, base + addr);
 	return 0;
 }
 
@@ -169,9 +176,9 @@ static int
 nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	struct nv50_disp *disp = chan->disp;
-	struct nvkm_device *device = disp->base.engine.subdev.device;
-	nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	u64 size, base = chan->func->user(chan, &size);
+	nvkm_wr32(device, base + addr, data);
 	return 0;
 }
 
@@ -196,12 +203,10 @@ nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
 		   enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
-	struct nv50_disp *disp = chan->disp;
-	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u64 base = device->func->resource_addr(device, 0);
 	*type = NVKM_OBJECT_MAP_IO;
-	*addr = device->func->resource_addr(device, 0) +
-		0x640000 + (chan->chid.user * 0x1000);
-	*size = 0x001000;
+	*addr = base + chan->func->user(chan, size);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index feb8a56fcc85..75ae181da0e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -25,6 +25,7 @@ struct nv50_disp_chan {
 struct nv50_disp_chan_func {
 	int (*init)(struct nv50_disp_chan *);
 	void (*fini)(struct nv50_disp_chan *);
+	u64 (*user)(struct nv50_disp_chan *, u64 *size);
 	int (*bind)(struct nv50_disp_chan *, struct nvkm_object *, u32 handle);
 };
 
@@ -37,6 +38,7 @@ int nv50_disp_dmac_new_(const struct nv50_disp_chan_func *,
 			struct nv50_disp *, int chid, int head, u64 push,
 			const struct nvkm_oclass *, struct nvkm_object **);
 
+u64 nv50_disp_chan_user(struct nv50_disp_chan *, u64 *);
 extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
 extern const struct nv50_disp_chan_func nv50_disp_dmac_func;
 int nv50_disp_dmac_bind(struct nv50_disp_chan *, struct nvkm_object *, u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 17c66162417b..9ba4a8cd3dba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -222,6 +222,7 @@ const struct nv50_disp_chan_func
 gf119_disp_core_func = {
 	.init = gf119_disp_core_init,
 	.fini = gf119_disp_core_fini,
+	.user = nv50_disp_chan_user,
 	.bind = gf119_disp_dmac_bind,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index 57d26051bc95..aae5db4dc622 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -59,6 +59,7 @@ static const struct nv50_disp_chan_func
 gp102_disp_core_func = {
 	.init = gp102_disp_core_init,
 	.fini = gf119_disp_core_fini,
+	.user = nv50_disp_chan_user,
 	.bind = gf119_disp_dmac_bind,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
index d648c4d7b55c..5fd449d32109 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -224,6 +224,7 @@ const struct nv50_disp_chan_func
 nv50_disp_core_func = {
 	.init = nv50_disp_core_init,
 	.fini = nv50_disp_core_fini,
+	.user = nv50_disp_chan_user,
 	.bind = nv50_disp_dmac_bind,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index f69749a29df8..2a6d0728dd2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -95,5 +95,6 @@ const struct nv50_disp_chan_func
 gf119_disp_dmac_func = {
 	.init = gf119_disp_dmac_init,
 	.fini = gf119_disp_dmac_fini,
+	.user = nv50_disp_chan_user,
 	.bind = gf119_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index 22fa5925644a..7e6b308eb596 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -61,5 +61,6 @@ const struct nv50_disp_chan_func
 gp102_disp_dmac_func = {
 	.init = gp102_disp_dmac_init,
 	.fini = gf119_disp_dmac_fini,
+	.user = nv50_disp_chan_user,
 	.bind = gf119_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 5cd08cad2c26..5db26e31a799 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -137,5 +137,6 @@ const struct nv50_disp_chan_func
 nv50_disp_dmac_func = {
 	.init = nv50_disp_dmac_init,
 	.fini = nv50_disp_dmac_fini,
+	.user = nv50_disp_chan_user,
 	.bind = nv50_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index 7b1e9bf75abd..5970e40f4d69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -80,4 +80,5 @@ const struct nv50_disp_chan_func
 gf119_disp_pioc_func = {
 	.init = gf119_disp_pioc_init,
 	.fini = gf119_disp_pioc_fini,
+	.user = nv50_disp_chan_user,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 60c20123d84f..0a76bda4ef2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -82,4 +82,5 @@ const struct nv50_disp_chan_func
 nv50_disp_pioc_func = {
 	.init = nv50_disp_pioc_init,
 	.fini = nv50_disp_pioc_fini,
+	.user = nv50_disp_chan_user,
 };

From a9c44a88ca2f957c755bcb2ce8b9d2e031d65f64 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1026/1286] drm/nouveau/disp/nv50-: add channel interfaces to
 control error interrupts

This will be required to support Volta, but it also allows us to remove code
that is currently duplicated for each channel type.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/disp/changf119.c    | 13 +++++++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c | 11 +++++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h |  3 +++
 .../gpu/drm/nouveau/nvkm/engine/disp/coregf119.c    |  8 +-------
 .../gpu/drm/nouveau/nvkm/engine/disp/coregp102.c    |  4 +---
 drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c |  7 +------
 .../gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c    |  8 +-------
 .../gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c    |  4 +---
 drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c |  7 +------
 .../gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c    |  8 +-------
 drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c |  1 +
 11 files changed, 35 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
index 17a3d835cb42..29e6dd58ac48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -47,3 +47,16 @@ gf119_disp_chan_uevent = {
 	.init = gf119_disp_chan_uevent_init,
 	.fini = gf119_disp_chan_uevent_fini,
 };
+
+void
+gf119_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u64 mask = 0x00000001 << chan->chid.user;
+	if (!en) {
+		nvkm_mask(device, 0x610090, mask, 0x00000000);
+		nvkm_mask(device, 0x6100a0, mask, 0x00000000);
+	} else {
+		nvkm_mask(device, 0x6100a0, mask, mask);
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 8e79aa5f52e6..53c3ed6da9ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -162,6 +162,15 @@ nv50_disp_chan_user(struct nv50_disp_chan *chan, u64 *psize)
 	return 0x640000 + (chan->chid.user * 0x1000);
 }
 
+void
+nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u64 mask = 0x00010001 << chan->chid.user;
+	const u64 data = en ? 0x00010000 : 0x00000000;
+	nvkm_mask(device, 0x610028, mask, data);
+}
+
 static int
 nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
 {
@@ -288,6 +297,7 @@ nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	chan->func->fini(chan);
+	chan->func->intr(chan, false);
 	return 0;
 }
 
@@ -295,6 +305,7 @@ static int
 nv50_disp_chan_init(struct nvkm_object *object)
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	chan->func->intr(chan, true);
 	return chan->func->init(chan);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 75ae181da0e8..2a48243b00ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -25,6 +25,7 @@ struct nv50_disp_chan {
 struct nv50_disp_chan_func {
 	int (*init)(struct nv50_disp_chan *);
 	void (*fini)(struct nv50_disp_chan *);
+	void (*intr)(struct nv50_disp_chan *, bool en);
 	u64 (*user)(struct nv50_disp_chan *, u64 *size);
 	int (*bind)(struct nv50_disp_chan *, struct nvkm_object *, u32 handle);
 };
@@ -38,12 +39,14 @@ int nv50_disp_dmac_new_(const struct nv50_disp_chan_func *,
 			struct nv50_disp *, int chid, int head, u64 push,
 			const struct nvkm_oclass *, struct nvkm_object **);
 
+void nv50_disp_chan_intr(struct nv50_disp_chan *, bool);
 u64 nv50_disp_chan_user(struct nv50_disp_chan *, u64 *);
 extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
 extern const struct nv50_disp_chan_func nv50_disp_dmac_func;
 int nv50_disp_dmac_bind(struct nv50_disp_chan *, struct nvkm_object *, u32);
 extern const struct nv50_disp_chan_func nv50_disp_core_func;
 
+void gf119_disp_chan_intr(struct nv50_disp_chan *, bool);
 extern const struct nv50_disp_chan_func gf119_disp_pioc_func;
 extern const struct nv50_disp_chan_func gf119_disp_dmac_func;
 void gf119_disp_dmac_fini(struct nv50_disp_chan *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 9ba4a8cd3dba..d162b9cf4eac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -182,10 +182,6 @@ gf119_disp_core_fini(struct nv50_disp_chan *chan)
 		nvkm_error(subdev, "core fini: %08x\n",
 			   nvkm_rd32(device, 0x610490));
 	}
-
-	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000000);
 }
 
 static int
@@ -194,9 +190,6 @@ gf119_disp_core_init(struct nv50_disp_chan *chan)
 	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
-	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
-
 	/* initialise channel for dma command submission */
 	nvkm_wr32(device, 0x610494, chan->push);
 	nvkm_wr32(device, 0x610498, 0x00010000);
@@ -222,6 +215,7 @@ const struct nv50_disp_chan_func
 gf119_disp_core_func = {
 	.init = gf119_disp_core_init,
 	.fini = gf119_disp_core_fini,
+	.intr = gf119_disp_chan_intr,
 	.user = nv50_disp_chan_user,
 	.bind = gf119_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
index aae5db4dc622..5b7f993c73c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp102.c
@@ -31,9 +31,6 @@ gp102_disp_core_init(struct nv50_disp_chan *chan)
 	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
-	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
-
 	/* initialise channel for dma command submission */
 	nvkm_wr32(device, 0x611494, chan->push);
 	nvkm_wr32(device, 0x611498, 0x00010000);
@@ -59,6 +56,7 @@ static const struct nv50_disp_chan_func
 gp102_disp_core_func = {
 	.init = gp102_disp_core_init,
 	.fini = gf119_disp_core_fini,
+	.intr = gf119_disp_chan_intr,
 	.user = nv50_disp_chan_user,
 	.bind = gf119_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
index 5fd449d32109..55db9a22b4be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -179,9 +179,6 @@ nv50_disp_core_fini(struct nv50_disp_chan *chan)
 		nvkm_error(subdev, "core fini: %08x\n",
 			   nvkm_rd32(device, 0x610200));
 	}
-
-	/* disable error reporting and completion notifications */
-	nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);
 }
 
 static int
@@ -190,9 +187,6 @@ nv50_disp_core_init(struct nv50_disp_chan *chan)
 	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 
-	/* enable error reporting */
-	nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);
-
 	/* attempt to unstick channel from some unknown state */
 	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
 		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
@@ -224,6 +218,7 @@ const struct nv50_disp_chan_func
 nv50_disp_core_func = {
 	.init = nv50_disp_core_init,
 	.fini = nv50_disp_core_fini,
+	.intr = nv50_disp_chan_intr,
 	.user = nv50_disp_chan_user,
 	.bind = nv50_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index 2a6d0728dd2e..edf7dd0d931d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -53,10 +53,6 @@ gf119_disp_dmac_fini(struct nv50_disp_chan *chan)
 		nvkm_error(subdev, "ch %d fini: %08x\n", user,
 			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 	}
-
-	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
 }
 
 static int
@@ -67,9 +63,6 @@ gf119_disp_dmac_init(struct nv50_disp_chan *chan)
 	int ctrl = chan->chid.ctrl;
 	int user = chan->chid.user;
 
-	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
-
 	/* initialise channel for dma command submission */
 	nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
 	nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
@@ -95,6 +88,7 @@ const struct nv50_disp_chan_func
 gf119_disp_dmac_func = {
 	.init = gf119_disp_dmac_init,
 	.fini = gf119_disp_dmac_fini,
+	.intr = gf119_disp_chan_intr,
 	.user = nv50_disp_chan_user,
 	.bind = gf119_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
index 7e6b308eb596..f21a433199aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp102.c
@@ -33,9 +33,6 @@ gp102_disp_dmac_init(struct nv50_disp_chan *chan)
 	int ctrl = chan->chid.ctrl;
 	int user = chan->chid.user;
 
-	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
-
 	/* initialise channel for dma command submission */
 	nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
 	nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
@@ -61,6 +58,7 @@ const struct nv50_disp_chan_func
 gp102_disp_dmac_func = {
 	.init = gp102_disp_dmac_init,
 	.fini = gf119_disp_dmac_fini,
+	.intr = gf119_disp_chan_intr,
 	.user = nv50_disp_chan_user,
 	.bind = gf119_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 5db26e31a799..981b98def151 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -96,9 +96,6 @@ nv50_disp_dmac_fini(struct nv50_disp_chan *chan)
 		nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
 			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 	}
-
-	/* disable error reporting and completion notifications */
-	nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
 }
 
 static int
@@ -109,9 +106,6 @@ nv50_disp_dmac_init(struct nv50_disp_chan *chan)
 	int ctrl = chan->chid.ctrl;
 	int user = chan->chid.user;
 
-	/* enable error reporting */
-	nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
-
 	/* initialise channel for dma command submission */
 	nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
 	nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
@@ -137,6 +131,7 @@ const struct nv50_disp_chan_func
 nv50_disp_dmac_func = {
 	.init = nv50_disp_dmac_init,
 	.fini = nv50_disp_dmac_fini,
+	.intr = nv50_disp_chan_intr,
 	.user = nv50_disp_chan_user,
 	.bind = nv50_disp_dmac_bind,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index 5970e40f4d69..5296e7bee813 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -43,10 +43,6 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
 		nvkm_error(subdev, "ch %d fini: %08x\n", user,
 			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 	}
-
-	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
 }
 
 static int
@@ -58,9 +54,6 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
 	int ctrl = chan->chid.ctrl;
 	int user = chan->chid.user;
 
-	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
-
 	/* activate channel */
 	nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
 	if (nvkm_msec(device, 2000,
@@ -80,5 +73,6 @@ const struct nv50_disp_chan_func
 gf119_disp_pioc_func = {
 	.init = gf119_disp_pioc_init,
 	.fini = gf119_disp_pioc_fini,
+	.intr = gf119_disp_chan_intr,
 	.user = nv50_disp_chan_user,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 0a76bda4ef2a..4faed6fce682 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -82,5 +82,6 @@ const struct nv50_disp_chan_func
 nv50_disp_pioc_func = {
 	.init = nv50_disp_pioc_init,
 	.fini = nv50_disp_pioc_fini,
+	.intr = nv50_disp_chan_intr,
 	.user = nv50_disp_chan_user,
 };

From f5650478ab07c0921127a6a0735253b64073e978 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1027/1286] drm/nouveau/disp/nv50-: pass nvkm_memory objects
 for channel push buffers

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/mem.h    |  2 +
 drivers/gpu/drm/nouveau/include/nvif/mmu.h    |  1 +
 drivers/gpu/drm/nouveau/nv50_display.c        | 48 +++++++------------
 drivers/gpu/drm/nouveau/nvif/mem.c            | 13 +++++
 drivers/gpu/drm/nouveau/nvif/mmu.c            | 11 +++++
 .../drm/nouveau/nvkm/engine/disp/channv50.c   |  1 +
 .../drm/nouveau/nvkm/engine/disp/channv50.h   |  1 +
 .../drm/nouveau/nvkm/engine/disp/dmacnv50.c   | 22 ++++-----
 8 files changed, 55 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/include/nvif/mem.h b/drivers/gpu/drm/nouveau/include/nvif/mem.h
index b542fe38398e..80ee4ab0f016 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mem.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mem.h
@@ -15,4 +15,6 @@ int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
 int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
 		  u64 size, void *argv, u32 argc, struct nvif_mem *);
 void nvif_mem_fini(struct nvif_mem *);
+
+int nvif_mem_init_map(struct nvif_mmu *, u8 type, u64 size, struct nvif_mem *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
index c8cd5b5b0688..747ecf67e403 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -8,6 +8,7 @@ struct nvif_mmu {
 	u8  heap_nr;
 	u8  type_nr;
 	u16 kind_nr;
+	s32 mem;
 
 	struct {
 		u64 size;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2b3ccd850750..e90330e4e8c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -34,6 +34,8 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_edid.h>
 
+#include <nvif/mem.h>
+
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 #include <nvif/cl5070.h>
@@ -400,7 +402,8 @@ struct nv50_dmac_ctxdma {
 
 struct nv50_dmac {
 	struct nv50_chan base;
-	dma_addr_t handle;
+
+	struct nvif_mem push;
 	u32 *ptr;
 
 	struct nvif_object sync;
@@ -482,9 +485,8 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
 }
 
 static void
-nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
+nv50_dmac_destroy(struct nv50_dmac *dmac)
 {
-	struct nvif_device *device = dmac->base.device;
 	struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;
 
 	list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
@@ -496,10 +498,7 @@ nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
 
 	nv50_chan_destroy(&dmac->base);
 
-	if (dmac->ptr) {
-		struct device *dev = nvxx_device(device)->dev;
-		dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
-	}
+	nvif_mem_fini(&dmac->push);
 }
 
 static int
@@ -507,33 +506,24 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
 		 struct nv50_dmac *dmac)
 {
+	struct nouveau_cli *cli = (void *)device->object.client;
 	struct nv50_disp_core_channel_dma_v0 *args = data;
-	struct nvif_object pushbuf;
 	int ret;
 
 	mutex_init(&dmac->lock);
 	INIT_LIST_HEAD(&dmac->ctxdma);
 
-	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
-				       &dmac->handle, GFP_KERNEL);
-	if (!dmac->ptr)
-		return -ENOMEM;
-
-	ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
-			       &(struct nv_dma_v0) {
-					.target = NV_DMA_V0_TARGET_PCI_US,
-					.access = NV_DMA_V0_ACCESS_RD,
-					.start = dmac->handle + 0x0000,
-					.limit = dmac->handle + 0x0fff,
-			       }, sizeof(struct nv_dma_v0), &pushbuf);
+	ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
+				&dmac->push);
 	if (ret)
 		return ret;
 
-	args->pushbuf = nvif_handle(&pushbuf);
+	dmac->ptr = dmac->push.object.map.ptr;
+
+	args->pushbuf = nvif_handle(&dmac->push.object);
 
 	ret = nv50_chan_create(device, disp, oclass, head, data, size,
 			       &dmac->base);
-	nvif_object_fini(&pushbuf);
 	if (ret)
 		return ret;
 
@@ -574,9 +564,7 @@ static int
 nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
 		 u64 syncbuf, struct nv50_mast *core)
 {
-	struct nv50_disp_core_channel_dma_v0 args = {
-		.pushbuf = 0xb0007d00,
-	};
+	struct nv50_disp_core_channel_dma_v0 args = {};
 	static const s32 oclass[] = {
 		GP102_DISP_CORE_CHANNEL_DMA,
 		GP100_DISP_CORE_CHANNEL_DMA,
@@ -612,7 +600,6 @@ nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
 		 int head, u64 syncbuf, struct nv50_sync *base)
 {
 	struct nv50_disp_base_channel_dma_v0 args = {
-		.pushbuf = 0xb0007c00 | head,
 		.head = head,
 	};
 	static const s32 oclass[] = {
@@ -643,7 +630,6 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
 		 int head, u64 syncbuf, struct nv50_ovly *ovly)
 {
 	struct nv50_disp_overlay_channel_dma_v0 args = {
-		.pushbuf = 0xb0007e00 | head,
 		.head = head,
 	};
 	static const s32 oclass[] = {
@@ -1472,9 +1458,8 @@ nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 static void *
 nv50_base_dtor(struct nv50_wndw *wndw)
 {
-	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
 	struct nv50_base *base = nv50_base(wndw);
-	nv50_dmac_destroy(&base->chan.base, disp->disp);
+	nv50_dmac_destroy(&base->chan.base);
 	return base;
 }
 
@@ -2354,11 +2339,10 @@ nv50_head_reset(struct drm_crtc *crtc)
 static void
 nv50_head_destroy(struct drm_crtc *crtc)
 {
-	struct nv50_disp *disp = nv50_disp(crtc->dev);
 	struct nv50_head *head = nv50_head(crtc);
 	int i;
 
-	nv50_dmac_destroy(&head->ovly.base, disp->disp);
+	nv50_dmac_destroy(&head->ovly.base);
 	nv50_pioc_destroy(&head->oimm.base);
 
 	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
@@ -4430,7 +4414,7 @@ nv50_display_destroy(struct drm_device *dev)
 {
 	struct nv50_disp *disp = nv50_disp(dev);
 
-	nv50_dmac_destroy(&disp->mast.base, disp->disp);
+	nv50_dmac_destroy(&disp->mast.base);
 
 	nouveau_bo_unmap(disp->sync);
 	if (disp->sync)
diff --git a/drivers/gpu/drm/nouveau/nvif/mem.c b/drivers/gpu/drm/nouveau/nvif/mem.c
index 0f9382c60145..b6ebb3b58673 100644
--- a/drivers/gpu/drm/nouveau/nvif/mem.c
+++ b/drivers/gpu/drm/nouveau/nvif/mem.c
@@ -24,6 +24,19 @@
 
 #include <nvif/if000a.h>
 
+int
+nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem)
+{
+	int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0,
+				size, NULL, 0, mem);
+	if (ret == 0) {
+		ret = nvif_object_map(&mem->object, NULL, 0);
+		if (ret)
+			nvif_mem_fini(mem);
+	}
+	return ret;
+}
+
 void
 nvif_mem_fini(struct nvif_mem *mem)
 {
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 15d0dcbf7ab4..358ac4f3cf91 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -36,6 +36,12 @@ nvif_mmu_fini(struct nvif_mmu *mmu)
 int
 nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
 {
+	static const struct nvif_mclass mems[] = {
+		{ NVIF_CLASS_MEM_GF100, -1 },
+		{ NVIF_CLASS_MEM_NV50 , -1 },
+		{ NVIF_CLASS_MEM_NV04 , -1 },
+		{}
+	};
 	struct nvif_mmu_v0 args;
 	int ret, i;
 
@@ -54,6 +60,11 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
 	mmu->type_nr = args.type_nr;
 	mmu->kind_nr = args.kind_nr;
 
+	ret = nvif_mclass(&mmu->object, mems);
+	if (ret < 0)
+		goto done;
+	mmu->mem = mems[ret].oclass;
+
 	mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
 	mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
 	if (ret = -ENOMEM, !mmu->heap || !mmu->type)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 53c3ed6da9ae..57719f675eec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -316,6 +316,7 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
 	struct nv50_disp *disp = chan->disp;
 	if (chan->chid.user >= 0)
 		disp->chan[chan->chid.user] = NULL;
+	nvkm_memory_unref(&chan->memory);
 	return chan;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 2a48243b00ae..391b007a6824 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -19,6 +19,7 @@ struct nv50_disp_chan {
 
 	struct nvkm_object object;
 
+	struct nvkm_memory *memory;
 	u64 push;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 981b98def151..9e8a9d7a9b68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -26,6 +26,7 @@
 #include <core/client.h>
 #include <core/ramht.h>
 #include <subdev/fb.h>
+#include <subdev/mmu.h>
 #include <subdev/timer.h>
 #include <engine/dma.h>
 
@@ -37,7 +38,6 @@ nv50_disp_dmac_new_(const struct nv50_disp_chan_func *func,
 		    struct nvkm_object **pobject)
 {
 	struct nvkm_client *client = oclass->client;
-	struct nvkm_dmaobj *dmaobj;
 	struct nv50_disp_chan *chan;
 	int ret;
 
@@ -47,24 +47,22 @@ nv50_disp_dmac_new_(const struct nv50_disp_chan_func *func,
 	if (ret)
 		return ret;
 
-	dmaobj = nvkm_dmaobj_search(client, push);
-	if (IS_ERR(dmaobj))
-		return PTR_ERR(dmaobj);
+	chan->memory = nvkm_umem_search(client, push);
+	if (IS_ERR(chan->memory))
+		return PTR_ERR(chan->memory);
 
-	if (dmaobj->limit - dmaobj->start != 0xfff)
+	if (nvkm_memory_size(chan->memory) < 0x1000)
 		return -EINVAL;
 
-	switch (dmaobj->target) {
-	case NV_MEM_TARGET_VRAM:
-		chan->push = 0x00000001 | dmaobj->start >> 8;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		chan->push = 0x00000003 | dmaobj->start >> 8;
-		break;
+	switch (nvkm_memory_target(chan->memory)) {
+	case NVKM_MEM_TARGET_VRAM: chan->push = 0x00000001; break;
+	case NVKM_MEM_TARGET_NCOH: chan->push = 0x00000002; break;
+	case NVKM_MEM_TARGET_HOST: chan->push = 0x00000003; break;
 	default:
 		return -EINVAL;
 	}
 
+	chan->push |= nvkm_memory_addr(chan->memory) >> 8;
 	return 0;
 }
 

From c5c9127b25b2946369877d16b3c208cf54d4bf54 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1028/1286] drm/nouveau/device: implement a generic method to
 query device-specific properties

We need to fetch data from GPU-specific sub-devices that is not tied to
any particular engine object.

This commit provides the framework to support such queries.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/cl0080.h | 16 +++++++
 .../drm/nouveau/include/nvkm/core/engine.h    |  1 +
 .../drm/nouveau/include/nvkm/core/subdev.h    |  2 +
 drivers/gpu/drm/nouveau/nvkm/core/engine.c    | 15 ++++++
 drivers/gpu/drm/nouveau/nvkm/core/subdev.c    |  8 ++++
 .../gpu/drm/nouveau/nvkm/engine/device/user.c | 47 ++++++++++++++++++-
 6 files changed, 88 insertions(+), 1 deletion(-)
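
[Editor's note, not part of the original patch: a minimal sketch of how a
sub-device could opt into the query framework added here. The .info hook and
the nvkm_subdev_info()/nvkm_engine_info() routing are what this patch
introduces; EXAMPLE_QUERY, its unit value and the reported data are purely
hypothetical placeholders, and the snippet assumes the usual nvkm subdev
headers are in scope.]

/* Hypothetical method id; real ids live in nvif/cl0080.h and carry a
 * NV_DEVICE_INFO_UNIT field so that nvkm_udevice_info_subdev() can pick
 * the sub-device to route the query to. */
#define EXAMPLE_QUERY ((0x00000000ULL) | (0x000000ffULL << 32))

static int
example_subdev_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
{
	switch (mthd) {
	case EXAMPLE_QUERY:
		*data = 0;	/* hypothetical value handed back to the caller */
		return 0;
	default:
		return -ENOSYS;	/* unhandled queries are reported as NV_DEVICE_INFO_INVALID */
	}
}

/* Hooked up via the new member in nvkm_subdev_func: */
static const struct nvkm_subdev_func example_subdev_func = {
	.info = example_subdev_info,
};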

diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 2740278d226b..51a4af6a77eb 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -39,9 +39,25 @@ struct nv_device_info_v0 {
 	char  name[64];
 };
 
+struct nv_device_info_v1 {
+	__u8  version;
+	__u8  count;
+	__u8  pad02[6];
+	struct nv_device_info_v1_data {
+		__u64 mthd; /* NV_DEVICE_INFO_* (see below). */
+		__u64 data;
+	} data[];
+};
+
 struct nv_device_time_v0 {
 	__u8  version;
 	__u8  pad01[7];
 	__u64 time;
 };
+
+#define NV_DEVICE_INFO_UNIT                               (0xffffffffULL << 32)
+#define NV_DEVICE_INFO(n)                          ((n) | (0x00000000ULL << 32))
+
+/* This will be returned for unsupported queries. */
+#define NV_DEVICE_INFO_INVALID                                           ~0ULL
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index ebf8473a39fe..8a2be5b635e2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -18,6 +18,7 @@ struct nvkm_engine_func {
 	void *(*dtor)(struct nvkm_engine *);
 	void (*preinit)(struct nvkm_engine *);
 	int (*oneinit)(struct nvkm_engine *);
+	int (*info)(struct nvkm_engine *, u64 mthd, u64 *data);
 	int (*init)(struct nvkm_engine *);
 	int (*fini)(struct nvkm_engine *, bool suspend);
 	void (*intr)(struct nvkm_engine *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 63df2290177f..85a0777c2ce4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -17,6 +17,7 @@ struct nvkm_subdev_func {
 	void *(*dtor)(struct nvkm_subdev *);
 	int (*preinit)(struct nvkm_subdev *);
 	int (*oneinit)(struct nvkm_subdev *);
+	int (*info)(struct nvkm_subdev *, u64 mthd, u64 *data);
 	int (*init)(struct nvkm_subdev *);
 	int (*fini)(struct nvkm_subdev *, bool suspend);
 	void (*intr)(struct nvkm_subdev *);
@@ -29,6 +30,7 @@ void nvkm_subdev_del(struct nvkm_subdev **);
 int  nvkm_subdev_preinit(struct nvkm_subdev *);
 int  nvkm_subdev_init(struct nvkm_subdev *);
 int  nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
+int  nvkm_subdev_info(struct nvkm_subdev *, u64, u64 *);
 void nvkm_subdev_intr(struct nvkm_subdev *);
 
 /* subdev logging */
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 657231c3c098..d0322ce85172 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -82,6 +82,20 @@ nvkm_engine_intr(struct nvkm_subdev *subdev)
 		engine->func->intr(engine);
 }
 
+static int
+nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->info) {
+		if ((engine = nvkm_engine_ref(engine))) {
+			int ret = engine->func->info(engine, mthd, data);
+			nvkm_engine_unref(&engine);
+			return ret;
+		}
+	}
+	return -ENOSYS;
+}
+
 static int
 nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
 {
@@ -150,6 +164,7 @@ nvkm_engine_func = {
 	.preinit = nvkm_engine_preinit,
 	.init = nvkm_engine_init,
 	.fini = nvkm_engine_fini,
+	.info = nvkm_engine_info,
 	.intr = nvkm_engine_intr,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index c707306ac286..b96f9e2f237a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -92,6 +92,14 @@ nvkm_subdev_intr(struct nvkm_subdev *subdev)
 		subdev->func->intr(subdev);
 }
 
+int
+nvkm_subdev_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
+{
+	if (subdev->func->info)
+		return subdev->func->info(subdev, mthd, data);
+	return -ENOSYS;
+}
+
 int
 nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 17adcb4e8854..3526516765f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -39,6 +39,40 @@ struct nvkm_udevice {
 	struct nvkm_device *device;
 };
 
+static int
+nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data)
+{
+	struct nvkm_subdev *subdev;
+	enum nvkm_devidx subidx;
+
+	switch (mthd & NV_DEVICE_INFO_UNIT) {
+	default:
+		return -EINVAL;
+	}
+
+	subdev = nvkm_device_subdev(device, subidx);
+	if (subdev)
+		return nvkm_subdev_info(subdev, mthd, data);
+	return -ENODEV;
+}
+
+static void
+nvkm_udevice_info_v1(struct nvkm_device *device,
+		     struct nv_device_info_v1_data *args)
+{
+	if (args->mthd & NV_DEVICE_INFO_UNIT) {
+		if (nvkm_udevice_info_subdev(device, args->mthd, &args->data))
+			args->mthd = NV_DEVICE_INFO_INVALID;
+		return;
+	}
+
+	switch (args->mthd) {
+	default:
+		args->mthd = NV_DEVICE_INFO_INVALID;
+		break;
+	}
+}
+
 static int
 nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
 {
@@ -48,10 +82,21 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
 	struct nvkm_instmem *imem = device->imem;
 	union {
 		struct nv_device_info_v0 v0;
+		struct nv_device_info_v1 v1;
 	} *args = data;
-	int ret = -ENOSYS;
+	int ret = -ENOSYS, i;
 
 	nvif_ioctl(object, "device info size %d\n", size);
+	if (!(ret = nvif_unpack(ret, &data, &size, args->v1, 1, 1, true))) {
+		nvif_ioctl(object, "device info vers %d count %d\n",
+			   args->v1.version, args->v1.count);
+		if (args->v1.count * sizeof(args->v1.data[0]) == size) {
+			for (i = 0; i < args->v1.count; i++)
+				nvkm_udevice_info_v1(device, &args->v1.data[i]);
+			return 0;
+		}
+		return -EINVAL;
+	} else
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
 		nvif_ioctl(object, "device info vers %d\n", args->v0.version);
 	} else

From 6eb01aa8988873167adc5285f4afef310d01b8fb Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1029/1286] drm/nouveau/device: support querying available
 engines of a specific type

Will be used for fifo runlist selection.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/cl0080.h | 19 ++++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/device/user.c | 25 +++++++++++++++++++
 2 files changed, 44 insertions(+)
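
[Editor's note, not part of the original patch: a hedged sketch of how a
client might consume the new engine-mask queries, mirroring the
nvif_object_mthd(NV_DEVICE_V0_INFO) pattern the following patch uses for
NV_DEVICE_FIFO_CHANNELS. Here "device" is assumed to be the device's
struct nvif_object and "ce_mask" a u64 in the caller; everything else
comes from nvif/cl0080.h.]

struct {
	struct nv_device_info_v1 m;
	struct {
		struct nv_device_info_v1_data ce;
	} v;
} args = {
	.m.version = 1,
	.m.count = sizeof(args.v) / sizeof(args.v.ce),
	.v.ce.mthd = NV_DEVICE_INFO_ENGINE_CE,
};
int ret;

/* Ask the device which copy engines are present. */
ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
if (ret == 0 && args.v.ce.mthd != NV_DEVICE_INFO_INVALID)
	ce_mask = args.v.ce.data;	/* bits indexed by NVKM_ENGINE_CE<n> */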

diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 51a4af6a77eb..6a54cda9613e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -60,4 +60,23 @@ struct nv_device_time_v0 {
 
 /* This will be returned for unsupported queries. */
 #define NV_DEVICE_INFO_INVALID                                           ~0ULL
+
+/* These return a mask of available engines of particular type. */
+#define NV_DEVICE_INFO_ENGINE_SW                     NV_DEVICE_INFO(0x00000000)
+#define NV_DEVICE_INFO_ENGINE_GR                     NV_DEVICE_INFO(0x00000001)
+#define NV_DEVICE_INFO_ENGINE_MPEG                   NV_DEVICE_INFO(0x00000002)
+#define NV_DEVICE_INFO_ENGINE_ME                     NV_DEVICE_INFO(0x00000003)
+#define NV_DEVICE_INFO_ENGINE_CIPHER                 NV_DEVICE_INFO(0x00000004)
+#define NV_DEVICE_INFO_ENGINE_BSP                    NV_DEVICE_INFO(0x00000005)
+#define NV_DEVICE_INFO_ENGINE_VP                     NV_DEVICE_INFO(0x00000006)
+#define NV_DEVICE_INFO_ENGINE_CE                     NV_DEVICE_INFO(0x00000007)
+#define NV_DEVICE_INFO_ENGINE_SEC                    NV_DEVICE_INFO(0x00000008)
+#define NV_DEVICE_INFO_ENGINE_MSVLD                  NV_DEVICE_INFO(0x00000009)
+#define NV_DEVICE_INFO_ENGINE_MSPDEC                 NV_DEVICE_INFO(0x0000000a)
+#define NV_DEVICE_INFO_ENGINE_MSPPP                  NV_DEVICE_INFO(0x0000000b)
+#define NV_DEVICE_INFO_ENGINE_MSENC                  NV_DEVICE_INFO(0x0000000c)
+#define NV_DEVICE_INFO_ENGINE_VIC                    NV_DEVICE_INFO(0x0000000d)
+#define NV_DEVICE_INFO_ENGINE_SEC2                   NV_DEVICE_INFO(0x0000000e)
+#define NV_DEVICE_INFO_ENGINE_NVDEC                  NV_DEVICE_INFO(0x0000000f)
+#define NV_DEVICE_INFO_ENGINE_NVENC                  NV_DEVICE_INFO(0x00000010)
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 3526516765f8..42a552d314ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -67,6 +67,31 @@ nvkm_udevice_info_v1(struct nvkm_device *device,
 	}
 
 	switch (args->mthd) {
+#define ENGINE__(A,B,C) NV_DEVICE_INFO_ENGINE_##A: { int _i;                   \
+	for (_i = (B), args->data = 0ULL; _i <= (C); _i++) {                   \
+		if (nvkm_device_engine(device, _i))                            \
+			args->data |= BIT_ULL(_i);                             \
+	}                                                                      \
+}
+#define ENGINE_A(A) ENGINE__(A, NVKM_ENGINE_##A   , NVKM_ENGINE_##A)
+#define ENGINE_B(A) ENGINE__(A, NVKM_ENGINE_##A##0, NVKM_ENGINE_##A##_LAST)
+	case ENGINE_A(SW    ); break;
+	case ENGINE_A(GR    ); break;
+	case ENGINE_A(MPEG  ); break;
+	case ENGINE_A(ME    ); break;
+	case ENGINE_A(CIPHER); break;
+	case ENGINE_A(BSP   ); break;
+	case ENGINE_A(VP    ); break;
+	case ENGINE_B(CE    ); break;
+	case ENGINE_A(SEC   ); break;
+	case ENGINE_A(MSVLD ); break;
+	case ENGINE_A(MSPDEC); break;
+	case ENGINE_A(MSPPP ); break;
+	case ENGINE_A(MSENC ); break;
+	case ENGINE_A(VIC   ); break;
+	case ENGINE_A(SEC2  ); break;
+	case ENGINE_A(NVDEC ); break;
+	case ENGINE_B(NVENC ); break;
 	default:
 		args->mthd = NV_DEVICE_INFO_INVALID;
 		break;

From eb47db4f3bb58b0143a911b29417e89f28e1b0c8 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1030/1286] drm/nouveau/fifo: support channel count query

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/cl0080.h |  4 +++
 drivers/gpu/drm/nouveau/include/nvif/device.h |  1 -
 drivers/gpu/drm/nouveau/nouveau_chan.c        | 25 +++++++++++++++++++
 drivers/gpu/drm/nouveau/nouveau_chan.h        |  1 +
 drivers/gpu/drm/nouveau/nouveau_drm.c         |  4 +++
 drivers/gpu/drm/nouveau/nouveau_drv.h         |  6 +++++
 drivers/gpu/drm/nouveau/nouveau_fence.c       | 11 ++++----
 drivers/gpu/drm/nouveau/nouveau_fence.h       |  2 --
 drivers/gpu/drm/nouveau/nv04_fence.c          |  2 --
 drivers/gpu/drm/nouveau/nv10_fence.c          |  2 --
 drivers/gpu/drm/nouveau/nv17_fence.c          |  2 --
 drivers/gpu/drm/nouveau/nv50_fence.c          |  2 --
 drivers/gpu/drm/nouveau/nv84_fence.c          | 11 +++-----
 .../gpu/drm/nouveau/nvkm/engine/device/user.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/base.c   | 14 +++++++++++
 15 files changed, 64 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 6a54cda9613e..5af610ea260e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -57,6 +57,7 @@ struct nv_device_time_v0 {
 
 #define NV_DEVICE_INFO_UNIT                               (0xffffffffULL << 32)
 #define NV_DEVICE_INFO(n)                          ((n) | (0x00000000ULL << 32))
+#define NV_DEVICE_FIFO(n)                          ((n) | (0x00000001ULL << 32))
 
 /* This will be returned for unsupported queries. */
 #define NV_DEVICE_INFO_INVALID                                           ~0ULL
@@ -79,4 +80,7 @@ struct nv_device_time_v0 {
 #define NV_DEVICE_INFO_ENGINE_SEC2                   NV_DEVICE_INFO(0x0000000e)
 #define NV_DEVICE_INFO_ENGINE_NVDEC                  NV_DEVICE_INFO(0x0000000f)
 #define NV_DEVICE_INFO_ENGINE_NVENC                  NV_DEVICE_INFO(0x00000010)
+
+/* Returns the number of available channels. */
+#define NV_DEVICE_FIFO_CHANNELS                      NV_DEVICE_FIFO(0x00000000)
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 6edb6266857e..216dbd9fa616 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -67,6 +67,5 @@ u64  nvif_device_time(struct nvif_device *);
 #include <engine/fifo.h>
 #include <engine/gr.h>
 
-#define nvxx_fifo(a) nvxx_device(a)->fifo
 #define nvxx_gr(a) nvxx_device(a)->gr
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index af1116655910..db69d13f32a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -474,3 +474,28 @@ done:
 	cli->base.super = super;
 	return ret;
 }
+
+int
+nouveau_channels_init(struct nouveau_drm *drm)
+{
+	struct {
+		struct nv_device_info_v1 m;
+		struct {
+			struct nv_device_info_v1_data channels;
+		} v;
+	} args = {
+		.m.version = 1,
+		.m.count = sizeof(args.v) / sizeof(args.v.channels),
+		.v.channels.mthd = NV_DEVICE_FIFO_CHANNELS,
+	};
+	struct nvif_object *device = &drm->client.device.object;
+	int ret;
+
+	ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
+	if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+		return -ENODEV;
+
+	drm->chan.nr = args.v.channels.data;
+	drm->chan.context_base = dma_fence_context_alloc(drm->chan.nr);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 14607c16a2bd..64454c2ebd90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -45,6 +45,7 @@ struct nouveau_channel {
 	atomic_t killed;
 };
 
+int nouveau_channels_init(struct nouveau_drm *);
 
 int  nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
 			 u32 arg0, u32 arg1, struct nouveau_channel **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index bbbf353682e1..dddd42592472 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -307,6 +307,10 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	if (nouveau_noaccel)
 		return;
 
+	ret = nouveau_channels_init(drm);
+	if (ret)
+		return;
+
 	/* initialise synchronisation routines */
 	/*XXX: this is crap, but the fence/channel stuff is a little
 	 *     backwards in some places.  this will be fixed.
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 881b44b89a01..6e1acaec3400 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -170,6 +170,12 @@ struct nouveau_drm {
 	/* synchronisation */
 	void *fence;
 
+	/* Global channel management. */
+	struct {
+		int nr;
+		u64 context_base;
+	} chan;
+
 	/* context for accelerated drm-internal operations */
 	struct nouveau_channel *cechan;
 	struct nouveau_channel *channel;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 503fa94dc06d..412d49bc6e56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -74,15 +74,14 @@ nouveau_fence_signal(struct nouveau_fence *fence)
 }
 
 static struct nouveau_fence *
-nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) {
-	struct nouveau_fence_priv *priv = (void*)drm->fence;
-
+nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
+{
 	if (fence->ops != &nouveau_fence_ops_legacy &&
 	    fence->ops != &nouveau_fence_ops_uevent)
 		return NULL;
 
-	if (fence->context < priv->context_base ||
-	    fence->context >= priv->context_base + priv->contexts)
+	if (fence->context < drm->chan.context_base ||
+	    fence->context >= drm->chan.context_base + drm->chan.nr)
 		return NULL;
 
 	return from_fence(fence);
@@ -176,7 +175,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 	INIT_LIST_HEAD(&fctx->flip);
 	INIT_LIST_HEAD(&fctx->pending);
 	spin_lock_init(&fctx->lock);
-	fctx->context = priv->context_base + chan->chid;
+	fctx->context = chan->drm->chan.context_base + chan->chid;
 
 	if (chan == chan->drm->cechan)
 		strcpy(fctx->name, "copy engine channel");
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 5bd8d30d1657..b999e6058046 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -55,8 +55,6 @@ struct nouveau_fence_priv {
 	int  (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
 
-	u32 contexts;
-	u64 context_base;
 	bool uevent;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index fa8f2375c398..c41e82be4893 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -109,7 +109,5 @@ nv04_fence_create(struct nouveau_drm *drm)
 	priv->base.dtor = nv04_fence_destroy;
 	priv->base.context_new = nv04_fence_context_new;
 	priv->base.context_del = nv04_fence_context_del;
-	priv->base.contexts = 15;
-	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 2998bde29211..4476b712dc84 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -103,8 +103,6 @@ nv10_fence_create(struct nouveau_drm *drm)
 	priv->base.dtor = nv10_fence_destroy;
 	priv->base.context_new = nv10_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
-	priv->base.contexts = 31;
-	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 6477b7069e14..5d613d43b84d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -125,8 +125,6 @@ nv17_fence_create(struct nouveau_drm *drm)
 	priv->base.resume = nv17_fence_resume;
 	priv->base.context_new = nv17_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
-	priv->base.contexts = 31;
-	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index a369d978e267..a00ecc3de053 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -78,8 +78,6 @@ nv50_fence_create(struct nouveau_drm *drm)
 	priv->base.resume = nv17_fence_resume;
 	priv->base.context_new = nv50_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
-	priv->base.contexts = 127;
-	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 5f0c0c27d5dc..090664899247 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -141,9 +141,9 @@ nv84_fence_suspend(struct nouveau_drm *drm)
 	struct nv84_fence_priv *priv = drm->fence;
 	int i;
 
-	priv->suspend = vmalloc(priv->base.contexts * sizeof(u32));
+	priv->suspend = vmalloc(drm->chan.nr * sizeof(u32));
 	if (priv->suspend) {
-		for (i = 0; i < priv->base.contexts; i++)
+		for (i = 0; i < drm->chan.nr; i++)
 			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
 	}
 
@@ -157,7 +157,7 @@ nv84_fence_resume(struct nouveau_drm *drm)
 	int i;
 
 	if (priv->suspend) {
-		for (i = 0; i < priv->base.contexts; i++)
+		for (i = 0; i < drm->chan.nr; i++)
 			nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
 		vfree(priv->suspend);
 		priv->suspend = NULL;
@@ -179,7 +179,6 @@ nv84_fence_destroy(struct nouveau_drm *drm)
 int
 nv84_fence_create(struct nouveau_drm *drm)
 {
-	struct nvkm_fifo *fifo = nvxx_fifo(&drm->client.device);
 	struct nv84_fence_priv *priv;
 	u32 domain;
 	int ret;
@@ -194,8 +193,6 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
 
-	priv->base.contexts = fifo->nr;
-	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
 	mutex_init(&priv->mutex);
@@ -207,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 			  * will lose CPU/GPU coherency!
 			  */
 			 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
-	ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
+	ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
 			     domain, 0, 0, NULL, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, domain, false);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 42a552d314ef..600bdb870462 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -46,6 +46,7 @@ nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data)
 	enum nvkm_devidx subidx;
 
 	switch (mthd & NV_DEVICE_INFO_UNIT) {
+	case NV_DEVICE_FIFO(0): subidx = NVKM_ENGINE_FIFO; break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 64f6b7654a08..49b37a8a94b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -30,6 +30,7 @@
 #include <subdev/mc.h>
 
 #include <nvif/event.h>
+#include <nvif/cl0080.h>
 #include <nvif/unpack.h>
 
 void
@@ -270,6 +271,18 @@ nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
 	return 0;
 }
 
+static int
+nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	switch (mthd) {
+	case NV_DEVICE_FIFO_CHANNELS: *data = fifo->nr; return 0;
+	default:
+		break;
+	}
+	return -ENOSYS;
+}
+
 static int
 nvkm_fifo_oneinit(struct nvkm_engine *engine)
 {
@@ -311,6 +324,7 @@ nvkm_fifo = {
 	.dtor = nvkm_fifo_dtor,
 	.preinit = nvkm_fifo_preinit,
 	.oneinit = nvkm_fifo_oneinit,
+	.info = nvkm_fifo_info,
 	.init = nvkm_fifo_init,
 	.fini = nvkm_fifo_fini,
 	.intr = nvkm_fifo_intr,

From 55b8e85b0b83150a20b17068e3229e64bd174d45 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1031/1286] drm/nouveau/fifo/gk104-: accept engine contexts for
 CE3 and up

These can exist on GP100 and newer.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 80c87521bebe..68461993394f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -62,9 +62,8 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
 {
 	switch (engine->subdev.index) {
 	case NVKM_ENGINE_SW    :
-	case NVKM_ENGINE_CE0   :
-	case NVKM_ENGINE_CE1   :
-	case NVKM_ENGINE_CE2   : return 0x0000;
+	case NVKM_ENGINE_CE0...NVKM_ENGINE_CE_LAST:
+		return 0;
 	case NVKM_ENGINE_GR    : return 0x0210;
 	case NVKM_ENGINE_SEC   : return 0x0220;
 	case NVKM_ENGINE_MSPDEC: return 0x0250;

From ddc669e25645b32158cc2b6da26faf5dac92fffc Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1032/1286] drm/nouveau/fifo/gk104-: allow fault recovery code
 to be called by other subdevs

This will be required to support Volta.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
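(Rough sketch of the intended calling pattern from another subdev, assuming it
has already decoded a raw fault entry into the fields shown; the fault_* names
are illustrative, "device" is the owning struct nvkm_device.)

	struct nvkm_fault_data info = {};

	info.addr   = fault_addr;	/* faulting virtual address */
	info.inst   = fault_inst;	/* channel instance base */
	info.engine = fault_engine;	/* faulting unit/engine id */
	info.valid  = 1;
	info.gpc    = fault_gpc;
	info.hub    = fault_hub;
	info.access = fault_access;
	info.client = fault_client;
	info.reason = fault_reason;
	/* Hand the decoded fault to the fifo for channel/engine recovery. */
	nvkm_fifo_fault(device->fifo, &info);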
 .../drm/nouveau/include/nvkm/engine/fifo.h    |   2 +
 .../drm/nouveau/include/nvkm/subdev/fault.h   |  13 ++
 .../gpu/drm/nouveau/nvkm/engine/fifo/base.c   |   6 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  | 187 ++++++++++--------
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.h  |  21 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk110.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk208.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm107.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm200.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp100.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/priv.h   |   1 +
 14 files changed, 139 insertions(+), 99 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index c17b3a9bf8fb..0d96edee1e6a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -4,6 +4,7 @@
 #include <core/engine.h>
 #include <core/object.h>
 #include <core/event.h>
+struct nvkm_fault_data;
 
 #define NVKM_FIFO_CHID_NR 4096
 
@@ -45,6 +46,7 @@ struct nvkm_fifo {
 	struct nvkm_event kevent; /* channel killed */
 };
 
+void nvkm_fifo_fault(struct nvkm_fifo *, struct nvkm_fault_data *);
 void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
 void nvkm_fifo_start(struct nvkm_fifo *, unsigned long *);
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 8ca66e572779..8e9bc30fe65d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -12,5 +12,18 @@ struct nvkm_fault {
 	struct nvkm_event event;
 };
 
+struct nvkm_fault_data {
+	u64  addr;
+	u64  inst;
+	u64  time;
+	u8 engine;
+	u8  valid;
+	u8    gpc;
+	u8    hub;
+	u8 access;
+	u8 client;
+	u8 reason;
+};
+
 int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 49b37a8a94b7..ed56087b4abe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -56,6 +56,12 @@ nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
 	return fifo->func->start(fifo, flags);
 }
 
+void
+nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
+{
+	return fifo->func->fault(fifo, info);
+}
+
 void
 nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
 		   struct nvkm_fifo_chan **pchan)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 84bd703dd897..b5706b15a64d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -27,6 +27,7 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <subdev/bar.h>
+#include <subdev/fault.h>
 #include <subdev/timer.h>
 #include <subdev/top.h>
 #include <engine/sw.h>
@@ -347,6 +348,90 @@ gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
 	schedule_work(&fifo->recover.work);
 }
 
+static void
+gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const struct nvkm_enum *er, *ee, *ec, *ea;
+	struct nvkm_engine *engine = NULL;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char ct[8] = "HUB/", en[16] = "";
+	int engn;
+
+	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
+	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
+	if (info->hub) {
+		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
+	} else {
+		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
+		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
+	}
+	ea = nvkm_enum_find(fifo->func->fault.access, info->access);
+
+	if (ee && ee->data2) {
+		switch (ee->data2) {
+		case NVKM_SUBDEV_BAR:
+			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+			break;
+		case NVKM_SUBDEV_INSTMEM:
+			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+			break;
+		case NVKM_ENGINE_IFB:
+			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+			break;
+		default:
+			engine = nvkm_device_engine(device, ee->data2);
+			break;
+		}
+	}
+
+	if (ee == NULL) {
+		enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
+		if (engidx < NVKM_SUBDEV_NR) {
+			const char *src = nvkm_subdev_name[engidx];
+			char *dst = en;
+			do {
+				*dst++ = toupper(*src++);
+			} while(*src);
+			engine = nvkm_device_engine(device, engidx);
+		}
+	} else {
+		snprintf(en, sizeof(en), "%s", ee->name);
+	}
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
+
+	nvkm_error(subdev,
+		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
+		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
+		   info->access, ea ? ea->name : "", info->addr,
+		   info->engine, ee ? ee->name : en,
+		   info->client, ct, ec ? ec->name : "",
+		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
+		   info->inst, chan ? chan->object.client->name : "unknown");
+
+	/* Kill the channel that caused the fault. */
+	if (chan)
+		gk104_fifo_recover_chan(&fifo->base, chan->chid);
+
+	/* Channel recovery will probably have already done this for the
+	 * correct engine(s), but just in case we can't find the channel
+	 * information...
+	 */
+	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
+		if (fifo->engine[engn].engine == engine) {
+			gk104_fifo_recover_engn(fifo, engn);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
 static const struct nvkm_enum
 gk104_fifo_bind_reason[] = {
 	{ 0x01, "BIND_NOT_UNBOUND" },
@@ -456,88 +541,21 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
 	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
 	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
-	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
-	u32 gpc    = (stat & 0x1f000000) >> 24;
-	u32 client = (stat & 0x00001f00) >> 8;
-	u32 write  = (stat & 0x00000080);
-	u32 hub    = (stat & 0x00000040);
-	u32 reason = (stat & 0x0000000f);
-	const struct nvkm_enum *er, *eu, *ec;
-	struct nvkm_engine *engine = NULL;
-	struct nvkm_fifo_chan *chan;
-	unsigned long flags;
-	char gpcid[8] = "", en[16] = "";
-	int engn;
+	u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+	struct nvkm_fault_data info;
 
-	er = nvkm_enum_find(fifo->func->fault.reason, reason);
-	eu = nvkm_enum_find(fifo->func->fault.engine, unit);
-	if (hub) {
-		ec = nvkm_enum_find(fifo->func->fault.hubclient, client);
-	} else {
-		ec = nvkm_enum_find(fifo->func->fault.gpcclient, client);
-		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
-	}
+	info.inst   =  (u64)inst << 12;
+	info.addr   = ((u64)vahi << 32) | valo;
+	info.time   = 0;
+	info.engine = unit;
+	info.valid  = 1;
+	info.gpc    = (type & 0x1f000000) >> 24;
+	info.client = (type & 0x00001f00) >> 8;
+	info.access = (type & 0x00000080) >> 7;
+	info.hub    = (type & 0x00000040) >> 6;
+	info.reason = (type & 0x000000ff);
 
-	if (eu && eu->data2) {
-		switch (eu->data2) {
-		case NVKM_SUBDEV_BAR:
-			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
-			break;
-		case NVKM_SUBDEV_INSTMEM:
-			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
-			break;
-		case NVKM_ENGINE_IFB:
-			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
-			break;
-		default:
-			engine = nvkm_device_engine(device, eu->data2);
-			break;
-		}
-	}
-
-	if (eu == NULL) {
-		enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
-		if (engidx < NVKM_SUBDEV_NR) {
-			const char *src = nvkm_subdev_name[engidx];
-			char *dst = en;
-			do {
-				*dst++ = toupper(*src++);
-			} while(*src);
-			engine = nvkm_device_engine(device, engidx);
-		}
-	} else {
-		snprintf(en, sizeof(en), "%s", eu->name);
-	}
-
-	spin_lock_irqsave(&fifo->base.lock, flags);
-	chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12);
-
-	nvkm_error(subdev,
-		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
-		   "reason %02x [%s] on channel %d [%010llx %s]\n",
-		   write ? "write" : "read", (u64)vahi << 32 | valo,
-		   unit, en, client, gpcid, ec ? ec->name : "",
-		   reason, er ? er->name : "", chan ? chan->chid : -1,
-		   (u64)inst << 12,
-		   chan ? chan->object.client->name : "unknown");
-
-
-	/* Kill the channel that caused the fault. */
-	if (chan)
-		gk104_fifo_recover_chan(&fifo->base, chan->chid);
-
-	/* Channel recovery will probably have already done this for the
-	 * correct engine(s), but just in case we can't find the channel
-	 * information...
-	 */
-	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
-		if (fifo->engine[engn].engine == engine) {
-			gk104_fifo_recover_engn(fifo, engn);
-			break;
-		}
-	}
-
-	spin_unlock_irqrestore(&fifo->base.lock, flags);
+	nvkm_fifo_fault(&fifo->base, &info);
 }
 
 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -897,6 +915,7 @@ gk104_fifo_ = {
 	.init = gk104_fifo_init,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
+	.fault = gk104_fifo_fault,
 	.uevent_init = gk104_fifo_uevent_init,
 	.uevent_fini = gk104_fifo_uevent_fini,
 	.recover_chan = gk104_fifo_recover_chan,
@@ -918,6 +937,13 @@ gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
 	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
 }
 
+const struct nvkm_enum
+gk104_fifo_fault_access[] = {
+	{ 0x0, "READ" },
+	{ 0x1, "WRITE" },
+	{}
+};
+
 const struct nvkm_enum
 gk104_fifo_fault_engine[] = {
 	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
@@ -1035,6 +1061,7 @@ gk104_fifo_fault_gpcclient[] = {
 
 static const struct gk104_fifo_func
 gk104_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 1579785cf941..0e8b57275e9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -44,6 +44,7 @@ struct gk104_fifo {
 
 struct gk104_fifo_func {
 	struct {
+		const struct nvkm_enum *access;
 		const struct nvkm_enum *engine;
 		const struct nvkm_enum *reason;
 		const struct nvkm_enum *hubclient;
@@ -59,25 +60,7 @@ void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
 
-static inline u64
-gk104_fifo_engine_subdev(int engine)
-{
-	switch (engine) {
-	case 0: return (1ULL << NVKM_ENGINE_GR) |
-		       (1ULL << NVKM_ENGINE_SW) |
-		       (1ULL << NVKM_ENGINE_CE2);
-	case 1: return (1ULL << NVKM_ENGINE_MSPDEC);
-	case 2: return (1ULL << NVKM_ENGINE_MSPPP);
-	case 3: return (1ULL << NVKM_ENGINE_MSVLD);
-	case 4: return (1ULL << NVKM_ENGINE_CE0);
-	case 5: return (1ULL << NVKM_ENGINE_CE1);
-	case 6: return (1ULL << NVKM_ENGINE_MSENC);
-	default:
-		WARN_ON(1);
-		return 0;
-	}
-}
-
+extern const struct nvkm_enum gk104_fifo_fault_access[];
 extern const struct nvkm_enum gk104_fifo_fault_engine[];
 extern const struct nvkm_enum gk104_fifo_fault_reason[];
 extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index b2f8ab7bf847..ad792b6830e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -26,6 +26,7 @@
 
 static const struct gk104_fifo_func
 gk110_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 160617d376e4..5402d22462e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -26,6 +26,7 @@
 
 static const struct gk104_fifo_func
 gk208_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index be9f5c16ed7d..0d7f9f59f80c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -24,6 +24,7 @@
 
 static const struct gk104_fifo_func
 gk20a_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 29c080683b32..58a46ee5ee44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -49,6 +49,7 @@ gm107_fifo_fault_engine[] = {
 
 static const struct gk104_fifo_func
 gm107_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index b069f785c5d8..f84d5398aebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -26,6 +26,7 @@
 
 static const struct gk104_fifo_func
 gm200_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 2ed87c2e8299..66399b9572a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -24,6 +24,7 @@
 
 static const struct gk104_fifo_func
 gm20b_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 41f16cf5a918..2b8a6cff7a68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -50,6 +50,7 @@ gp100_fifo_fault_engine[] = {
 
 static const struct gk104_fifo_func
 gp100_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gp100_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 4af96c3e69ff..71b8d93b4368 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -24,6 +24,7 @@
 
 static const struct gk104_fifo_func
 gp10b_fifo = {
+	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gp100_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index ae76b1aaccd4..df74b54773b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -21,6 +21,7 @@ struct nvkm_fifo_func {
 	void (*init)(struct nvkm_fifo *);
 	void (*fini)(struct nvkm_fifo *);
 	void (*intr)(struct nvkm_fifo *);
+	void (*fault)(struct nvkm_fifo *, struct nvkm_fault_data *);
 	void (*pause)(struct nvkm_fifo *, unsigned long *);
 	void (*start)(struct nvkm_fifo *, unsigned long *);
 	void (*uevent_init)(struct nvkm_fifo *);

From cc36205085bb6e3a4eed1edbe413fd2235cadb27 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1033/1286] drm/nouveau/fifo/gk104-: support querying engines
 available on each runlist

Will be used to improve channel runlist selection.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
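(Client-side usage sketch, assuming "device" is an initialised struct
nvif_device; the per-runlist engine masks are fetched and cached on first use,
see nvif/fifo.c below.)

	/* Mask of runlists that can host GR work. */
	u64 gr_runm = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);

	/* Mask of runlists with a copy engine, preferring ones not shared
	 * with GR (the "GRCE") when an asynchronous CE exists. */
	u64 ce_runm = nvif_fifo_runlist_ce(device);

	/* A zero mask means the query failed or no runlist exposes the
	 * requested engine type. */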
 drivers/gpu/drm/nouveau/include/nvif/cl0080.h |  7 ++
 drivers/gpu/drm/nouveau/include/nvif/device.h |  5 +
 drivers/gpu/drm/nouveau/include/nvif/fifo.h   | 18 ++++
 drivers/gpu/drm/nouveau/nvif/Kbuild           |  1 +
 drivers/gpu/drm/nouveau/nvif/device.c         |  3 +
 drivers/gpu/drm/nouveau/nvif/fifo.c           | 99 +++++++++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/fifo/base.c   |  2 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  | 30 ++++++
 .../gpu/drm/nouveau/nvkm/engine/fifo/priv.h   |  1 +
 9 files changed, 166 insertions(+)
 create mode 100644 drivers/gpu/drm/nouveau/include/nvif/fifo.h
 create mode 100644 drivers/gpu/drm/nouveau/nvif/fifo.c

diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 5af610ea260e..49c1c90d2bde 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -83,4 +83,11 @@ struct nv_device_time_v0 {
 
 /* Returns the number of available channels. */
 #define NV_DEVICE_FIFO_CHANNELS                      NV_DEVICE_FIFO(0x00000000)
+
+/* Returns a mask of available runlists. */
+#define NV_DEVICE_FIFO_RUNLISTS                      NV_DEVICE_FIFO(0x00000001)
+
+/* These return a mask of engines available on a particular runlist. */
+#define NV_DEVICE_FIFO_RUNLIST_ENGINES(n)     ((n) + NV_DEVICE_FIFO(0x00000010))
+#define NV_DEVICE_FIFO_RUNLIST_ENGINES__SIZE                                64
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 216dbd9fa616..76fe21e395de 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -8,6 +8,11 @@
 struct nvif_device {
 	struct nvif_object object;
 	struct nv_device_info_v0 info;
+
+	struct nvif_fifo_runlist {
+		u64 engines;
+	} *runlist;
+	int runlists;
 };
 
 int  nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/fifo.h b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
new file mode 100644
index 000000000000..e9468c9f9abf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/fifo.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_FIFO_H__
+#define __NVIF_FIFO_H__
+#include <nvif/device.h>
+
+/* Returns mask of runlists that support a NV_DEVICE_INFO_ENGINE_* type. */
+u64 nvif_fifo_runlist(struct nvif_device *, u64 engine);
+
+/* CE-supporting runlists (excluding GRCE, if others exist). */
+static inline u64
+nvif_fifo_runlist_ce(struct nvif_device *device)
+{
+	u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
+	u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_CE);
+	if (runmce && !(runmce &= ~runmgr))
+		runmce = runmgr;
+	return runmce;
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index f1675a4ab6fa..c817b02b7acf 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -2,6 +2,7 @@ nvif-y := nvif/object.o
 nvif-y += nvif/client.o
 nvif-y += nvif/device.o
 nvif-y += nvif/driver.o
+nvif-y += nvif/fifo.o
 nvif-y += nvif/mem.o
 nvif-y += nvif/mmu.o
 nvif-y += nvif/notify.o
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 252d8c33215b..ca5eb3dde70a 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -37,6 +37,8 @@ nvif_device_time(struct nvif_device *device)
 void
 nvif_device_fini(struct nvif_device *device)
 {
+	kfree(device->runlist);
+	device->runlist = NULL;
 	nvif_object_fini(&device->object);
 }
 
@@ -46,6 +48,7 @@ nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
 {
 	int ret = nvif_object_init(parent, handle, oclass, data, size,
 				   &device->object);
+	device->runlist = NULL;
 	if (ret == 0) {
 		device->info.version = 0;
 		ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
diff --git a/drivers/gpu/drm/nouveau/nvif/fifo.c b/drivers/gpu/drm/nouveau/nvif/fifo.c
new file mode 100644
index 000000000000..99d4fd17543c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/fifo.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/fifo.h>
+
+static int
+nvif_fifo_runlists(struct nvif_device *device)
+{
+	struct nvif_object *object = &device->object;
+	struct {
+		struct nv_device_info_v1 m;
+		struct {
+			struct nv_device_info_v1_data runlists;
+			struct nv_device_info_v1_data runlist[64];
+		} v;
+	} *a;
+	int ret, i;
+
+	if (device->runlist)
+		return 0;
+
+	if (!(a = kmalloc(sizeof(*a), GFP_KERNEL)))
+		return -ENOMEM;
+	a->m.version = 1;
+	a->m.count = sizeof(a->v) / sizeof(a->v.runlists);
+	a->v.runlists.mthd = NV_DEVICE_FIFO_RUNLISTS;
+	for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++)
+		a->v.runlist[i].mthd = NV_DEVICE_FIFO_RUNLIST_ENGINES(i);
+
+	ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, a, sizeof(*a));
+	if (ret)
+		goto done;
+
+	device->runlists = fls64(a->v.runlists.data);
+	device->runlist = kzalloc(sizeof(*device->runlist) *
+				  device->runlists, GFP_KERNEL);
+	if (!device->runlist) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < device->runlists; i++) {
+		if (a->v.runlists.data & BIT_ULL(i))
+			device->runlist[i].engines = a->v.runlist[i].data;
+	}
+
+done:
+	kfree(a);
+	return ret;
+}
+
+u64
+nvif_fifo_runlist(struct nvif_device *device, u64 engine)
+{
+	struct nvif_object *object = &device->object;
+	struct {
+		struct nv_device_info_v1 m;
+		struct {
+			struct nv_device_info_v1_data engine;
+		} v;
+	} a = {
+		.m.version = 1,
+		.m.count = sizeof(a.v) / sizeof(a.v.engine),
+		.v.engine.mthd = engine,
+	};
+	u64 runm = 0;
+	int ret, i;
+
+	if ((ret = nvif_fifo_runlists(device)))
+		return runm;
+
+	ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, &a, sizeof(a));
+	if (ret == 0) {
+		for (i = 0; i < device->runlists; i++) {
+			if (device->runlist[i].engines & a.v.engine.data)
+				runm |= BIT_ULL(i);
+		}
+	}
+
+	return runm;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index ed56087b4abe..1642d8ea68f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -284,6 +284,8 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
 	switch (mthd) {
 	case NV_DEVICE_FIFO_CHANNELS: *data = fifo->nr; return 0;
 	default:
+		if (fifo->func->info)
+			return fifo->func->info(fifo, mthd, data);
 		break;
 	}
 	return -ENOSYS;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index b5706b15a64d..b6c23cf43f83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -33,6 +33,7 @@
 #include <engine/sw.h>
 
 #include <nvif/class.h>
+#include <nvif/cl0080.h>
 
 struct gk104_fifo_engine_status {
 	bool busy;
@@ -783,6 +784,34 @@ gk104_fifo_fini(struct nvkm_fifo *base)
 	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
 }
 
+static int
+gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	switch (mthd) {
+	case NV_DEVICE_FIFO_RUNLISTS:
+		*data = (1ULL << fifo->runlist_nr) - 1;
+		return 0;
+	case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
+	     NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
+		int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
+		if (runl < fifo->runlist_nr) {
+			unsigned long engm = fifo->runlist[runl].engm;
+			struct nvkm_engine *engine;
+			*data = 0;
+			for_each_set_bit(engn, &engm, fifo->engine_nr) {
+				if ((engine = fifo->engine[engn].engine))
+					*data |= BIT_ULL(engine->subdev.index);
+			}
+			return 0;
+		}
+	}
+		return -EINVAL;
+	default:
+		return -EINVAL;
+	}
+}
+
 static int
 gk104_fifo_oneinit(struct nvkm_fifo *base)
 {
@@ -912,6 +941,7 @@ static const struct nvkm_fifo_func
 gk104_fifo_ = {
 	.dtor = gk104_fifo_dtor,
 	.oneinit = gk104_fifo_oneinit,
+	.info = gk104_fifo_info,
 	.init = gk104_fifo_init,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index df74b54773b9..f9b4e9d2c08a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -18,6 +18,7 @@ struct nvkm_fifo_chan_oclass;
 struct nvkm_fifo_func {
 	void *(*dtor)(struct nvkm_fifo *);
 	int (*oneinit)(struct nvkm_fifo *);
+	int (*info)(struct nvkm_fifo *, u64 mthd, u64 *data);
 	void (*init)(struct nvkm_fifo *);
 	void (*fini)(struct nvkm_fifo *);
 	void (*intr)(struct nvkm_fifo *);

From a7cf01809bf23b95413d8047bd91cdc3cedd1ca1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1034/1286] drm/nouveau/fifo/gk104-: require explicit runlist
 selection for channel allocation

We previously weren't aware that runlist/engine IDs aren't the same thing,
or that there was such variability in configuration between GPUs.

By exposing this information to a client, and giving it explicit control
of which runlist it's allocating a channel on, we're able to make better
choices.

The immediate effect of this is that on GPUs where CE0 is the "GRCE",
BO migrations will now be allocated a copy engine that runs
asynchronously to GR - as intended.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
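(Client-side sketch of the new allocation arguments, mirroring
nouveau_channel_ind() below; "chan", "cli" and "device" come from the
surrounding driver code and are assumed here.)

	args.kepler.version = 0;
	args.kepler.ilength = 0x02000;
	args.kepler.ioffset = 0x10000 + chan->push.addr;
	/* A runlist mask now replaces the old per-engine mask; e.g. pick an
	 * asynchronous copy engine runlist for BO migrations. */
	args.kepler.runlist = nvif_fifo_runlist_ce(device);
	args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);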
 drivers/gpu/drm/nouveau/include/nvif/cla06f.h | 18 +---
 drivers/gpu/drm/nouveau/nouveau_abi16.c       | 35 ++++----
 drivers/gpu/drm/nouveau/nouveau_chan.c        |  4 +-
 drivers/gpu/drm/nouveau/nouveau_drm.c         |  8 +-
 .../nouveau/nvkm/engine/fifo/gpfifogk104.c    | 83 ++++---------------
 5 files changed, 43 insertions(+), 105 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 56f5bd81e480..fbfcffc5feb2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -4,25 +4,11 @@
 
 struct kepler_channel_gpfifo_a_v0 {
 	__u8  version;
-	__u8  pad01[5];
+	__u8  pad01[1];
 	__u16 chid;
-#define NVA06F_V0_ENGINE_SW                                          0x00000001
-#define NVA06F_V0_ENGINE_GR                                          0x00000002
-#define NVA06F_V0_ENGINE_SEC                                         0x00000004
-#define NVA06F_V0_ENGINE_MSVLD                                       0x00000010
-#define NVA06F_V0_ENGINE_MSPDEC                                      0x00000020
-#define NVA06F_V0_ENGINE_MSPPP                                       0x00000040
-#define NVA06F_V0_ENGINE_MSENC                                       0x00000080
-#define NVA06F_V0_ENGINE_VIC                                         0x00000100
-#define NVA06F_V0_ENGINE_NVDEC                                       0x00000200
-#define NVA06F_V0_ENGINE_NVENC0                                      0x00000400
-#define NVA06F_V0_ENGINE_NVENC1                                      0x00000800
-#define NVA06F_V0_ENGINE_CE0                                         0x00010000
-#define NVA06F_V0_ENGINE_CE1                                         0x00020000
-#define NVA06F_V0_ENGINE_CE2                                         0x00040000
-	__u32 engines;
 	__u32 ilength;
 	__u64 ioffset;
+	__u64 runlist;
 	__u64 vmm;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index ece650a0c5f9..ea2472770b21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -23,6 +23,7 @@
 
 #include <nvif/client.h>
 #include <nvif/driver.h>
+#include <nvif/fifo.h>
 #include <nvif/ioctl.h>
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
@@ -256,6 +257,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
 	struct nouveau_abi16_chan *chan;
 	struct nvif_device *device;
+	u64 engine;
 	int ret;
 
 	if (unlikely(!abi16))
@@ -268,25 +270,26 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 
 	/* hack to allow channel engine type specification on kepler */
 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
-		if (init->fb_ctxdma_handle != ~0)
-			init->fb_ctxdma_handle = NVA06F_V0_ENGINE_GR;
-		else {
-			init->fb_ctxdma_handle = 0;
-#define _(A,B) if (init->tt_ctxdma_handle & (A)) init->fb_ctxdma_handle |= (B)
-			_(0x01, NVA06F_V0_ENGINE_GR);
-			_(0x02, NVA06F_V0_ENGINE_MSPDEC);
-			_(0x04, NVA06F_V0_ENGINE_MSPPP);
-			_(0x08, NVA06F_V0_ENGINE_MSVLD);
-			_(0x10, NVA06F_V0_ENGINE_CE0);
-			_(0x20, NVA06F_V0_ENGINE_CE1);
-			_(0x40, NVA06F_V0_ENGINE_MSENC);
-#undef _
+		if (init->fb_ctxdma_handle == ~0) {
+			switch (init->tt_ctxdma_handle) {
+			case 0x01: engine = NV_DEVICE_INFO_ENGINE_GR    ; break;
+			case 0x02: engine = NV_DEVICE_INFO_ENGINE_MSPDEC; break;
+			case 0x04: engine = NV_DEVICE_INFO_ENGINE_MSPPP ; break;
+			case 0x08: engine = NV_DEVICE_INFO_ENGINE_MSVLD ; break;
+			case 0x30: engine = NV_DEVICE_INFO_ENGINE_CE    ; break;
+			default:
+				return nouveau_abi16_put(abi16, -ENOSYS);
+			}
+		} else {
+			engine = NV_DEVICE_INFO_ENGINE_GR;
 		}
 
-		/* allow flips to be executed if this is a graphics channel */
+		if (engine != NV_DEVICE_INFO_ENGINE_CE)
+			engine = nvif_fifo_runlist(device, engine);
+		else
+			engine = nvif_fifo_runlist_ce(device);
+		init->fb_ctxdma_handle = engine;
 		init->tt_ctxdma_handle = 0;
-		if (init->fb_ctxdma_handle == NVA06F_V0_ENGINE_GR)
-			init->tt_ctxdma_handle = 1;
 	}
 
 	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index db69d13f32a7..67950a5c56ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -214,7 +214,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 
 static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
-		    u32 engine, struct nouveau_channel **pchan)
+		    u64 runlist, struct nouveau_channel **pchan)
 {
 	struct nouveau_cli *cli = (void *)device->object.client;
 	static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
@@ -245,9 +245,9 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 	do {
 		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
 			args.kepler.version = 0;
-			args.kepler.engines = engine;
 			args.kepler.ilength = 0x02000;
 			args.kepler.ioffset = 0x10000 + chan->push.addr;
+			args.kepler.runlist = runlist;
 			args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
 			size = sizeof(args.kepler);
 		} else
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index dddd42592472..6caece4f2f5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -38,6 +38,7 @@
 #include <core/tegra.h>
 
 #include <nvif/driver.h>
+#include <nvif/fifo.h>
 
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
@@ -358,13 +359,12 @@ nouveau_accel_init(struct nouveau_drm *drm)
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 		ret = nouveau_channel_new(drm, &drm->client.device,
-					  NVA06F_V0_ENGINE_CE0 |
-					  NVA06F_V0_ENGINE_CE1,
-					  0, &drm->cechan);
+					  nvif_fifo_runlist_ce(device), 0,
+					  &drm->cechan);
 		if (ret)
 			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
-		arg0 = NVA06F_V0_ENGINE_GR;
+		arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
 		arg1 = 1;
 	} else
 	if (device->info.chipset >= 0xa3 &&
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 68461993394f..e331ab1b702b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -222,62 +222,30 @@ gk104_fifo_gpfifo_func = {
 	.engine_fini = gk104_fifo_gpfifo_engine_fini,
 };
 
-struct gk104_fifo_chan_func {
-	u32 engine;
-	u64 subdev;
-};
-
 static int
-gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
-		       struct gk104_fifo *fifo, u32 *engmask, u16 *chid,
+gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 		       u64 vmm, u64 ioffset, u64 ilength,
 		       const struct nvkm_oclass *oclass,
 		       struct nvkm_object **pobject)
 {
 	struct gk104_fifo_chan *chan;
-	int runlist = -1, ret = -ENOSYS, i, j;
-	u32 engines = 0, present = 0;
+	int runlist = ffs(*runlists) -1, ret, i;
+	unsigned long engm;
 	u64 subdevs = 0;
 	u64 usermem;
 
-	if (!vmm)
+	if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
 		return -EINVAL;
+	*runlists = BIT_ULL(runlist);
 
-	/* Determine which downstream engines are present */
-	for (i = 0; i < fifo->engine_nr; i++) {
-		struct nvkm_engine *engine = fifo->engine[i].engine;
-		if (engine) {
-			u64 submask = BIT_ULL(engine->subdev.index);
-			for (j = 0; func[j].subdev; j++) {
-				if (func[j].subdev & submask) {
-					present |= func[j].engine;
-					break;
-				}
-			}
-
-			if (!func[j].subdev)
-				continue;
-
-			if (runlist < 0 && (*engmask & present))
-				runlist = fifo->engine[i].runl;
-			if (runlist == fifo->engine[i].runl) {
-				engines |= func[j].engine;
-				subdevs |= func[j].subdev;
-			}
-		}
+	engm = fifo->runlist[runlist].engm;
+	for_each_set_bit(i, &engm, fifo->engine_nr) {
+		if (fifo->engine[i].engine)
+			subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
 	}
 
-	/* Just an engine mask query?  All done here! */
-	if (!*engmask) {
-		*engmask = present;
-		return nvkm_object_new(oclass, NULL, 0, pobject);
-	}
-
-	/* No runlist?  No supported engines. */
-	*engmask = present;
-	if (runlist < 0)
-		return -ENODEV;
-	*engmask = engines;
+	if (subdevs & BIT_ULL(NVKM_ENGINE_GR))
+		subdevs |= BIT_ULL(NVKM_ENGINE_SW);
 
 	/* Allocate the channel. */
 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
@@ -327,26 +295,6 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
 	return 0;
 }
 
-static const struct gk104_fifo_chan_func
-gk104_fifo_gpfifo[] = {
-	{ NVA06F_V0_ENGINE_SW | NVA06F_V0_ENGINE_GR,
-		BIT_ULL(NVKM_ENGINE_SW) | BIT_ULL(NVKM_ENGINE_GR)
-	},
-	{ NVA06F_V0_ENGINE_SEC   , BIT_ULL(NVKM_ENGINE_SEC   ) },
-	{ NVA06F_V0_ENGINE_MSVLD , BIT_ULL(NVKM_ENGINE_MSVLD ) },
-	{ NVA06F_V0_ENGINE_MSPDEC, BIT_ULL(NVKM_ENGINE_MSPDEC) },
-	{ NVA06F_V0_ENGINE_MSPPP , BIT_ULL(NVKM_ENGINE_MSPPP ) },
-	{ NVA06F_V0_ENGINE_MSENC , BIT_ULL(NVKM_ENGINE_MSENC ) },
-	{ NVA06F_V0_ENGINE_VIC   , BIT_ULL(NVKM_ENGINE_VIC   ) },
-	{ NVA06F_V0_ENGINE_NVDEC , BIT_ULL(NVKM_ENGINE_NVDEC ) },
-	{ NVA06F_V0_ENGINE_NVENC0, BIT_ULL(NVKM_ENGINE_NVENC0) },
-	{ NVA06F_V0_ENGINE_NVENC1, BIT_ULL(NVKM_ENGINE_NVENC1) },
-	{ NVA06F_V0_ENGINE_CE0   , BIT_ULL(NVKM_ENGINE_CE0   ) },
-	{ NVA06F_V0_ENGINE_CE1   , BIT_ULL(NVKM_ENGINE_CE1   ) },
-	{ NVA06F_V0_ENGINE_CE2   , BIT_ULL(NVKM_ENGINE_CE2   ) },
-	{}
-};
-
 int
 gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		      void *data, u32 size, struct nvkm_object **pobject)
@@ -361,11 +309,12 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
 		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
-				   "ioffset %016llx ilength %08x engine %08x\n",
+				   "ioffset %016llx ilength %08x "
+				   "runlist %016llx\n",
 			   args->v0.version, args->v0.vmm, args->v0.ioffset,
-			   args->v0.ilength, args->v0.engines);
-		return gk104_fifo_gpfifo_new_(gk104_fifo_gpfifo, fifo,
-					      &args->v0.engines,
+			   args->v0.ilength, args->v0.runlist);
+		return gk104_fifo_gpfifo_new_(fifo,
+					      &args->v0.runlist,
 					      &args->v0.chid,
 					       args->v0.vmm,
 					       args->v0.ioffset,

From f9360c3aa61f792de3c839c63bfadf8640255d8c Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1035/1286] drm/nouveau/fifo/gk104-: simplify definition of
 channel classes

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/fifo/Kbuild   |  3 --
 .../gpu/drm/nouveau/nvkm/engine/fifo/base.c   | 24 +++++++++----
 .../drm/nouveau/nvkm/engine/fifo/changk104.h  |  7 +---
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  | 28 ++++++++++-----
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.h  |  6 +++-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk110.c  |  7 ++--
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk208.c  |  7 ++--
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c  |  7 ++--
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm107.c  |  7 ++--
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm200.c  |  7 ++--
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c  |  7 ++--
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp100.c  |  7 ++--
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c  |  7 ++--
 .../nouveau/nvkm/engine/fifo/gpfifogk104.c    | 11 +-----
 .../nouveau/nvkm/engine/fifo/gpfifogk110.c    | 34 -------------------
 .../nouveau/nvkm/engine/fifo/gpfifogm200.c    | 34 -------------------
 .../nouveau/nvkm/engine/fifo/gpfifogp100.c    | 34 -------------------
 .../gpu/drm/nouveau/nvkm/engine/fifo/priv.h   |  5 +--
 18 files changed, 71 insertions(+), 171 deletions(-)
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
 delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 64e51838edf8..b888ea64df21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -31,6 +31,3 @@ nvkm-y += nvkm/engine/fifo/gpfifonv50.o
 nvkm-y += nvkm/engine/fifo/gpfifog84.o
 nvkm-y += nvkm/engine/fifo/gpfifogf100.o
 nvkm-y += nvkm/engine/fifo/gpfifogk104.o
-nvkm-y += nvkm/engine/fifo/gpfifogk110.o
-nvkm-y += nvkm/engine/fifo/gpfifogm200.o
-nvkm-y += nvkm/engine/fifo/gpfifogp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 1642d8ea68f1..c773caf21f6b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -215,6 +215,20 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo)
 	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
 }
 
+static int
+nvkm_fifo_class_new_(struct nvkm_device *device,
+		     const struct nvkm_oclass *oclass, void *data, u32 size,
+		     struct nvkm_object **pobject)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+	return fifo->func->class_new(fifo, oclass, data, size, pobject);
+}
+
+static const struct nvkm_device_oclass
+nvkm_fifo_class_ = {
+	.ctor = nvkm_fifo_class_new_,
+};
+
 static int
 nvkm_fifo_class_new(struct nvkm_device *device,
 		    const struct nvkm_oclass *oclass, void *data, u32 size,
@@ -239,13 +253,9 @@ nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
 	int c = 0;
 
 	if (fifo->func->class_get) {
-		int ret = fifo->func->class_get(fifo, index, &sclass);
-		if (ret == 0) {
-			oclass->base = sclass->base;
-			oclass->engn = sclass;
-			*class = &nvkm_fifo_class;
-			return 0;
-		}
+		int ret = fifo->func->class_get(fifo, index, oclass);
+		if (ret == 0)
+			*class = &nvkm_fifo_class_;
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 1208e3d9dbe2..08b4415f0e24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -19,11 +19,6 @@ struct gk104_fifo_chan {
 	} engn[NVKM_SUBDEV_NR];
 };
 
-int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
+int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
 			  void *data, u32 size, struct nvkm_object **);
-
-extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index b6c23cf43f83..316b2b1d08e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -94,16 +94,30 @@ gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
 		   status->chan == &status->next ? "*" : " ");
 }
 
+static int
+gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		     void *argv, u32 argc, struct nvkm_object **pobject)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	if (oclass->engn == &fifo->func->chan) {
+		const struct gk104_fifo_chan_user *user = oclass->engn;
+		return user->ctor(fifo, oclass, argv, argc, pobject);
+	}
+	WARN_ON(1);
+	return -EINVAL;
+}
+
 static int
 gk104_fifo_class_get(struct nvkm_fifo *base, int index,
-		     const struct nvkm_fifo_chan_oclass **psclass)
+		     struct nvkm_oclass *oclass)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
 	int c = 0;
 
-	while ((*psclass = fifo->func->chan[c])) {
-		if (c++ == index)
-			return 0;
+	if (fifo->func->chan.ctor && c++ == index) {
+		oclass->base =  fifo->func->chan.user;
+		oclass->engn = &fifo->func->chan;
+		return 0;
 	}
 
 	return c;
@@ -950,6 +964,7 @@ gk104_fifo_ = {
 	.uevent_fini = gk104_fifo_uevent_fini,
 	.recover_chan = gk104_fifo_recover_chan,
 	.class_get = gk104_fifo_class_get,
+	.class_new = gk104_fifo_class_new,
 };
 
 int
@@ -1096,10 +1111,7 @@ gk104_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gk104_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 0e8b57275e9d..41f1f367eaeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -51,7 +51,11 @@ struct gk104_fifo_func {
 		const struct nvkm_enum *gpcclient;
 	} fault;
 
-	const struct nvkm_fifo_chan_oclass *chan[];
+	struct gk104_fifo_chan_user {
+		struct nvkm_sclass user;
+		int (*ctor)(struct gk104_fifo *, const struct nvkm_oclass *,
+			    void *, u32, struct nvkm_object **);
+	} chan;
 };
 
 int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index ad792b6830e5..9611bf1ad0b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -24,6 +24,8 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <nvif/class.h>
+
 static const struct gk104_fifo_func
 gk110_fifo = {
 	.fault.access = gk104_fifo_fault_access,
@@ -31,10 +33,7 @@ gk110_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gk110_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 5402d22462e8..fabc690ddd56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -24,6 +24,8 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <nvif/class.h>
+
 static const struct gk104_fifo_func
 gk208_fifo = {
 	.fault.access = gk104_fifo_fault_access,
@@ -31,10 +33,7 @@ gk208_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gk104_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index 0d7f9f59f80c..dee1bd76882a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -22,6 +22,8 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <nvif/class.h>
+
 static const struct gk104_fifo_func
 gk20a_fifo = {
 	.fault.access = gk104_fifo_fault_access,
@@ -29,10 +31,7 @@ gk20a_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gk104_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 58a46ee5ee44..09f97827167a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -24,6 +24,8 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <nvif/class.h>
+
 const struct nvkm_enum
 gm107_fifo_fault_engine[] = {
 	{ 0x01, "DISPLAY" },
@@ -54,10 +56,7 @@ gm107_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gk110_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index f84d5398aebe..052b7c2c5d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -24,6 +24,8 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <nvif/class.h>
+
 static const struct gk104_fifo_func
 gm200_fifo = {
 	.fault.access = gk104_fifo_fault_access,
@@ -31,10 +33,7 @@ gm200_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gm200_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 66399b9572a6..844787c6fb62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -22,6 +22,8 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <nvif/class.h>
+
 static const struct gk104_fifo_func
 gm20b_fifo = {
 	.fault.access = gk104_fifo_fault_access,
@@ -29,10 +31,7 @@ gm20b_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gm200_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 2b8a6cff7a68..2e31847e2e4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -24,6 +24,8 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <nvif/class.h>
+
 const struct nvkm_enum
 gp100_fifo_fault_engine[] = {
 	{ 0x01, "DISPLAY" },
@@ -55,10 +57,7 @@ gp100_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gp100_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 71b8d93b4368..38ab6e17ec15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -22,6 +22,8 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <nvif/class.h>
+
 static const struct gk104_fifo_func
 gp10b_fifo = {
 	.fault.access = gk104_fifo_fault_access,
@@ -29,10 +31,7 @@ gp10b_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.chan = {
-		&gp100_fifo_gpfifo_oclass,
-		NULL
-	},
+	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index e331ab1b702b..f7a4e0e86b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -296,14 +296,13 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 }
 
 int
-gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
 		      void *data, u32 size, struct nvkm_object **pobject)
 {
 	struct nvkm_object *parent = oclass->parent;
 	union {
 		struct kepler_channel_gpfifo_a_v0 v0;
 	} *args = data;
-	struct gk104_fifo *fifo = gk104_fifo(base);
 	int ret = -ENOSYS;
 
 	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
@@ -324,11 +323,3 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 
 	return ret;
 }
-
-const struct nvkm_fifo_chan_oclass
-gk104_fifo_gpfifo_oclass = {
-	.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = gk104_fifo_gpfifo_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
deleted file mode 100644
index a9aa69c82e8e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2016 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "changk104.h"
-
-#include <nvif/class.h>
-
-const struct nvkm_fifo_chan_oclass
-gk110_fifo_gpfifo_oclass = {
-	.base.oclass = KEPLER_CHANNEL_GPFIFO_B,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = gk104_fifo_gpfifo_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
deleted file mode 100644
index a13315147391..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "changk104.h"
-
-#include <nvif/class.h>
-
-const struct nvkm_fifo_chan_oclass
-gm200_fifo_gpfifo_oclass = {
-	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = gk104_fifo_gpfifo_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
deleted file mode 100644
index 1530a9217aea..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2016 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "changk104.h"
-
-#include <nvif/class.h>
-
-const struct nvkm_fifo_chan_oclass
-gp100_fifo_gpfifo_oclass = {
-	.base.oclass = PASCAL_CHANNEL_GPFIFO_A,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = gk104_fifo_gpfifo_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index f9b4e9d2c08a..d5acbba293f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -28,8 +28,9 @@ struct nvkm_fifo_func {
 	void (*uevent_init)(struct nvkm_fifo *);
 	void (*uevent_fini)(struct nvkm_fifo *);
 	void (*recover_chan)(struct nvkm_fifo *, int chid);
-	int (*class_get)(struct nvkm_fifo *, int index,
-			 const struct nvkm_fifo_chan_oclass **);
+	int (*class_get)(struct nvkm_fifo *, int index, struct nvkm_oclass *);
+	int (*class_new)(struct nvkm_fifo *, const struct nvkm_oclass *,
+			 void *, u32, struct nvkm_object **);
 	const struct nvkm_fifo_chan_oclass *chan[];
 };
 

From 665870837a764fc7ba6f0e1291291e86f909c29b Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1036/1286] drm/nouveau/fifo/gk104-: add interfaces to support
 different runlist layouts

This will be required to support features on newer hardware.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  | 40 ++++++++++++-------
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.h  | 10 +++++
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk110.c  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk208.c  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm107.c  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm200.c  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp100.c  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c  |  1 +
 10 files changed, 44 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 316b2b1d08e8..afb3ed06ec89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -140,6 +140,7 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
 void
 gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 {
+	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
 	struct gk104_fifo_chan *chan;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
@@ -153,9 +154,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 
 	nvkm_kmap(mem);
 	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
-		nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
-		nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
-		nr++;
+		func->chan(chan, mem, nr++ * func->size);
 	}
 	nvkm_done(mem);
 
@@ -196,6 +195,20 @@ gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
 	mutex_unlock(&fifo->base.engine.subdev.mutex);
 }
 
+void
+gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
+			struct nvkm_memory *memory, u32 offset)
+{
+	nvkm_wo32(memory, offset + 0, chan->base.chid);
+	nvkm_wo32(memory, offset + 4, 0x00000000);
+}
+
+const struct gk104_fifo_runlist_func
+gk104_fifo_runlist = {
+	.size = 8,
+	.chan = gk104_fifo_runlist_chan,
+};
+
 static void
 gk104_fifo_recover_work(struct work_struct *w)
 {
@@ -874,17 +887,15 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	kfree(map);
 
 	for (i = 0; i < fifo->runlist_nr; i++) {
-		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-				      0x8000, 0x1000, false,
-				      &fifo->runlist[i].mem[0]);
-		if (ret)
-			return ret;
-
-		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-				      0x8000, 0x1000, false,
-				      &fifo->runlist[i].mem[1]);
-		if (ret)
-			return ret;
+		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
+			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+					      fifo->base.nr * 2/* TSG+chan */ *
+					      fifo->func->runlist->size,
+					      0x1000, false,
+					      &fifo->runlist[i].mem[j]);
+			if (ret)
+				return ret;
+		}
 
 		init_waitqueue_head(&fifo->runlist[i].wait);
 		INIT_LIST_HEAD(&fifo->runlist[i].chan);
@@ -1111,6 +1122,7 @@ gk104_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 41f1f367eaeb..c32ea45f9456 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -51,6 +51,12 @@ struct gk104_fifo_func {
 		const struct nvkm_enum *gpcclient;
 	} fault;
 
+	const struct gk104_fifo_runlist_func {
+		u8 size;
+		void (*chan)(struct gk104_fifo_chan *,
+			     struct nvkm_memory *, u32 offset);
+	} *runlist;
+
 	struct gk104_fifo_chan_user {
 		struct nvkm_sclass user;
 		int (*ctor)(struct gk104_fifo *, const struct nvkm_oclass *,
@@ -65,10 +71,14 @@ void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
 
 extern const struct nvkm_enum gk104_fifo_fault_access[];
+
 extern const struct nvkm_enum gk104_fifo_fault_engine[];
 extern const struct nvkm_enum gk104_fifo_fault_reason[];
 extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
 extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
+extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
+void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
+			     struct nvkm_memory *, u32);
 
 extern const struct nvkm_enum gm107_fifo_fault_engine[];
 extern const struct nvkm_enum gp100_fifo_fault_engine[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index 9611bf1ad0b0..f7e160479558 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -33,6 +33,7 @@ gk110_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index fabc690ddd56..ff936ab6bf87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -33,6 +33,7 @@ gk208_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index dee1bd76882a..cb78d4df9182 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -31,6 +31,7 @@ gk20a_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 09f97827167a..f749d93a51c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -56,6 +56,7 @@ gm107_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 052b7c2c5d76..3b97bd48697d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -33,6 +33,7 @@ gm200_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 844787c6fb62..c7e11a40f016 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -31,6 +31,7 @@ gm20b_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 2e31847e2e4f..5c0cc40f7e3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -57,6 +57,7 @@ gp100_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 38ab6e17ec15..fe3a8fad8620 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -31,6 +31,7 @@ gp10b_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
+	.runlist = &gk104_fifo_runlist,
 	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 

From 4f2fc25c0f8bcc8db1b8a7b21e88c3d7f35c5acb Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1037/1286] drm/nouveau/fifo/gk104-: poll for runlist update
 completion

Newer HW doesn't appear to send this event, which causes long delays in
runlist updates when they don't complete immediately.

RM doesn't use these events anywhere, and an NVGPU commit message notes
that polling is the preferred method even on HW that supports the event.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index afb3ed06ec89..2b8e0d3d2d96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -170,10 +170,10 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 				    (target << 28));
 	nvkm_wr32(device, 0x002274, (runl << 20) | nr);
 
-	if (wait_event_timeout(fifo->runlist[runl].wait,
-			       !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
-				       & 0x00100000),
-			       msecs_to_jiffies(2000)) == 0)
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
+			break;
+	) < 0)
 		nvkm_error(subdev, "runlist %d update timeout\n", runl);
 unlock:
 	mutex_unlock(&subdev->mutex);

From 8c4e9f9dffb96a891d31e108b47f081233cb3e81 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1038/1286] drm/nouveau/fifo/gk110-: support writing channel
 group runlist entries

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h   | 11 ++++++++
 .../drm/nouveau/nvkm/engine/fifo/changk104.h  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  | 26 +++++++++++++++++--
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.h  |  9 ++++++-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk110.c  | 21 ++++++++++++++-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk208.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm107.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm200.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp100.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c  |  2 +-
 12 files changed, 71 insertions(+), 11 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h
new file mode 100644
index 000000000000..d0ac60b06720
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_FIFO_CGRP_H__
+#define __NVKM_FIFO_CGRP_H__
+#include "priv.h"
+
+struct nvkm_fifo_cgrp {
+	int id;
+	struct list_head head;
+	struct list_head chan;
+	int chan_nr;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 08b4415f0e24..391e864c2a4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -10,6 +10,7 @@ struct gk104_fifo_chan {
 	struct gk104_fifo *fifo;
 	int runl;
 
+	struct nvkm_fifo_cgrp *cgrp;
 	struct list_head head;
 	bool killed;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 2b8e0d3d2d96..bc87f18c3092 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "cgrp.h"
 #include "changk104.h"
 
 #include <core/client.h>
@@ -145,6 +146,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_memory *mem;
+	struct nvkm_fifo_cgrp *cgrp;
 	int nr = 0;
 	int target;
 
@@ -156,6 +158,13 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
 		func->chan(chan, mem, nr++ * func->size);
 	}
+
+	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+		func->cgrp(cgrp, mem, nr++ * func->size);
+		list_for_each_entry(chan, &cgrp->chan, head) {
+			func->chan(chan, mem, nr++ * func->size);
+		}
+	}
 	nvkm_done(mem);
 
 	switch (nvkm_memory_target(mem)) {
@@ -182,16 +191,28 @@ unlock:
 void
 gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
 {
+	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
 	mutex_lock(&fifo->base.engine.subdev.mutex);
-	list_del_init(&chan->head);
+	if (!list_empty(&chan->head)) {
+		list_del_init(&chan->head);
+		if (cgrp && !--cgrp->chan_nr)
+			list_del_init(&cgrp->head);
+	}
 	mutex_unlock(&fifo->base.engine.subdev.mutex);
 }
 
 void
 gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
 {
+	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
 	mutex_lock(&fifo->base.engine.subdev.mutex);
-	list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
+	if (cgrp) {
+		if (!cgrp->chan_nr++)
+			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
+		list_add_tail(&chan->head, &cgrp->chan);
+	} else {
+		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
+	}
 	mutex_unlock(&fifo->base.engine.subdev.mutex);
 }
 
@@ -898,6 +919,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 		}
 
 		init_waitqueue_head(&fifo->runlist[i].wait);
+		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
 		INIT_LIST_HEAD(&fifo->runlist[i].chan);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index c32ea45f9456..dfb3da84c9d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -3,6 +3,7 @@
 #define __GK104_FIFO_H__
 #define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
 #include "priv.h"
+struct nvkm_fifo_cgrp;
 
 #include <core/enum.h>
 #include <subdev/mmu.h>
@@ -31,6 +32,7 @@ struct gk104_fifo {
 		struct nvkm_memory *mem[2];
 		int next;
 		wait_queue_head_t wait;
+		struct list_head cgrp;
 		struct list_head chan;
 		u32 engm;
 	} runlist[16];
@@ -53,6 +55,8 @@ struct gk104_fifo_func {
 
 	const struct gk104_fifo_runlist_func {
 		u8 size;
+		void (*cgrp)(struct nvkm_fifo_cgrp *,
+			     struct nvkm_memory *, u32 offset);
 		void (*chan)(struct gk104_fifo_chan *,
 			     struct nvkm_memory *, u32 offset);
 	} *runlist;
@@ -71,7 +75,6 @@ void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
 
 extern const struct nvkm_enum gk104_fifo_fault_access[];
-
 extern const struct nvkm_enum gk104_fifo_fault_engine[];
 extern const struct nvkm_enum gk104_fifo_fault_reason[];
 extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
@@ -80,6 +83,10 @@ extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
 void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
 			     struct nvkm_memory *, u32);
 
+extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
+void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
+			     struct nvkm_memory *, u32);
+
 extern const struct nvkm_enum gm107_fifo_fault_engine[];
 extern const struct nvkm_enum gp100_fifo_fault_engine[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index f7e160479558..ac7655a130fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -22,10 +22,29 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "cgrp.h"
 #include "changk104.h"
 
+#include <core/memory.h>
+
 #include <nvif/class.h>
 
+void
+gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
+			struct nvkm_memory *memory, u32 offset)
+{
+	nvkm_wo32(memory, offset + 0, (cgrp->chan_nr << 26) | (128 << 18) |
+				      (3 << 14) | 0x00002000 | cgrp->id);
+	nvkm_wo32(memory, offset + 4, 0x00000000);
+}
+
+const struct gk104_fifo_runlist_func
+gk110_fifo_runlist = {
+	.size = 8,
+	.cgrp = gk110_fifo_runlist_cgrp,
+	.chan = gk104_fifo_runlist_chan,
+};
+
 static const struct gk104_fifo_func
 gk110_fifo = {
 	.fault.access = gk104_fifo_fault_access,
@@ -33,7 +52,7 @@ gk110_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk104_fifo_runlist,
+	.runlist = &gk110_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index ff936ab6bf87..b7385cd70b25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -33,7 +33,7 @@ gk208_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk104_fifo_runlist,
+	.runlist = &gk110_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index cb78d4df9182..15cc80c02649 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -31,7 +31,7 @@ gk20a_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk104_fifo_runlist,
+	.runlist = &gk110_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index f749d93a51c1..99b89f6ae34b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -56,7 +56,7 @@ gm107_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk104_fifo_runlist,
+	.runlist = &gk110_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 3b97bd48697d..16fd8de135a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -33,7 +33,7 @@ gm200_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk104_fifo_runlist,
+	.runlist = &gk110_fifo_runlist,
 	.chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index c7e11a40f016..bfa5fa5bc5e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -31,7 +31,7 @@ gm20b_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk104_fifo_runlist,
+	.runlist = &gk110_fifo_runlist,
 	.chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 5c0cc40f7e3e..c2852943fc3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -57,7 +57,7 @@ gp100_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk104_fifo_runlist,
+	.runlist = &gk110_fifo_runlist,
 	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index fe3a8fad8620..29a4029694de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -31,7 +31,7 @@ gp10b_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk104_fifo_runlist,
+	.runlist = &gk110_fifo_runlist,
 	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 

From 79bb4b617f965736d2e1c616235302c1d0e823b2 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1039/1286] drm/nouveau/fifo/gk208-: write pbdma timeout regs
 during initialisation

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c |  3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h |  4 ++++
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c | 11 +++++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c |  1 +
 9 files changed, 24 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index bc87f18c3092..5c22ce916530 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -962,6 +962,9 @@ gk104_fifo_init(struct nvkm_fifo *base)
 
 	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
 
+	if (fifo->func->init_pbdma_timeout)
+		fifo->func->init_pbdma_timeout(fifo);
+
 	nvkm_wr32(device, 0x002100, 0xffffffff);
 	nvkm_wr32(device, 0x002140, 0x7fffffff);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index dfb3da84c9d1..e232cee1b83c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -45,6 +45,8 @@ struct gk104_fifo {
 };
 
 struct gk104_fifo_func {
+	void (*init_pbdma_timeout)(struct gk104_fifo *);
+
 	struct {
 		const struct nvkm_enum *access;
 		const struct nvkm_enum *engine;
@@ -87,6 +89,8 @@ extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
 void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
 			     struct nvkm_memory *, u32);
 
+void gk208_fifo_init_pbdma_timeout(struct gk104_fifo *);
+
 extern const struct nvkm_enum gm107_fifo_fault_engine[];
 extern const struct nvkm_enum gp100_fifo_fault_engine[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index b7385cd70b25..5ea7e452cc66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -26,8 +26,19 @@
 
 #include <nvif/class.h>
 
+void
+gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int i;
+
+	for (i = 0; i < fifo->pbdma_nr; i++)
+		nvkm_wr32(device, 0x04012c + (i * 0x2000), 0x0000ffff);
+}
+
 static const struct gk104_fifo_func
 gk208_fifo = {
+	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index 15cc80c02649..535a0eb67a5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -26,6 +26,7 @@
 
 static const struct gk104_fifo_func
 gk20a_fifo = {
+	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gk104_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 99b89f6ae34b..a28f5d213d6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -51,6 +51,7 @@ gm107_fifo_fault_engine[] = {
 
 static const struct gk104_fifo_func
 gm107_fifo = {
+	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 16fd8de135a7..0bd87a0b25e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -28,6 +28,7 @@
 
 static const struct gk104_fifo_func
 gm200_fifo = {
+	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index bfa5fa5bc5e5..32a8e3deb87d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -26,6 +26,7 @@
 
 static const struct gk104_fifo_func
 gm20b_fifo = {
+	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gm107_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index c2852943fc3c..69b201f29cd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -52,6 +52,7 @@ gp100_fifo_fault_engine[] = {
 
 static const struct gk104_fifo_func
 gp100_fifo = {
+	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gp100_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 29a4029694de..fac7be50376c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -26,6 +26,7 @@
 
 static const struct gk104_fifo_func
 gp10b_fifo = {
+	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
 	.fault.access = gk104_fifo_fault_access,
 	.fault.engine = gp100_fifo_fault_engine,
 	.fault.reason = gk104_fifo_fault_reason,

From eda12417d3daf6cb37f41e9b1c46854e7dd75e91 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1040/1286] drm/nouveau/fifo/gm107-: write instance address in
 channel runlist entry

RM does this for some reason.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.h  |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm107.c  | 19 ++++++++++++++++++-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm200.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp100.c  |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c  |  2 +-
 6 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index e232cee1b83c..1d70a5dc0762 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -92,5 +92,7 @@ void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
 void gk208_fifo_init_pbdma_timeout(struct gk104_fifo *);
 
 extern const struct nvkm_enum gm107_fifo_fault_engine[];
+extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
+
 extern const struct nvkm_enum gp100_fifo_fault_engine[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index a28f5d213d6e..79ae19b1db67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -24,8 +24,25 @@
 #include "gk104.h"
 #include "changk104.h"
 
+#include <core/gpuobj.h>
+
 #include <nvif/class.h>
 
+static void
+gm107_fifo_runlist_chan(struct gk104_fifo_chan *chan,
+			struct nvkm_memory *memory, u32 offset)
+{
+	nvkm_wo32(memory, offset + 0, chan->base.chid);
+	nvkm_wo32(memory, offset + 4, chan->base.inst->addr >> 12);
+}
+
+const struct gk104_fifo_runlist_func
+gm107_fifo_runlist = {
+	.size = 8,
+	.cgrp = gk110_fifo_runlist_cgrp,
+	.chan = gm107_fifo_runlist_chan,
+};
+
 const struct nvkm_enum
 gm107_fifo_fault_engine[] = {
 	{ 0x01, "DISPLAY" },
@@ -57,7 +74,7 @@ gm107_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk110_fifo_runlist,
+	.runlist = &gm107_fifo_runlist,
 	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_B}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 0bd87a0b25e8..49565faa854d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -34,7 +34,7 @@ gm200_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk110_fifo_runlist,
+	.runlist = &gm107_fifo_runlist,
 	.chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 32a8e3deb87d..46736513bd11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -32,7 +32,7 @@ gm20b_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk110_fifo_runlist,
+	.runlist = &gm107_fifo_runlist,
 	.chan = {{0,0,MAXWELL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 69b201f29cd5..f137baed7a61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -58,7 +58,7 @@ gp100_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk110_fifo_runlist,
+	.runlist = &gm107_fifo_runlist,
 	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index fac7be50376c..787e911d9599 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -32,7 +32,7 @@ gp10b_fifo = {
 	.fault.reason = gk104_fifo_fault_reason,
 	.fault.hubclient = gk104_fifo_fault_hubclient,
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
-	.runlist = &gk110_fifo_runlist,
+	.runlist = &gm107_fifo_runlist,
 	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
 };
 

From 334cc26d4db10ae7d8f18de27869b95fe84c7d28 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1041/1286] drm/nouveau/fifo/gp100-: force individual channels
 into a channel group

RM does this for some reason, and it is enforced in HW on Volta.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  | 37 +++++++++++++++----
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.h  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp100.c  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c  |  1 +
 .../nouveau/nvkm/engine/fifo/gpfifogk104.c    | 28 ++++++++++++--
 5 files changed, 57 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 5c22ce916530..767e0ab44cb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -285,6 +285,32 @@ gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
 	schedule_work(&fifo->recover.work);
 }
 
+static struct gk104_fifo_chan *
+gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
+{
+	struct gk104_fifo_chan *chan;
+	struct nvkm_fifo_cgrp *cgrp;
+
+	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+		if (chan->base.chid == chid) {
+			list_del_init(&chan->head);
+			return chan;
+		}
+	}
+
+	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
+		if (cgrp->id == chid) {
+			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
+			list_del_init(&chan->head);
+			if (!--cgrp->chan_nr)
+				list_del_init(&cgrp->head);
+			return chan;
+		}
+	}
+
+	return NULL;
+}
+
 static void
 gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
 {
@@ -302,13 +328,10 @@ gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
 		return;
 
 	/* Lookup SW state for channel, and mark it as dead. */
-	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
-		if (chan->base.chid == chid) {
-			list_del_init(&chan->head);
-			chan->killed = true;
-			nvkm_fifo_kevent(&fifo->base, chid);
-			break;
-		}
+	chan = gk104_fifo_recover_chid(fifo, runl, chid);
+	if (chan) {
+		chan->killed = true;
+		nvkm_fifo_kevent(&fifo->base, chid);
 	}
 
 	/* Disable channel. */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 1d70a5dc0762..1d182d8d2fce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -68,6 +68,7 @@ struct gk104_fifo_func {
 		int (*ctor)(struct gk104_fifo *, const struct nvkm_oclass *,
 			    void *, u32, struct nvkm_object **);
 	} chan;
+	bool cgrp_force;
 };
 
 int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index f137baed7a61..e2f8f9087d7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -60,6 +60,7 @@ gp100_fifo = {
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.runlist = &gm107_fifo_runlist,
 	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+	.cgrp_force = true,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 787e911d9599..7733bf7c6545 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -34,6 +34,7 @@ gp10b_fifo = {
 	.fault.gpcclient = gk104_fifo_fault_gpcclient,
 	.runlist = &gm107_fifo_runlist,
 	.chan = {{0,0,PASCAL_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
+	.cgrp_force = true,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index f7a4e0e86b23..60e7d72d6e46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "changk104.h"
+#include "cgrp.h"
 
 #include <core/client.h>
 #include <core/gpuobj.h>
@@ -40,16 +41,21 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_client *client = chan->base.object.client;
+	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
 	int ret = 0;
 
 	mutex_lock(&subdev->mutex);
-	nvkm_wr32(device, 0x002634, chan->base.chid);
+	if (cgrp)
+		nvkm_wr32(device, 0x002634, cgrp->id | 0x01000000);
+	else
+		nvkm_wr32(device, 0x002634, chan->base.chid);
 	if (nvkm_msec(device, 2000,
 		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
-			   chan->base.chid, client->name);
+		nvkm_error(subdev, "%s %d [%s] kick timeout\n",
+			   cgrp ? "tsg" : "channel",
+			   cgrp ? cgrp->id : chan->base.chid, client->name);
 		nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
 		ret = -ETIMEDOUT;
 	}
@@ -207,7 +213,9 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 static void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
-	return gk104_fifo_chan(base);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	kfree(chan->cgrp);
+	return chan;
 }
 
 static const struct nvkm_fifo_chan_func
@@ -264,6 +272,18 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
 
 	*chid = chan->base.chid;
 
+	/* Hack to support GPUs where even individual channels should be
+	 * part of a channel group.
+	 */
+	if (fifo->func->cgrp_force) {
+		if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
+			return -ENOMEM;
+		chan->cgrp->id = chan->base.chid;
+		INIT_LIST_HEAD(&chan->cgrp->head);
+		INIT_LIST_HEAD(&chan->cgrp->chan);
+		chan->cgrp->chan_nr = 0;
+	}
+
 	/* Clear channel control registers. */
 	usermem = chan->base.chid * 0x200;
 	ilength = order_base_2(ilength / 8);

From 1246f1dc224a2c4ab61a1454cff669918b92e9da Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1042/1286] drm/nouveau/gr/gf100-: virtualise init_gpc_mmu +
 apply fixes from traces

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gm107.c    | 23 +++++++++++++------
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 2f8dc107047d..5be4111b0668 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1920,7 +1920,7 @@ gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
 	struct nvkm_fb *fb = device->fb;
 
 	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001);
-	nvkm_wr32(device, 0x4188a4, 0x00000000);
+	nvkm_wr32(device, 0x4188a4, 0x03000000);
 	nvkm_wr32(device, 0x418888, 0x00000000);
 	nvkm_wr32(device, 0x41888c, 0x00000000);
 	nvkm_wr32(device, 0x418890, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 2c67fac576d1..5ea74f62cee4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -308,22 +308,30 @@ gm107_gr_init_bios(struct gf100_gr *gr)
 	}
 }
 
-static int
-gm107_gr_init(struct gf100_gr *gr)
+static void
+gm107_gr_init_gpc_mmu(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nvkm_fb *fb = device->fb;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc, rop;
-	int i;
 
 	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
 	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
 	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
+}
+
+static int
+gm107_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc, rop;
+	int i;
+
+	gr->func->init_gpc_mmu(gr);
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
@@ -442,6 +450,7 @@ gm107_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gm107_gr = {
 	.init = gm107_gr_init,
+	.init_gpc_mmu = gm107_gr_init_gpc_mmu,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,

From 6f63a5fb1ec37cd55d7c886ac7d76f95a4cea2ce Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1043/1286] drm/nouveau/gr/gf100-: support firmware-provided
 sw_nonctx everywhere

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5be4111b0668..eb50cfc1b53f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1941,7 +1941,10 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	gr->func->init_gpc_mmu(gr);
 
-	gf100_gr_mmio(gr, gr->func->mmio);
+	if (gr->fuc_sw_nonctx)
+		gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+	else
+		gf100_gr_mmio(gr, gr->func->mmio);
 
 	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
 

From 8b058ca5186535163bdcc55d81a9f341c71139f6 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1044/1286] drm/nouveau/gr/gf100-: virtualise r405a14

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c | 7 +++++++
 3 files changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index eb50cfc1b53f..d127c6a7363c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1946,6 +1946,9 @@ gf100_gr_init(struct gf100_gr *gr)
 	else
 		gf100_gr_mmio(gr, gr->func->mmio);
 
+	if (gr->func->init_r405a14)
+		gr->func->init_r405a14(gr);
+
 	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
 
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c8ec3fd97155..d940a1a239e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -122,6 +122,7 @@ struct gf100_gr_func {
 	void (*dtor)(struct gf100_gr *);
 	int (*init)(struct gf100_gr *);
 	void (*init_gpc_mmu)(struct gf100_gr *);
+	void (*init_r405a14)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index cc152eb74123..3a59dea5ad7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -103,10 +103,17 @@ gf108_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+static void
+gf108_gr_init_r405a14(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x405a14, 0x80000000);
+}
+
 static const struct gf100_gr_func
 gf108_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_r405a14 = gf108_gr_init_r405a14,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
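
The hunks above follow the pattern used throughout this series: a chipset-specific
init step is moved behind a function pointer in the per-chipset gf100_gr_func table,
and the shared init path invokes an optional hook only when the chipset provides it
(here, only gf108 fills in .init_r405a14). A minimal, self-contained sketch of that
dispatch shape — hypothetical names, not the nvkm API:

/* Illustrative sketch of the per-chipset hook table; not nvkm code. */
#include <stdio.h>

struct gr;

struct gr_func {
	/* optional: only chipsets that need the quirk provide it */
	void (*init_r405a14)(struct gr *);
	/* shared step provided by every chipset on this path */
	void (*init_gpc_mmu)(struct gr *);
};

struct gr {
	const struct gr_func *func;
};

static void demo_init_r405a14(struct gr *gr) { (void)gr; printf("quirk: write 0x405a14\n"); }
static void demo_init_gpc_mmu(struct gr *gr) { (void)gr; printf("program GPC MMU\n"); }

/* Shared init path: mandatory hooks are called directly, optional ones
 * only when the chipset's table fills them in. */
static void gr_init(struct gr *gr)
{
	gr->func->init_gpc_mmu(gr);
	if (gr->func->init_r405a14)
		gr->func->init_r405a14(gr);
}

int main(void)
{
	static const struct gr_func demo_chipset = {
		.init_r405a14 = demo_init_r405a14,
		.init_gpc_mmu = demo_init_gpc_mmu,
	};
	struct gr gr = { .func = &demo_chipset };

	gr_init(&gr);
	return 0;
}

Chipsets that do not need the quirk simply leave the pointer unset, which is why
the later patches in the series can add hooks without touching every func table.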

From cd9662f89e2cd953ce9b2fcf02fdaae847592bd1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1045/1286] drm/nouveau/gr/gf100-: support clkgate_pack
 everywhere

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index d127c6a7363c..eaf32f79ee47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -32,6 +32,7 @@
 #include <subdev/fb.h>
 #include <subdev/mc.h>
 #include <subdev/pmu.h>
+#include <subdev/therm.h>
 #include <subdev/timer.h>
 #include <engine/fifo.h>
 
@@ -1949,6 +1950,9 @@ gf100_gr_init(struct gf100_gr *gr)
 	if (gr->func->init_r405a14)
 		gr->func->init_r405a14(gr);
 
+	if (gr->func->clkgate_pack)
+		nvkm_therm_clkgate_init(device->therm, gr->func->clkgate_pack);
+
 	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
 
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));

From a37279e94c91e3b30ee50dfd96f8f33f3be43f17 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1046/1286] drm/nouveau/gr/gf100-: virtualise init_bios

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c | 3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c | 5 +++--
 4 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index eaf32f79ee47..6ecb4a05096d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1953,6 +1953,9 @@ gf100_gr_init(struct gf100_gr *gr)
 	if (gr->func->clkgate_pack)
 		nvkm_therm_clkgate_init(device->therm, gr->func->clkgate_pack);
 
+	if (gr->func->init_bios)
+		gr->func->init_bios(gr);
+
 	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
 
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index d940a1a239e8..1320befd4f10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -123,6 +123,7 @@ struct gf100_gr_func {
 	int (*init)(struct gf100_gr *);
 	void (*init_gpc_mmu)(struct gf100_gr *);
 	void (*init_r405a14)(struct gf100_gr *);
+	void (*init_bios)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
@@ -151,7 +152,6 @@ void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
 
 int gk20a_gr_init(struct gf100_gr *);
 
-int gm200_gr_init(struct gf100_gr *);
 int gm200_gr_rops(struct gf100_gr *);
 
 int gp100_gr_init(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 5ea74f62cee4..d5629cdddf4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -335,7 +335,7 @@ gm107_gr_init(struct gf100_gr *gr)
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
-	gm107_gr_init_bios(gr);
+	gr->func->init_bios(gr);
 
 	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
@@ -451,6 +451,7 @@ static const struct gf100_gr_func
 gm107_gr = {
 	.init = gm107_gr_init,
 	.init_gpc_mmu = gm107_gr_init_gpc_mmu,
+	.init_bios = gm107_gr_init_bios,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 6435f1257572..7a793a0bb28e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -61,7 +61,7 @@ gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
 	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
-int
+static int
 gm200_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -75,7 +75,7 @@ gm200_gr_init(struct gf100_gr *gr)
 
 	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
 
-	gm107_gr_init_bios(gr);
+	gr->func->init_bios(gr);
 
 	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
@@ -210,6 +210,7 @@ static const struct gf100_gr_func
 gm200_gr = {
 	.init = gm200_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_bios = gm107_gr_init_bios,
 	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,

From 2fe5ff6371350ce224dc2cc1da0e01888a1f9999 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1047/1286] drm/nouveau/gr/gf100-: virtualise
 init_vsc_stream_master

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 10 +++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  |  5 ++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  | 10 +++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c  |  1 +
 17 files changed, 39 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 6ecb4a05096d..0c92b8c6375a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1930,6 +1930,13 @@ gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
 }
 
+void
+gf100_gr_init_vsc_stream_master(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+}
+
 int
 gf100_gr_init(struct gf100_gr *gr)
 {
@@ -1956,7 +1963,7 @@ gf100_gr_init(struct gf100_gr *gr)
 	if (gr->func->init_bios)
 		gr->func->init_bios(gr);
 
-	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+	gr->func->init_vsc_stream_master(gr);
 
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
@@ -2068,6 +2075,7 @@ static const struct gf100_gr_func
 gf100_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 1320befd4f10..45792a33fd65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -124,6 +124,7 @@ struct gf100_gr_func {
 	void (*init_gpc_mmu)(struct gf100_gr *);
 	void (*init_r405a14)(struct gf100_gr *);
 	void (*init_bios)(struct gf100_gr *);
+	void (*init_vsc_stream_master)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
@@ -143,10 +144,12 @@ struct gf100_gr_func {
 	struct nvkm_sclass sclass[];
 };
 
-int gf100_gr_init(struct gf100_gr *);
 int gf100_gr_rops(struct gf100_gr *);
+int gf100_gr_init(struct gf100_gr *);
+void gf100_gr_init_vsc_stream_master(struct gf100_gr *);
 
 int gk104_gr_init(struct gf100_gr *);
+void gk104_gr_init_vsc_stream_master(struct gf100_gr *);
 void gk104_gr_init_rop_active_fbps(struct gf100_gr *);
 void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index ec0f11983b23..61b27b179e15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -116,6 +116,7 @@ static const struct gf100_gr_func
 gf104_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 3a59dea5ad7a..992727f79439 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -114,6 +114,7 @@ gf108_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_r405a14 = gf108_gr_init_r405a14,
+	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 10d2d73ca8c3..07a57cc5074f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -88,6 +88,7 @@ static const struct gf100_gr_func
 gf110_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index ac09a07c4150..29ab01be71ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -124,6 +124,7 @@ static const struct gf100_gr_func
 gf117_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 7f449ec6f760..d3b06dd7702d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -179,6 +179,7 @@ static const struct gf100_gr_func
 gf119_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 1b52fcb2c49a..835f498d6b98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -404,6 +404,13 @@ gk104_gr_init_ppc_exceptions(struct gf100_gr *gr)
 	}
 }
 
+void
+gk104_gr_init_vsc_stream_master(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+}
+
 int
 gk104_gr_init(struct gf100_gr *gr)
 {
@@ -421,7 +428,7 @@ gk104_gr_init(struct gf100_gr *gr)
 		nvkm_therm_clkgate_init(gr->base.engine.subdev.device->therm,
 					gr->func->clkgate_pack);
 
-	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+	gr->func->init_vsc_stream_master(gr);
 
 	memset(data, 0x00, sizeof(data));
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
@@ -536,6 +543,7 @@ static const struct gf100_gr_func
 gk104_gr = {
 	.init = gk104_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 4da916a9fc73..eef7476e0ee8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -338,6 +338,7 @@ static const struct gf100_gr_func
 gk110_gr = {
 	.init = gk104_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 1912c0bfd7ee..ed4361a830a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -104,6 +104,7 @@ static const struct gf100_gr_func
 gk110b_gr = {
 	.init = gk104_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 1fc258163f25..4c8beb9c1708 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -163,6 +163,7 @@ static const struct gf100_gr_func
 gk208_gr = {
 	.init = gk104_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk208_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index d5629cdddf4a..58c03d6c1c2d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -337,7 +337,7 @@ gm107_gr_init(struct gf100_gr *gr)
 
 	gr->func->init_bios(gr);
 
-	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+	gr->func->init_vsc_stream_master(gr);
 
 	memset(data, 0x00, sizeof(data));
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
@@ -452,6 +452,7 @@ gm107_gr = {
 	.init = gm107_gr_init,
 	.init_gpc_mmu = gm107_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 7a793a0bb28e..8583aad367af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -77,7 +77,7 @@ gm200_gr_init(struct gf100_gr *gr)
 
 	gr->func->init_bios(gr);
 
-	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+	gr->func->init_vsc_stream_master(gr);
 
 	memset(data, 0x00, sizeof(data));
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
@@ -211,6 +211,7 @@ gm200_gr = {
 	.init = gm200_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 867a5f7cc5bc..edaaebbe7613 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -63,7 +63,7 @@ gp100_gr_init(struct gf100_gr *gr)
 
 	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
 
-	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+	gr->func->init_vsc_stream_master(gr);
 
 	memset(data, 0x00, sizeof(data));
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
@@ -160,6 +160,7 @@ static const struct gf100_gr_func
 gp100_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 61e3a0b08559..821a6c4589e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -44,6 +44,7 @@ static const struct gf100_gr_func
 gp102_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index f7272323f694..e2e12975bb2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -30,6 +30,7 @@ static const struct gf100_gr_func
 gp107_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 5f3d161a0842..e90cc3c845ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -37,6 +37,7 @@ static const struct gf100_gr_func
 gp10b_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_num_active_ltcs = gp10b_gr_init_num_active_ltcs,

From 02917aa39d56f504b47354135120000da1efa760 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1048/1286] drm/nouveau/gr/gf100-: virtualise init_zcull

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 62 +++++++++----------
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  4 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf104.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf108.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf110.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf117.c    | 29 +++++++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf119.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk104.c    | 31 +---------
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110b.c   |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk208.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk20a.c    | 33 +---------
 .../gpu/drm/nouveau/nvkm/engine/gr/gm107.c    | 31 +---------
 .../gpu/drm/nouveau/nvkm/engine/gr/gm200.c    | 31 +---------
 .../gpu/drm/nouveau/nvkm/engine/gr/gm20b.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp100.c    | 31 +---------
 .../gpu/drm/nouveau/nvkm/engine/gr/gp102.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp107.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp10b.c    |  1 +
 19 files changed, 85 insertions(+), 178 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 0c92b8c6375a..56d9ead0b380 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1930,6 +1930,34 @@ gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
 }
 
+void
+gf100_gr_init_zcull(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc;
+	int i;
+
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
+}
+
 void
 gf100_gr_init_vsc_stream_master(struct gf100_gr *gr)
 {
@@ -1941,11 +1969,7 @@ int
 gf100_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
-	int i;
 
 	gr->func->init_gpc_mmu(gr);
 
@@ -1964,34 +1988,7 @@ gf100_gr_init(struct gf100_gr *gr)
 		gr->func->init_bios(gr);
 
 	gr->func->init_vsc_stream_master(gr);
-
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-							 gr->tpc_total);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	if (device->chipset != 0xd7)
-		nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
-	else
-		nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	gr->func->init_zcull(gr);
 
 	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
@@ -2076,6 +2073,7 @@ gf100_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
+	.init_zcull = gf100_gr_init_zcull,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 45792a33fd65..b322dc424761 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -125,6 +125,7 @@ struct gf100_gr_func {
 	void (*init_r405a14)(struct gf100_gr *);
 	void (*init_bios)(struct gf100_gr *);
 	void (*init_vsc_stream_master)(struct gf100_gr *);
+	void (*init_zcull)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
@@ -147,6 +148,9 @@ struct gf100_gr_func {
 int gf100_gr_rops(struct gf100_gr *);
 int gf100_gr_init(struct gf100_gr *);
 void gf100_gr_init_vsc_stream_master(struct gf100_gr *);
+void gf100_gr_init_zcull(struct gf100_gr *);
+
+void gf117_gr_init_zcull(struct gf100_gr *);
 
 int gk104_gr_init(struct gf100_gr *);
 void gk104_gr_init_vsc_stream_master(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 61b27b179e15..23c2613fa6bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -117,6 +117,7 @@ gf104_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
+	.init_zcull = gf100_gr_init_zcull,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 992727f79439..a79a7860732a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -115,6 +115,7 @@ gf108_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_r405a14 = gf108_gr_init_r405a14,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
+	.init_zcull = gf100_gr_init_zcull,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 07a57cc5074f..fd8e13716d94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -89,6 +89,7 @@ gf110_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
+	.init_zcull = gf100_gr_init_zcull,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 29ab01be71ec..01e213e4d1a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -120,11 +120,40 @@ gf117_gr_gpccs_ucode = {
 	.data.size = sizeof(gf117_grgpc_data),
 };
 
+void
+gf117_gr_init_zcull(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc;
+	int i;
+
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+}
+
 static const struct gf100_gr_func
 gf117_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index d3b06dd7702d..269922e9fca2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -180,6 +180,7 @@ gf119_gr = {
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
+	.init_zcull = gf100_gr_init_zcull,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 835f498d6b98..d1b7fb8957fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -415,11 +415,7 @@ int
 gk104_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
-	int i;
 
 	gr->func->init_gpc_mmu(gr);
 
@@ -429,32 +425,8 @@ gk104_gr_init(struct gf100_gr *gr)
 					gr->func->clkgate_pack);
 
 	gr->func->init_vsc_stream_master(gr);
+	gr->func->init_zcull(gr);
 
-	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-							 gr->tpc_total);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
 	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
 	gr->func->init_rop_active_fbps(gr);
@@ -544,6 +516,7 @@ gk104_gr = {
 	.init = gk104_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index eef7476e0ee8..0eba041cd399 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -339,6 +339,7 @@ gk110_gr = {
 	.init = gk104_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index ed4361a830a5..84a1664f9e46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -105,6 +105,7 @@ gk110b_gr = {
 	.init = gk104_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 4c8beb9c1708..701199abf5f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -164,6 +164,7 @@ gk208_gr = {
 	.init = gk104_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk208_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index de8b806b88fd..a806643ede7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -219,11 +219,7 @@ int
 gk20a_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc;
-	int ret, i;
+	int ret;
 
 	/* Clear SCC RAM */
 	nvkm_wr32(device, 0x40802c, 0x1);
@@ -246,31 +242,7 @@ gk20a_gr_init(struct gf100_gr *gr)
 	nvkm_mask(device, 0x503018, 0x1, 0x1);
 
 	/* Zcull init */
-	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			  gr->tpc_total);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	gr->func->init_zcull(gr);
 
 	gr->func->init_rop_active_fbps(gr);
 
@@ -311,6 +283,7 @@ gk20a_gr_init(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gk20a_gr = {
 	.init = gk20a_gr_init,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
 	.rops = gf100_gr_rops,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 58c03d6c1c2d..a5f5e05c21ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -325,11 +325,7 @@ static int
 gm107_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
-	int i;
 
 	gr->func->init_gpc_mmu(gr);
 
@@ -338,32 +334,8 @@ gm107_gr_init(struct gf100_gr *gr)
 	gr->func->init_bios(gr);
 
 	gr->func->init_vsc_stream_master(gr);
+	gr->func->init_zcull(gr);
 
-	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-							 gr->tpc_total);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
 	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
 	gr->func->init_rop_active_fbps(gr);
@@ -453,6 +425,7 @@ gm107_gr = {
 	.init_gpc_mmu = gm107_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 8583aad367af..2ceef1737f84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -65,11 +65,7 @@ static int
 gm200_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
-	int i;
 
 	gr->func->init_gpc_mmu(gr);
 
@@ -78,32 +74,8 @@ gm200_gr_init(struct gf100_gr *gr)
 	gr->func->init_bios(gr);
 
 	gr->func->init_vsc_stream_master(gr);
+	gr->func->init_zcull(gr);
 
-	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-							 gr->tpc_total);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
 	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
 
@@ -212,6 +184,7 @@ gm200_gr = {
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 69479af1d829..fcf86d5cf26f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -65,6 +65,7 @@ gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gm20b_gr = {
 	.init = gk20a_gr_init,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index edaaebbe7613..7d3317eb3e54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -53,43 +53,15 @@ int
 gp100_gr_init(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
-	int i;
 
 	gr->func->init_gpc_mmu(gr);
 
 	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
 
 	gr->func->init_vsc_stream_master(gr);
+	gr->func->init_zcull(gr);
 
-	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-							 gr->tpc_total);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
 	gr->func->init_num_active_ltcs(gr);
 
 	gr->func->init_rop_active_fbps(gr);
@@ -161,6 +133,7 @@ gp100_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 821a6c4589e2..0d9bf6c27a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -45,6 +45,7 @@ gp102_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index e2e12975bb2c..67d567e3a2ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -31,6 +31,7 @@ gp107_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index e90cc3c845ec..af06b10ba92f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -38,6 +38,7 @@ gp10b_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_num_active_ltcs = gp10b_gr_init_num_active_ltcs,
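
For reference, the TPC-distribution loop that the new init_zcull hooks consolidate
walks the GPCs round-robin, assigning each TPC in turn and packing the assignment as
a 4-bit nibble into the GPC broadcast words; the gf100 and gf117 variants differ only
in which broadcast register receives magicgpc918 (0x1bd4 vs 0x3fd4), replacing the old
chipset != 0xd7 check. A standalone sketch of the packing, with made-up per-GPC counts
(illustrative C only, not the nvkm code):

#include <stdio.h>
#include <string.h>

/* Demo values: 3 GPCs with 2, 3 and 1 TPCs respectively (hypothetical). */
#define DEMO_GPC_NR	3
#define DEMO_TPC_TOTAL	6

int main(void)
{
	const unsigned char tpc_nr[DEMO_GPC_NR] = { 2, 3, 1 };
	unsigned char tpcnr[DEMO_GPC_NR];
	unsigned int data[(DEMO_TPC_TOTAL + 7) / 8] = { 0 };
	int i, gpc, tpc;

	memcpy(tpcnr, tpc_nr, sizeof(tpcnr));
	for (i = 0, gpc = -1; i < DEMO_TPC_TOTAL; i++) {
		do {
			gpc = (gpc + 1) % DEMO_GPC_NR;	/* next GPC with TPCs left */
		} while (!tpcnr[gpc]);
		tpc = tpc_nr[gpc] - tpcnr[gpc]--;	/* next unassigned TPC in that GPC */

		data[i / 8] |= tpc << ((i % 8) * 4);	/* pack as a 4-bit nibble */
	}

	for (i = 0; i < (int)(sizeof(data) / sizeof(data[0])); i++)
		printf("data[%d] = 0x%08x\n", i, data[i]);
	return 0;
}

With the counts above this prints data[0] = 0x00211000, i.e. the same nibble layout
the hooks write to GPC_BCAST(0x0980..0x098c) on real hardware.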

From bfd27f39b5419724883bbd04910c4c35e57b6154 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1049/1286] drm/nouveau/gr/gf100-: virtualise
 init_num_active_ltcs

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 11 +++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  |  6 +++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c  | 13 ++++++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c  | 11 +----------
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c  |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c  | 10 +---------
 17 files changed, 38 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 56d9ead0b380..d33e531bb1a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1930,6 +1930,13 @@ gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
 }
 
+void
+gf100_gr_init_num_active_ltcs(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+}
+
 void
 gf100_gr_init_zcull(struct gf100_gr *gr)
 {
@@ -1989,8 +1996,7 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	gr->func->init_vsc_stream_master(gr);
 	gr->func->init_zcull(gr);
-
-	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+	gr->func->init_num_active_ltcs(gr);
 
 	nvkm_wr32(device, 0x400500, 0x00010001);
 
@@ -2074,6 +2080,7 @@ gf100_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index b322dc424761..7f6aed873408 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -126,10 +126,10 @@ struct gf100_gr_func {
 	void (*init_bios)(struct gf100_gr *);
 	void (*init_vsc_stream_master)(struct gf100_gr *);
 	void (*init_zcull)(struct gf100_gr *);
+	void (*init_num_active_ltcs)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
-	void (*init_num_active_ltcs)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -149,6 +149,7 @@ int gf100_gr_rops(struct gf100_gr *);
 int gf100_gr_init(struct gf100_gr *);
 void gf100_gr_init_vsc_stream_master(struct gf100_gr *);
 void gf100_gr_init_zcull(struct gf100_gr *);
+void gf100_gr_init_num_active_ltcs(struct gf100_gr *);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
@@ -160,6 +161,7 @@ void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
 int gk20a_gr_init(struct gf100_gr *);
 
 int gm200_gr_rops(struct gf100_gr *);
+void gm200_gr_init_num_active_ltcs(struct gf100_gr *);
 
 int gp100_gr_init(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
@@ -315,7 +317,5 @@ void gm107_gr_init_bios(struct gf100_gr *);
 
 void gm200_gr_init_gpc_mmu(struct gf100_gr *);
 
-void gp100_gr_init_num_active_ltcs(struct gf100_gr *gr);
-
 void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 23c2613fa6bf..b6d67f5e5e71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -118,6 +118,7 @@ gf104_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index a79a7860732a..9b5796bd1dc0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -116,6 +116,7 @@ gf108_gr = {
 	.init_r405a14 = gf108_gr_init_r405a14,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index fd8e13716d94..7e346a0e9540 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -90,6 +90,7 @@ gf110_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 01e213e4d1a9..5c68c5f904ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -154,6 +154,7 @@ gf117_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 269922e9fca2..b642bd224ecf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -181,6 +181,7 @@ gf119_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index d1b7fb8957fb..3f19de24ef69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -426,8 +426,7 @@ gk104_gr_init(struct gf100_gr *gr)
 
 	gr->func->init_vsc_stream_master(gr);
 	gr->func->init_zcull(gr);
-
-	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+	gr->func->init_num_active_ltcs(gr);
 
 	gr->func->init_rop_active_fbps(gr);
 
@@ -517,6 +516,7 @@ gk104_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 0eba041cd399..9a8428c5df45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -340,6 +340,7 @@ gk110_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 84a1664f9e46..08f3f8c2c2bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -106,6 +106,7 @@ gk110b_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 701199abf5f9..49ee5490c7c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -165,6 +165,7 @@ gk208_gr = {
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk208_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index a5f5e05c21ac..bbb0a28365f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -335,8 +335,7 @@ gm107_gr_init(struct gf100_gr *gr)
 
 	gr->func->init_vsc_stream_master(gr);
 	gr->func->init_zcull(gr);
-
-	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+	gr->func->init_num_active_ltcs(gr);
 
 	gr->func->init_rop_active_fbps(gr);
 
@@ -426,6 +425,7 @@ gm107_gr = {
 	.init_bios = gm107_gr_init_bios,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 2ceef1737f84..5007435e6f7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -38,6 +38,14 @@ gm200_gr_rops(struct gf100_gr *gr)
 	return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
 }
 
+void
+gm200_gr_init_num_active_ltcs(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+}
+
 void
 gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
 {
@@ -75,9 +83,7 @@ gm200_gr_init(struct gf100_gr *gr)
 
 	gr->func->init_vsc_stream_master(gr);
 	gr->func->init_zcull(gr);
-
-	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
-	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+	gr->func->init_num_active_ltcs(gr);
 
 	gr->func->init_rop_active_fbps(gr);
 
@@ -185,6 +191,7 @@ gm200_gr = {
 	.init_bios = gm107_gr_init_bios,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 7d3317eb3e54..1e6795956abc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -40,15 +40,6 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
-void
-gp100_gr_init_num_active_ltcs(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-
-	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
-	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
-}
-
 int
 gp100_gr_init(struct gf100_gr *gr)
 {
@@ -134,9 +125,9 @@ gp100_gr = {
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
-	.init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gp100_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 0d9bf6c27a7b..17bd872b8dc9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -46,10 +46,10 @@ gp102_gr = {
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
-	.init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
 	.grctx = &gp102_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 67d567e3a2ac..3fba3af2f5a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -32,10 +32,10 @@ gp107_gr = {
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
-	.init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp107_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index af06b10ba92f..97f86677a22c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -25,23 +25,15 @@
 
 #include <nvif/class.h>
 
-static void
-gp10b_gr_init_num_active_ltcs(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-
-	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
-}
-
 static const struct gf100_gr_func
 gp10b_gr = {
 	.init = gp100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
-	.init_num_active_ltcs = gp10b_gr_init_num_active_ltcs,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp102_grctx,

From 429412e231a27d48cb492dc1c647e857677240b3 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1050/1286] drm/nouveau/gr/gf100-: virtualise
 init_rop_active_fbps

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index d33e531bb1a0..6912eaa5a90a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1997,6 +1997,8 @@ gf100_gr_init(struct gf100_gr *gr)
 	gr->func->init_vsc_stream_master(gr);
 	gr->func->init_zcull(gr);
 	gr->func->init_num_active_ltcs(gr);
+	if (gr->func->init_rop_active_fbps)
+		gr->func->init_rop_active_fbps(gr);
 
 	nvkm_wr32(device, 0x400500, 0x00010001);
 

From 0f78acc86bbfc60cdaffd4eb03e4a35b28397cbb Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1051/1286] drm/nouveau/gr/gf100-: implement another chunk of
 bios-provided init

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm107.c    | 28 +++++++++++++++++++
 3 files changed, 31 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 6912eaa5a90a..01b903acde4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1999,6 +1999,8 @@ gf100_gr_init(struct gf100_gr *gr)
 	gr->func->init_num_active_ltcs(gr);
 	if (gr->func->init_rop_active_fbps)
 		gr->func->init_rop_active_fbps(gr);
+	if (gr->func->init_bios_2)
+		gr->func->init_bios_2(gr);
 
 	nvkm_wr32(device, 0x400500, 0x00010001);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 7f6aed873408..11cf2dff0f81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -128,6 +128,7 @@ struct gf100_gr_func {
 	void (*init_zcull)(struct gf100_gr *);
 	void (*init_num_active_ltcs)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
+	void (*init_bios_2)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index bbb0a28365f5..c653df323f1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -25,6 +25,8 @@
 #include "ctxgf100.h"
 
 #include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/init.h>
 #include <subdev/bios/P0260.h>
 #include <subdev/fb.h>
 
@@ -279,6 +281,31 @@ gm107_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+static void
+gm107_gr_init_bios_2(struct gf100_gr *gr)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct bit_entry bit_P;
+	if (!bit_entry(bios, 'P', &bit_P) &&
+	    bit_P.version == 2 && bit_P.length >= 0x2c) {
+		u32 data = nvbios_rd32(bios, bit_P.offset + 0x28);
+		if (data) {
+			u8 ver = nvbios_rd08(bios, data + 0x00);
+			u8 hdr = nvbios_rd08(bios, data + 0x01);
+			if (ver == 0x20 && hdr >= 8) {
+				data = nvbios_rd32(bios, data + 0x04);
+				if (data) {
+					u32 save = nvkm_rd32(device, 0x619444);
+					nvbios_init(subdev, data);
+					nvkm_wr32(device, 0x619444, save);
+				}
+			}
+		}
+	}
+}
+
 void
 gm107_gr_init_bios(struct gf100_gr *gr)
 {
@@ -427,6 +454,7 @@ gm107_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_bios_2 = gm107_gr_init_bios_2,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,

From dff30dbd1d9336687ae1aa0b13e326c44f879c4e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1052/1286] drm/nouveau/gr/gf100-: virtualise
 init_swdx_pes_mask

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 6 +++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c | 2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c | 2 +-
 4 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 01b903acde4a..7ad6ea0533a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2001,6 +2001,8 @@ gf100_gr_init(struct gf100_gr *gr)
 		gr->func->init_rop_active_fbps(gr);
 	if (gr->func->init_bios_2)
 		gr->func->init_bios_2(gr);
+	if (gr->func->init_swdx_pes_mask)
+		gr->func->init_swdx_pes_mask(gr);
 
 	nvkm_wr32(device, 0x400500, 0x00010001);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 11cf2dff0f81..858024ba1186 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -129,8 +129,8 @@ struct gf100_gr_func {
 	void (*init_num_active_ltcs)(struct gf100_gr *);
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_bios_2)(struct gf100_gr *);
-	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
+	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -167,6 +167,8 @@ void gm200_gr_init_num_active_ltcs(struct gf100_gr *);
 int gp100_gr_init(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
 
+void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
+
 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
 #include <core/object.h>
 
@@ -317,6 +319,4 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
 void gm107_gr_init_bios(struct gf100_gr *);
 
 void gm200_gr_init_gpc_mmu(struct gf100_gr *);
-
-void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 17bd872b8dc9..860a78976980 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -48,8 +48,8 @@ gp102_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
-	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
 	.grctx = &gp102_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 3fba3af2f5a6..03f22646eb78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -34,8 +34,8 @@ gp107_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
-	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp107_grctx,

From 7c76ebb65a9c8d29780e7324c8ae067c5cd980ca Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1053/1286] drm/nouveau/gr/gf100: write 0x400124 during init

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 7ad6ea0533a4..0c53d456ebbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2008,6 +2008,7 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	nvkm_wr32(device, 0x400100, 0xffffffff);
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
 
 	nvkm_wr32(device, 0x409c24, 0x000f0000);
 	nvkm_wr32(device, 0x404000, 0xc0000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 3f19de24ef69..703aed48f737 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -434,6 +434,7 @@ gk104_gr_init(struct gf100_gr *gr)
 
 	nvkm_wr32(device, 0x400100, 0xffffffff);
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
 
 	nvkm_wr32(device, 0x409ffc, 0x00000000);
 	nvkm_wr32(device, 0x409c14, 0x00003e3e);

From 2585a1b1312e96c6a28f3008029408b5feca3ff4 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1054/1286] drm/nouveau/gr/gf100-: virtualise
 init_fecs_exceptions + apply fixes from traces

The value for GF100 has changed here, but it matches RM now.
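
For reference, GF100 previously wrote a fixed 0x000f0000 to 0x409c24 during
init; the helper introduced by this patch (quoted from the diff below) instead
selects the value based on whether external firmware is in use:

	void
	gf100_gr_init_fecs_exceptions(struct gf100_gr *gr)
	{
		/* 0x000e0000 with external ucode loaded, 0x000e0001 otherwise */
		const u32 data = gr->firmware ? 0x000e0000 : 0x000e0001;
		nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, data);
	}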

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 11 ++++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  |  3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  | 14 +++++++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c  |  9 ++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c  |  1 +
 17 files changed, 47 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 0c53d456ebbc..0493483597ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1914,6 +1914,13 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	return 0;
 }
 
+void
+gf100_gr_init_fecs_exceptions(struct gf100_gr *gr)
+{
+	const u32 data = gr->firmware ? 0x000e0000 : 0x000e0001;
+	nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, data);
+}
+
 void
 gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
 {
@@ -2010,7 +2017,8 @@ gf100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
 	nvkm_wr32(device, 0x400124, 0x00000002);
 
-	nvkm_wr32(device, 0x409c24, 0x000f0000);
+	gr->func->init_fecs_exceptions(gr);
+
 	nvkm_wr32(device, 0x404000, 0xc0000000);
 	nvkm_wr32(device, 0x404600, 0xc0000000);
 	nvkm_wr32(device, 0x408030, 0xc0000000);
@@ -2088,6 +2096,7 @@ gf100_gr = {
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 858024ba1186..387938fa352b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -130,6 +130,7 @@ struct gf100_gr_func {
 	void (*init_rop_active_fbps)(struct gf100_gr *);
 	void (*init_bios_2)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
+	void (*init_fecs_exceptions)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
@@ -151,6 +152,7 @@ int gf100_gr_init(struct gf100_gr *);
 void gf100_gr_init_vsc_stream_master(struct gf100_gr *);
 void gf100_gr_init_zcull(struct gf100_gr *);
 void gf100_gr_init_num_active_ltcs(struct gf100_gr *);
+void gf100_gr_init_fecs_exceptions(struct gf100_gr *);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
@@ -166,6 +168,7 @@ void gm200_gr_init_num_active_ltcs(struct gf100_gr *);
 
 int gp100_gr_init(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
+void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
 
 void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index b6d67f5e5e71..f76995b54eab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -119,6 +119,7 @@ gf104_gr = {
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 9b5796bd1dc0..ada2697a075a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -117,6 +117,7 @@ gf108_gr = {
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 7e346a0e9540..80ced8fc2e3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -91,6 +91,7 @@ gf110_gr = {
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 5c68c5f904ce..37ca1216372c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -155,6 +155,7 @@ gf117_gr = {
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index b642bd224ecf..ddf05c5fa2fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -182,6 +182,7 @@ gf119_gr = {
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 703aed48f737..746ad3d0d1ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -380,6 +380,15 @@ gk104_clkgate_pack[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+static void
+gk104_gr_init_fecs_exceptions(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, 0x409ffc, 0x00000000);
+	nvkm_wr32(device, 0x409c14, 0x00003e3e);
+	nvkm_wr32(device, 0x409c24, 0x000f0001);
+}
+
 void
 gk104_gr_init_rop_active_fbps(struct gf100_gr *gr)
 {
@@ -436,9 +445,7 @@ gk104_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
 	nvkm_wr32(device, 0x400124, 0x00000002);
 
-	nvkm_wr32(device, 0x409ffc, 0x00000000);
-	nvkm_wr32(device, 0x409c14, 0x00003e3e);
-	nvkm_wr32(device, 0x409c24, 0x000f0001);
+	gr->func->init_fecs_exceptions(gr);
 	nvkm_wr32(device, 0x404000, 0xc0000000);
 	nvkm_wr32(device, 0x404600, 0xc0000000);
 	nvkm_wr32(device, 0x408030, 0xc0000000);
@@ -519,6 +526,7 @@ gk104_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_fecs_exceptions = gk104_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 9a8428c5df45..8a6340d23766 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -342,6 +342,7 @@ gk110_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 08f3f8c2c2bf..b50e68165df7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -108,6 +108,7 @@ gk110b_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 49ee5490c7c2..7a938bb3af9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -167,6 +167,7 @@ gk208_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index c653df323f1a..9f2df29fd4e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -371,7 +371,7 @@ gm107_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x400100, 0xffffffff);
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
 	nvkm_wr32(device, 0x400124, 0x00000002);
-	nvkm_wr32(device, 0x409c24, 0x000e0000);
+	gr->func->init_fecs_exceptions(gr);
 
 	nvkm_wr32(device, 0x404000, 0xc0000000);
 	nvkm_wr32(device, 0x404600, 0xc0000000);
@@ -455,6 +455,7 @@ gm107_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_bios_2 = gm107_gr_init_bios_2,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 5007435e6f7d..a957993a0cc0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -91,7 +91,7 @@ gm200_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x400100, 0xffffffff);
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
 	nvkm_wr32(device, 0x400124, 0x00000002);
-	nvkm_wr32(device, 0x409c24, 0x000e0000);
+	gr->func->init_fecs_exceptions(gr);
 	nvkm_wr32(device, 0x405848, 0xc0000000);
 	nvkm_wr32(device, 0x40584c, 0x00000001);
 	nvkm_wr32(device, 0x404000, 0xc0000000);
@@ -193,6 +193,7 @@ gm200_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
+	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 1e6795956abc..564b5b17b503 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -30,6 +30,12 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+void
+gp100_gr_init_fecs_exceptions(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x000f0002);
+}
+
 void
 gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 {
@@ -63,7 +69,7 @@ gp100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x400100, 0xffffffff);
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
 	nvkm_wr32(device, 0x400124, 0x00000002);
-	nvkm_wr32(device, 0x409c24, 0x000f0002);
+	gr->func->init_fecs_exceptions(gr);
 	nvkm_wr32(device, 0x405848, 0xc0000000);
 	nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
 	nvkm_wr32(device, 0x404000, 0xc0000000);
@@ -127,6 +133,7 @@ gp100_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 860a78976980..04803fa7937a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -49,6 +49,7 @@ gp102_gr = {
 	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 03f22646eb78..c21cb8ae9a8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -35,6 +35,7 @@ gp107_gr = {
 	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 97f86677a22c..222b5b0c6e38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -33,6 +33,7 @@ gp10b_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,

From 3ac72e98b40ead6225eb38bcf78ec540357106c0 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1055/1286] drm/nouveau/gr/gf100-: virtualise init_ds_hww_esr_2

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c | 12 ++++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c |  1 +
 7 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 0493483597ed..1f764df141bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2018,6 +2018,8 @@ gf100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x400124, 0x00000002);
 
 	gr->func->init_fecs_exceptions(gr);
+	if (gr->func->init_ds_hww_esr_2)
+		gr->func->init_ds_hww_esr_2(gr);
 
 	nvkm_wr32(device, 0x404000, 0xc0000000);
 	nvkm_wr32(device, 0x404600, 0xc0000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 387938fa352b..ff3e265925c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -131,6 +131,7 @@ struct gf100_gr_func {
 	void (*init_bios_2)(struct gf100_gr *);
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
 	void (*init_fecs_exceptions)(struct gf100_gr *);
+	void (*init_ds_hww_esr_2)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
@@ -165,6 +166,7 @@ int gk20a_gr_init(struct gf100_gr *);
 
 int gm200_gr_rops(struct gf100_gr *);
 void gm200_gr_init_num_active_ltcs(struct gf100_gr *);
+void gm200_gr_init_ds_hww_esr_2(struct gf100_gr *);
 
 int gp100_gr_init(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index a957993a0cc0..b5994dca5d03 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -38,6 +38,14 @@ gm200_gr_rops(struct gf100_gr *gr)
 	return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
 }
 
+void
+gm200_gr_init_ds_hww_esr_2(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, 0x405848, 0xc0000000);
+	nvkm_mask(device, 0x40584c, 0x00000001, 0x00000001);
+}
+
 void
 gm200_gr_init_num_active_ltcs(struct gf100_gr *gr)
 {
@@ -92,8 +100,7 @@ gm200_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
 	nvkm_wr32(device, 0x400124, 0x00000002);
 	gr->func->init_fecs_exceptions(gr);
-	nvkm_wr32(device, 0x405848, 0xc0000000);
-	nvkm_wr32(device, 0x40584c, 0x00000001);
+	gr->func->init_ds_hww_esr_2(gr);
 	nvkm_wr32(device, 0x404000, 0xc0000000);
 	nvkm_wr32(device, 0x404600, 0xc0000000);
 	nvkm_wr32(device, 0x408030, 0xc0000000);
@@ -194,6 +201,7 @@ gm200_gr = {
 	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 564b5b17b503..676f58a9acee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -70,8 +70,7 @@ gp100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x40013c, 0xffffffff);
 	nvkm_wr32(device, 0x400124, 0x00000002);
 	gr->func->init_fecs_exceptions(gr);
-	nvkm_wr32(device, 0x405848, 0xc0000000);
-	nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
+	gr->func->init_ds_hww_esr_2(gr);
 	nvkm_wr32(device, 0x404000, 0xc0000000);
 	nvkm_wr32(device, 0x404600, 0xc0000000);
 	nvkm_wr32(device, 0x408030, 0xc0000000);
@@ -134,6 +133,7 @@ gp100_gr = {
 	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
+	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 04803fa7937a..3694687c85db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -50,6 +50,7 @@ gp102_gr = {
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
+	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index c21cb8ae9a8b..c83ad01bad53 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -36,6 +36,7 @@ gp107_gr = {
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
+	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 222b5b0c6e38..8fef3b56cf8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -34,6 +34,7 @@ gp10b_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
+	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,

From 2b297b0d6d33aee99254b43c3e41100fc75ea4ab Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1056/1286] drm/nouveau/gr/gf100-: virtualise init_40601c

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 12 +++++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c |  1 +
 7 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 1f764df141bd..1b067e600d74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1914,6 +1914,12 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	return 0;
 }
 
+void
+gf100_gr_init_40601c(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x40601c, 0xc0000000);
+}
+
 void
 gf100_gr_init_fecs_exceptions(struct gf100_gr *gr)
 {
@@ -2024,7 +2030,10 @@ gf100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x404000, 0xc0000000);
 	nvkm_wr32(device, 0x404600, 0xc0000000);
 	nvkm_wr32(device, 0x408030, 0xc0000000);
-	nvkm_wr32(device, 0x40601c, 0xc0000000);
+
+	if (gr->func->init_40601c)
+		gr->func->init_40601c(gr);
+
 	nvkm_wr32(device, 0x404490, 0xc0000000);
 	nvkm_wr32(device, 0x406018, 0xc0000000);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
@@ -2099,6 +2108,7 @@ gf100_gr = {
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_40601c = gf100_gr_init_40601c,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index ff3e265925c5..8c42a7aff183 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -132,6 +132,7 @@ struct gf100_gr_func {
 	void (*init_swdx_pes_mask)(struct gf100_gr *);
 	void (*init_fecs_exceptions)(struct gf100_gr *);
 	void (*init_ds_hww_esr_2)(struct gf100_gr *);
+	void (*init_40601c)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
@@ -154,6 +155,7 @@ void gf100_gr_init_vsc_stream_master(struct gf100_gr *);
 void gf100_gr_init_zcull(struct gf100_gr *);
 void gf100_gr_init_num_active_ltcs(struct gf100_gr *);
 void gf100_gr_init_fecs_exceptions(struct gf100_gr *);
+void gf100_gr_init_40601c(struct gf100_gr *);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index f76995b54eab..61d6eef70e68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -120,6 +120,7 @@ gf104_gr = {
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_40601c = gf100_gr_init_40601c,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index ada2697a075a..d4f712e7d6e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -118,6 +118,7 @@ gf108_gr = {
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_40601c = gf100_gr_init_40601c,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 80ced8fc2e3e..1b6c2f32ec92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -92,6 +92,7 @@ gf110_gr = {
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_40601c = gf100_gr_init_40601c,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 37ca1216372c..ae76e8183d1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -156,6 +156,7 @@ gf117_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_40601c = gf100_gr_init_40601c,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index ddf05c5fa2fc..a3970c31f951 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -183,6 +183,7 @@ gf119_gr = {
 	.init_zcull = gf100_gr_init_zcull,
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_40601c = gf100_gr_init_40601c,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,

From 0a5b97304b9e2cd07c78a399c5395d5fb0118341 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1057/1286] drm/nouveau/gr/gf100-: virtualise init_sked_hww_esr

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 4 ++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  | 9 ++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  | 3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c  | 3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c  | 3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c  | 1 +
 12 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 1b067e600d74..7ccd8f21c1e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2036,6 +2036,10 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	nvkm_wr32(device, 0x404490, 0xc0000000);
 	nvkm_wr32(device, 0x406018, 0xc0000000);
+
+	if (gr->func->init_sked_hww_esr)
+		gr->func->init_sked_hww_esr(gr);
+
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8c42a7aff183..c292cf3bebb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -133,6 +133,7 @@ struct gf100_gr_func {
 	void (*init_fecs_exceptions)(struct gf100_gr *);
 	void (*init_ds_hww_esr_2)(struct gf100_gr *);
 	void (*init_40601c)(struct gf100_gr *);
+	void (*init_sked_hww_esr)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
@@ -163,6 +164,7 @@ int gk104_gr_init(struct gf100_gr *);
 void gk104_gr_init_vsc_stream_master(struct gf100_gr *);
 void gk104_gr_init_rop_active_fbps(struct gf100_gr *);
 void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
+void gk104_gr_init_sked_hww_esr(struct gf100_gr *);
 
 int gk20a_gr_init(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 746ad3d0d1ce..6ba604edaf95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -380,6 +380,12 @@ gk104_clkgate_pack[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+void
+gk104_gr_init_sked_hww_esr(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x407020, 0x40000000);
+}
+
 static void
 gk104_gr_init_fecs_exceptions(struct gf100_gr *gr)
 {
@@ -451,7 +457,7 @@ gk104_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x408030, 0xc0000000);
 	nvkm_wr32(device, 0x404490, 0xc0000000);
 	nvkm_wr32(device, 0x406018, 0xc0000000);
-	nvkm_wr32(device, 0x407020, 0x40000000);
+	gr->func->init_sked_hww_esr(gr);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
@@ -527,6 +533,7 @@ gk104_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gk104_gr_init_fecs_exceptions,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 8a6340d23766..7a07d24cc227 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -343,6 +343,7 @@ gk110_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index b50e68165df7..1c9f59cde3d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -109,6 +109,7 @@ gk110b_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 7a938bb3af9f..40c87242b54d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -168,6 +168,7 @@ gk208_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 9f2df29fd4e4..3d180edbdcd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -378,7 +378,7 @@ gm107_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x408030, 0xc0000000);
 	nvkm_wr32(device, 0x404490, 0xc0000000);
 	nvkm_wr32(device, 0x406018, 0xc0000000);
-	nvkm_wr32(device, 0x407020, 0x40000000);
+	gr->func->init_sked_hww_esr(gr);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
@@ -456,6 +456,7 @@ gm107_gr = {
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_bios_2 = gm107_gr_init_bios_2,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index b5994dca5d03..9436ab37aa9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -106,7 +106,7 @@ gm200_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x408030, 0xc0000000);
 	nvkm_wr32(device, 0x404490, 0xc0000000);
 	nvkm_wr32(device, 0x406018, 0xc0000000);
-	nvkm_wr32(device, 0x407020, 0x40000000);
+	gr->func->init_sked_hww_esr(gr);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
@@ -202,6 +202,7 @@ gm200_gr = {
 	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 676f58a9acee..72ea16ee842a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -76,7 +76,7 @@ gp100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x408030, 0xc0000000);
 	nvkm_wr32(device, 0x404490, 0xc0000000);
 	nvkm_wr32(device, 0x406018, 0xc0000000);
-	nvkm_wr32(device, 0x407020, 0x40000000);
+	gr->func->init_sked_hww_esr(gr);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
@@ -134,6 +134,7 @@ gp100_gr = {
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 3694687c85db..309815bef601 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -51,6 +51,7 @@ gp102_gr = {
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index c83ad01bad53..1ae9e7d991f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -37,6 +37,7 @@ gp107_gr = {
 	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 8fef3b56cf8c..68e212823063 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -35,6 +35,7 @@ gp10b_gr = {
 	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,

From 0feab0250d34c7114b442f49b1ce18a9906b543d Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1058/1286] drm/nouveau/gr/gf100-: virtualise init_419cc0 +
 apply fixes from traces

Pulled some init out of the main per-GPC/TPC loops to match RM.
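
Concretely, the nvkm_mask() of 0x419cc0 and the per-TPC 0x48c writes move out
of the main init loops and into the new init_419cc0 hook (quoted from the diff
below):

	void
	gf100_gr_init_419cc0(struct gf100_gr *gr)
	{
		struct nvkm_device *device = gr->base.engine.subdev.device;
		int gpc, tpc;

		/* previously done inline in gf100_gr_init()/gk104_gr_init() */
		nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);

		/* per-TPC write pulled out of the main GPC/TPC init loop */
		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
			for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++)
				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
		}
	}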

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 21 +++++++++++++++++--
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf104.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf108.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf110.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf117.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf119.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk104.c    |  4 ++--
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110b.c   |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk208.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm107.c    |  4 ++--
 .../gpu/drm/nouveau/nvkm/engine/gr/gm200.c    |  4 ++--
 .../gpu/drm/nouveau/nvkm/engine/gr/gp100.c    |  4 ++--
 .../gpu/drm/nouveau/nvkm/engine/gr/gp102.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp107.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp10b.c    |  1 +
 17 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 7ccd8f21c1e0..ad18ef91d34e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1914,6 +1914,20 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	return 0;
 }
 
+void
+gf100_gr_init_419cc0(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int gpc, tpc;
+
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++)
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+	}
+}
+
 void
 gf100_gr_init_40601c(struct gf100_gr *gr)
 {
@@ -2042,7 +2056,10 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+	if (gr->func->init_419cc0)
+		gr->func->init_419cc0(gr);
+
 	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
@@ -2054,7 +2071,6 @@ gf100_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
@@ -2113,6 +2129,7 @@ gf100_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c292cf3bebb1..cab0948f507d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -134,6 +134,7 @@ struct gf100_gr_func {
 	void (*init_ds_hww_esr_2)(struct gf100_gr *);
 	void (*init_40601c)(struct gf100_gr *);
 	void (*init_sked_hww_esr)(struct gf100_gr *);
+	void (*init_419cc0)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
@@ -157,6 +158,7 @@ void gf100_gr_init_zcull(struct gf100_gr *);
 void gf100_gr_init_num_active_ltcs(struct gf100_gr *);
 void gf100_gr_init_fecs_exceptions(struct gf100_gr *);
 void gf100_gr_init_40601c(struct gf100_gr *);
+void gf100_gr_init_419cc0(struct gf100_gr *);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 61d6eef70e68..8598e15b40af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -121,6 +121,7 @@ gf104_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index d4f712e7d6e9..83c71ff51a2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -119,6 +119,7 @@ gf108_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 1b6c2f32ec92..b2b44890ae95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -93,6 +93,7 @@ gf110_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index ae76e8183d1d..ec91a595affe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -157,6 +157,7 @@ gf117_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index a3970c31f951..570527f0370c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -184,6 +184,7 @@ gf119_gr = {
 	.init_num_active_ltcs = gf100_gr_init_num_active_ltcs,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 6ba604edaf95..dcd59af25420 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -460,7 +460,7 @@ gk104_gr_init(struct gf100_gr *gr)
 	gr->func->init_sked_hww_esr(gr);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	gr->func->init_419cc0(gr);
 	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
 
 	gr->func->init_ppc_exceptions(gr);
@@ -474,7 +474,6 @@ gk104_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
@@ -534,6 +533,7 @@ gk104_gr = {
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gk104_gr_init_fecs_exceptions,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 7a07d24cc227..bea0cd324a72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -344,6 +344,7 @@ gk110_gr = {
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 1c9f59cde3d2..e4ae88de8958 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -110,6 +110,7 @@ gk110b_gr = {
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 40c87242b54d..1e214d94ae98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -169,6 +169,7 @@ gk208_gr = {
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 3d180edbdcd7..df668b9e00e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -381,7 +381,7 @@ gm107_gr_init(struct gf100_gr *gr)
 	gr->func->init_sked_hww_esr(gr);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	gr->func->init_419cc0(gr);
 
 	gr->func->init_ppc_exceptions(gr);
 
@@ -394,7 +394,6 @@ gm107_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
@@ -457,6 +456,7 @@ gm107_gr = {
 	.init_bios_2 = gm107_gr_init_bios_2,
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 9436ab37aa9e..9f8171aae601 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -109,7 +109,7 @@ gm200_gr_init(struct gf100_gr *gr)
 	gr->func->init_sked_hww_esr(gr);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	gr->func->init_419cc0(gr);
 
 	gr->func->init_ppc_exceptions(gr);
 
@@ -122,7 +122,6 @@ gm200_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
@@ -203,6 +202,7 @@ gm200_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 72ea16ee842a..7518d249f648 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -79,7 +79,7 @@ gp100_gr_init(struct gf100_gr *gr)
 	gr->func->init_sked_hww_esr(gr);
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	gr->func->init_419cc0(gr);
 
 	nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
 	nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
@@ -95,7 +95,6 @@ gp100_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
@@ -135,6 +134,7 @@ gp100_gr = {
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 309815bef601..230e2eeb1701 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -52,6 +52,7 @@ gp102_gr = {
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 1ae9e7d991f9..55b7a7e70cde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -38,6 +38,7 @@ gp107_gr = {
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 68e212823063..b13a48bc7da7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -36,6 +36,7 @@ gp10b_gr = {
 	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,

From 0a84a51334b5d75decd23b735aab00ff4698eeb2 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1059/1286] drm/nouveau/gr/gf100-: virtualise init_419eb4 +
 apply fixes from traces

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
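These "virtualise" patches all follow the same shape: a register write that
was previously hardcoded in the shared init path becomes a per-chipset hook
in gf100_gr_func, and the shared path calls through the pointer, guarded
where a chipset may legitimately leave the hook unset. As a rough sketch of
that shape (the exact hunks follow below):

	/* hook declared in gf100_gr_func (gf100.h) */
	void (*init_419eb4)(struct gf100_gr *);

	/* default implementation, preserving the old hardcoded write */
	void
	gf100_gr_init_419eb4(struct gf100_gr *gr)
	{
		struct nvkm_device *device = gr->base.engine.subdev.device;
		nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
	}

	/* shared gf100_gr_init() path */
	if (gr->func->init_419eb4)
		gr->func->init_419eb4(gr);
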
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 12 ++++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  |  4 ++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  | 15 +++++++++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c |  1 +
 10 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ad18ef91d34e..947278274846 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1914,6 +1914,13 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	return 0;
 }
 
+void
+gf100_gr_init_419eb4(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+}
+
 void
 gf100_gr_init_419cc0(struct gf100_gr *gr)
 {
@@ -2059,8 +2066,8 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	if (gr->func->init_419cc0)
 		gr->func->init_419cc0(gr);
-
-	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+	if (gr->func->init_419eb4)
+		gr->func->init_419eb4(gr);
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
@@ -2130,6 +2137,7 @@ gf100_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gf100_gr_init_419eb4,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index cab0948f507d..1dae373fa0d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -135,6 +135,7 @@ struct gf100_gr_func {
 	void (*init_40601c)(struct gf100_gr *);
 	void (*init_sked_hww_esr)(struct gf100_gr *);
 	void (*init_419cc0)(struct gf100_gr *);
+	void (*init_419eb4)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
@@ -159,6 +160,7 @@ void gf100_gr_init_num_active_ltcs(struct gf100_gr *);
 void gf100_gr_init_fecs_exceptions(struct gf100_gr *);
 void gf100_gr_init_40601c(struct gf100_gr *);
 void gf100_gr_init_419cc0(struct gf100_gr *);
+void gf100_gr_init_419eb4(struct gf100_gr *);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
@@ -168,6 +170,8 @@ void gk104_gr_init_rop_active_fbps(struct gf100_gr *);
 void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
 void gk104_gr_init_sked_hww_esr(struct gf100_gr *);
 
+void gk110_gr_init_419eb4(struct gf100_gr *);
+
 int gk20a_gr_init(struct gf100_gr *);
 
 int gm200_gr_rops(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 8598e15b40af..59d4dac97d8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -122,6 +122,7 @@ gf104_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gf100_gr_init_419eb4,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 83c71ff51a2e..73cb23a71c52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -120,6 +120,7 @@ gf108_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gf100_gr_init_419eb4,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index b2b44890ae95..1d7188e844f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -94,6 +94,7 @@ gf110_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gf100_gr_init_419eb4,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index ec91a595affe..34655676ce39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -158,6 +158,7 @@ gf117_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gf100_gr_init_419eb4,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 570527f0370c..28c00cb19995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -185,6 +185,7 @@ gf119_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gf100_gr_init_419eb4,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index dcd59af25420..775b47428486 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -461,7 +461,7 @@ gk104_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	gr->func->init_419cc0(gr);
-	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+	gr->func->init_419eb4(gr);
 
 	gr->func->init_ppc_exceptions(gr);
 
@@ -534,6 +534,7 @@ gk104_gr = {
 	.init_fecs_exceptions = gk104_gr_init_fecs_exceptions,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index bea0cd324a72..daaa4492d764 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -334,6 +334,20 @@ gk110_gr_gpccs_ucode = {
 	.data.size = sizeof(gk110_grgpc_data),
 };
 
+void
+gk110_gr_init_419eb4(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+	nvkm_mask(device, 0x419eb4, 0x00002000, 0x00002000);
+	nvkm_mask(device, 0x419eb4, 0x00004000, 0x00004000);
+	nvkm_mask(device, 0x419eb4, 0x00008000, 0x00008000);
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00000000);
+	nvkm_mask(device, 0x419eb4, 0x00002000, 0x00000000);
+	nvkm_mask(device, 0x419eb4, 0x00004000, 0x00000000);
+	nvkm_mask(device, 0x419eb4, 0x00008000, 0x00000000);
+}
+
 static const struct gf100_gr_func
 gk110_gr = {
 	.init = gk104_gr_init,
@@ -345,6 +359,7 @@ gk110_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gk110_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index e4ae88de8958..ee56b00c25d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -111,6 +111,7 @@ gk110b_gr = {
 	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419eb4 = gk110_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,

From 778f18c607e30206c1a791a4d356f1ed32bc1947 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1060/1286] drm/nouveau/gr/gf100-: virtualise init_419c9c +
 apply fixes from traces

The 0x419c9c writes are deliberately removed from the non-GP100 chipsets, as
RM doesn't touch that register there.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c | 14 +++++++++++---
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 947278274846..a379794df761 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2068,6 +2068,8 @@ gf100_gr_init(struct gf100_gr *gr)
 		gr->func->init_419cc0(gr);
 	if (gr->func->init_419eb4)
 		gr->func->init_419eb4(gr);
+	if (gr->func->init_419c9c)
+		gr->func->init_419c9c(gr);
 
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 1dae373fa0d7..e328957453fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -136,6 +136,7 @@ struct gf100_gr_func {
 	void (*init_sked_hww_esr)(struct gf100_gr *);
 	void (*init_419cc0)(struct gf100_gr *);
 	void (*init_419eb4)(struct gf100_gr *);
+	void (*init_419c9c)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 7518d249f648..3fcbe1fc19cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -30,6 +30,14 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+static void
+gp100_gr_init_419c9c(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
+	nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
+}
+
 void
 gp100_gr_init_fecs_exceptions(struct gf100_gr *gr)
 {
@@ -80,9 +88,8 @@ gp100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x405840, 0xc0000000);
 	nvkm_wr32(device, 0x405844, 0x00ffffff);
 	gr->func->init_419cc0(gr);
-
-	nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
-	nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
+	if (gr->func->init_419c9c)
+		gr->func->init_419c9c(gr);
 
 	gr->func->init_ppc_exceptions(gr);
 
@@ -135,6 +142,7 @@ gp100_gr = {
 	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_419c9c = gp100_gr_init_419c9c,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,

From 70d2148209abd851768396cb4719710de9aeddc0 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1061/1286] drm/nouveau/gr/gf100-: virtualise
 init_ppc_exceptions

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index a379794df761..776442582b97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2071,6 +2071,9 @@ gf100_gr_init(struct gf100_gr *gr)
 	if (gr->func->init_419c9c)
 		gr->func->init_419c9c(gr);
 
+	if (gr->func->init_ppc_exceptions)
+		gr->func->init_ppc_exceptions(gr);
+
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);

From f3ef80c0c491bd1a5ae4c02acbdabb8c9cedb315 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1062/1286] drm/nouveau/gr/gf100-: virtualise init_tex_hww_esr

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
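From this patch on, the hooks are per-TPC: they take the GPC and TPC indices
so the shared per-GPC/per-TPC init loops can call them. A sketch of the
call-site shape (TPC_UNIT() is assumed here to be the usual gf100 per-unit
offset macro, roughly 0x504000 + gpc * 0x8000 + tpc * 0x800 + reg):

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
			if (gr->func->init_tex_hww_esr)
				gr->func->init_tex_hww_esr(gr, gpc, tpc);
		}
	}
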
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 11 ++++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c  |  1 +
 17 files changed, 31 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 776442582b97..25f64ece92a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1914,6 +1914,13 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	return 0;
 }
 
+void
+gf100_gr_init_tex_hww_esr(struct gf100_gr *gr, int tpc, int gpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+}
+
 void
 gf100_gr_init_419eb4(struct gf100_gr *gr)
 {
@@ -2082,7 +2089,8 @@ gf100_gr_init(struct gf100_gr *gr)
 		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			if (gr->func->init_tex_hww_esr)
+				gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
@@ -2143,6 +2151,7 @@ gf100_gr = {
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index e328957453fb..0bbe11dd8bdd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -138,6 +138,7 @@ struct gf100_gr_func {
 	void (*init_419eb4)(struct gf100_gr *);
 	void (*init_419c9c)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
+	void (*init_tex_hww_esr)(struct gf100_gr *, int gpc, int tpc);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -162,6 +163,7 @@ void gf100_gr_init_fecs_exceptions(struct gf100_gr *);
 void gf100_gr_init_40601c(struct gf100_gr *);
 void gf100_gr_init_419cc0(struct gf100_gr *);
 void gf100_gr_init_419eb4(struct gf100_gr *);
+void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 59d4dac97d8c..ff03f13b087f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -123,6 +123,7 @@ gf104_gr = {
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 73cb23a71c52..ce60f5aa9901 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -121,6 +121,7 @@ gf108_gr = {
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 1d7188e844f5..4c21cbe3df97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -95,6 +95,7 @@ gf110_gr = {
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 34655676ce39..10340e9f3e1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -159,6 +159,7 @@ gf117_gr = {
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 28c00cb19995..5147c590dc20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -186,6 +186,7 @@ gf119_gr = {
 	.init_40601c = gf100_gr_init_40601c,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 775b47428486..1805196372d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -473,7 +473,7 @@ gk104_gr_init(struct gf100_gr *gr)
 		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
@@ -536,6 +536,7 @@ gk104_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
 	.gpccs.ucode = &gk104_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index daaa4492d764..f4c73a929756 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -361,6 +361,7 @@ gk110_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gk110_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index ee56b00c25d0..b585ab22ccc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -113,6 +113,7 @@ gk110b_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gk110_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 1e214d94ae98..7d1f585a7e54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -171,6 +171,7 @@ gk208_gr = {
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
 	.gpccs.ucode = &gk208_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index df668b9e00e4..8ee7723bfee4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -393,7 +393,7 @@ gm107_gr_init(struct gf100_gr *gr)
 		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
@@ -458,6 +458,7 @@ gm107_gr = {
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 9f8171aae601..0d020098f6f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -121,7 +121,7 @@ gm200_gr_init(struct gf100_gr *gr)
 		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
@@ -204,6 +204,7 @@ gm200_gr = {
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gm200_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 3fcbe1fc19cf..c7986087a3df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -101,7 +101,7 @@ gp100_gr_init(struct gf100_gr *gr)
 		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
@@ -144,6 +144,7 @@ gp100_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419c9c = gp100_gr_init_419c9c,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gp100_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 230e2eeb1701..a7fe2d9f4859 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -54,6 +54,7 @@ gp102_gr = {
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
 	.grctx = &gp102_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 55b7a7e70cde..f085d01f3611 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -40,6 +40,7 @@ gp107_gr = {
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp107_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index b13a48bc7da7..9530edc837a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -38,6 +38,7 @@ gp10b_gr = {
 	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp102_grctx,

From ab4d49a349653dcd902be8974c4f7927cd49b11d Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1063/1286] drm/nouveau/gr/gf100-: virtualise init_504430

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h |  3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c | 10 +++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c |  1 +
 8 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 25f64ece92a4..274ff014ab9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2092,6 +2092,8 @@ gf100_gr_init(struct gf100_gr *gr)
 			if (gr->func->init_tex_hww_esr)
 				gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			if (gr->func->init_504430)
+				gr->func->init_504430(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
 		}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 0bbe11dd8bdd..81d5931eff32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -139,6 +139,7 @@ struct gf100_gr_func {
 	void (*init_419c9c)(struct gf100_gr *);
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_tex_hww_esr)(struct gf100_gr *, int gpc, int tpc);
+	void (*init_504430)(struct gf100_gr *, int gpc, int tpc);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -175,6 +176,8 @@ void gk104_gr_init_sked_hww_esr(struct gf100_gr *);
 
 void gk110_gr_init_419eb4(struct gf100_gr *);
 
+void gm107_gr_init_504430(struct gf100_gr *, int, int);
+
 int gk20a_gr_init(struct gf100_gr *);
 
 int gm200_gr_rops(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 8ee7723bfee4..4c8c2561282b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -281,6 +281,13 @@ gm107_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+void
+gm107_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+}
+
 static void
 gm107_gr_init_bios_2(struct gf100_gr *gr)
 {
@@ -395,7 +402,7 @@ gm107_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			gr->func->init_504430(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
 		}
@@ -459,6 +466,7 @@ gm107_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_504430 = gm107_gr_init_504430,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 0d020098f6f3..69f191ec78ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -123,7 +123,7 @@ gm200_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			gr->func->init_504430(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
 		}
@@ -205,6 +205,7 @@ gm200_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_504430 = gm107_gr_init_504430,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gm200_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index c7986087a3df..76a9416fa4ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -103,7 +103,7 @@ gp100_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			gr->func->init_504430(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
 		}
@@ -145,6 +145,7 @@ gp100_gr = {
 	.init_419c9c = gp100_gr_init_419c9c,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_504430 = gm107_gr_init_504430,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gp100_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index a7fe2d9f4859..7072578be648 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -55,6 +55,7 @@ gp102_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_504430 = gm107_gr_init_504430,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
 	.grctx = &gp102_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index f085d01f3611..f23f70f027e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -41,6 +41,7 @@ gp107_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_504430 = gm107_gr_init_504430,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp107_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 9530edc837a9..0ff175960a36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -39,6 +39,7 @@ gp10b_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_504430 = gm107_gr_init_504430,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp102_grctx,

From 4615e9b4387f142e4ff495dc61525249a6926e91 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1064/1286] drm/nouveau/gr/gf100-: virtualise
 init_shader_exceptions

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 14 +++++++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  |  4 ++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  | 12 ++++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c  |  4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c  | 12 ++++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c  |  1 +
 17 files changed, 50 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 274ff014ab9e..071b3c1a93ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1915,7 +1915,15 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 }
 
 void
-gf100_gr_init_tex_hww_esr(struct gf100_gr *gr, int tpc, int gpc)
+gf100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+}
+
+void
+gf100_gr_init_tex_hww_esr(struct gf100_gr *gr, int gpc, int tpc)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
@@ -2094,8 +2102,7 @@ gf100_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			if (gr->func->init_504430)
 				gr->func->init_504430(gr, gpc, tpc);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+			gr->func->init_shader_exceptions(gr, gpc, tpc);
 		}
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -2154,6 +2161,7 @@ gf100_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 81d5931eff32..02505c020f6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -140,6 +140,7 @@ struct gf100_gr_func {
 	void (*init_ppc_exceptions)(struct gf100_gr *);
 	void (*init_tex_hww_esr)(struct gf100_gr *, int gpc, int tpc);
 	void (*init_504430)(struct gf100_gr *, int gpc, int tpc);
+	void (*init_shader_exceptions)(struct gf100_gr *, int gpc, int tpc);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -165,6 +166,7 @@ void gf100_gr_init_40601c(struct gf100_gr *);
 void gf100_gr_init_419cc0(struct gf100_gr *);
 void gf100_gr_init_419eb4(struct gf100_gr *);
 void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
+void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
@@ -177,6 +179,7 @@ void gk104_gr_init_sked_hww_esr(struct gf100_gr *);
 void gk110_gr_init_419eb4(struct gf100_gr *);
 
 void gm107_gr_init_504430(struct gf100_gr *, int, int);
+void gm107_gr_init_shader_exceptions(struct gf100_gr *, int, int);
 
 int gk20a_gr_init(struct gf100_gr *);
 
@@ -187,6 +190,7 @@ void gm200_gr_init_ds_hww_esr_2(struct gf100_gr *);
 int gp100_gr_init(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
 void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
+void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
 
 void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index ff03f13b087f..c61544487ed7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -124,6 +124,7 @@ gf104_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index ce60f5aa9901..8e8a99e54f56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -122,6 +122,7 @@ gf108_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 4c21cbe3df97..6496411b7dfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -96,6 +96,7 @@ gf110_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 10340e9f3e1a..21aa8e227002 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -160,6 +160,7 @@ gf117_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 5147c590dc20..7d73a9f8ca48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -187,6 +187,7 @@ gf119_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 1805196372d5..20735319e920 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -475,8 +475,7 @@ gk104_gr_init(struct gf100_gr *gr)
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
 			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+			gr->func->init_shader_exceptions(gr, gpc, tpc);
 		}
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -537,6 +536,7 @@ gk104_gr = {
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
 	.gpccs.ucode = &gk104_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f4c73a929756..e52cc2423353 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -362,6 +362,7 @@ gk110_gr = {
 	.init_419eb4 = gk110_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index b585ab22ccc1..35303818c4a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -114,6 +114,7 @@ gk110b_gr = {
 	.init_419eb4 = gk110_gr_init_419eb4,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 7d1f585a7e54..4e5cd540a321 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -172,6 +172,7 @@ gk208_gr = {
 	.init_419cc0 = gf100_gr_init_419cc0,
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
 	.gpccs.ucode = &gk208_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 4c8c2561282b..3b8ae137798f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -281,6 +281,14 @@ gm107_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+void
+gm107_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+}
+
 void
 gm107_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
 {
@@ -403,8 +411,7 @@ gm107_gr_init(struct gf100_gr *gr)
 			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			gr->func->init_504430(gr, gpc, tpc);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+			gr->func->init_shader_exceptions(gr, gpc, tpc);
 		}
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -467,6 +474,7 @@ gm107_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
+	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 69f191ec78ab..3e2a78dfb1fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -124,8 +124,7 @@ gm200_gr_init(struct gf100_gr *gr)
 			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			gr->func->init_504430(gr, gpc, tpc);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+			gr->func->init_shader_exceptions(gr, gpc, tpc);
 		}
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -206,6 +205,7 @@ gm200_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
+	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gm200_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 76a9416fa4ed..48a0e144d983 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -30,6 +30,14 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+void
+gp100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
+}
+
 static void
 gp100_gr_init_419c9c(struct gf100_gr *gr)
 {
@@ -104,8 +112,7 @@ gp100_gr_init(struct gf100_gr *gr)
 			gr->func->init_tex_hww_esr(gr, gpc, tpc);
 			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
 			gr->func->init_504430(gr, gpc, gpc);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
+			gr->func->init_shader_exceptions(gr, gpc, tpc);
 		}
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
 		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -146,6 +153,7 @@ gp100_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
+	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gp100_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 7072578be648..7ce06520fdc9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -56,6 +56,7 @@ gp102_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
+	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 3,
 	.grctx = &gp102_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index f23f70f027e2..dbb2d4e71442 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -42,6 +42,7 @@ gp107_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
+	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp107_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 0ff175960a36..7f23d8bd977f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -40,6 +40,7 @@ gp10b_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
+	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gp102_grctx,

From 6df6d2b95e9411f499b7d71a6cc495cff7548d6e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1065/1286] drm/nouveau/gr/gf100-: apply be exception fixes
 from traces

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
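Note: the functional change is limited to the two ROP exception enable registers, where the value taken from traces drops bit 31 (0xc0000000 becomes 0x40000000). A standalone sketch of that bit arithmetic, in plain C and not nouveau code:

#include <stdio.h>

int main(void)
{
	unsigned int old_val = 0xc0000000;	/* bits 31 and 30 set */
	unsigned int new_val = 0x40000000;	/* bit 30 only */

	/* which enable bits are no longer set in ROP 0x144/0x070 */
	printf("cleared: 0x%08x\n", old_val & ~new_val);
	return 0;
}
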
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 4 ++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 071b3c1a93ab..452b52798603 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2109,8 +2109,8 @@ gf100_gr_init(struct gf100_gr *gr)
 	}
 
 	for (rop = 0; rop < gr->rop_nr; rop++) {
-		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
 		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
 		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 20735319e920..1822509fca21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -482,8 +482,8 @@ gk104_gr_init(struct gf100_gr *gr)
 	}
 
 	for (rop = 0; rop < gr->rop_nr; rop++) {
-		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
 		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
 		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}

From 04547482aed8c77b823de9427c3f0a7b481a351a Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1066/1286] drm/nouveau/gr/gf100-: virtualise init_400054

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
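Note: the 0x400054 write becomes an optional per-chipset hook. gf100_gr_init() only calls it when the chipset provides one; the Fermi/Kepler implementation keeps 0x34ce3464 while GM107/GM200 use 0x2c350f63. A minimal standalone sketch of that optional-hook dispatch, using simplified stand-in types rather than the real gf100_gr structures:

#include <stdio.h>

struct gr;

struct gr_func {
	/* left NULL when a chipset should not write 0x400054 at all */
	void (*init_400054)(struct gr *);
};

struct gr {
	const struct gr_func *func;
};

static void fermi_init_400054(struct gr *gr)
{
	(void)gr;
	printf("wr32(0x400054, 0x34ce3464)\n");
}

static const struct gr_func fermi = { .init_400054 = fermi_init_400054 };

int main(void)
{
	struct gr gr = { .func = &fermi };

	if (gr.func->init_400054)	/* the check gf100_gr_init() now performs */
		gr.func->init_400054(&gr);
	return 0;
}
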
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 10 +++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  |  3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  |  9 ++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c  |  3 ++-
 13 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 452b52798603..ac4c0d8478fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1914,6 +1914,12 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	return 0;
 }
 
+void
+gf100_gr_init_400054(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464);
+}
+
 void
 gf100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
 {
@@ -2122,7 +2128,8 @@ gf100_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x40011c, 0xffffffff);
 	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nvkm_wr32(device, 0x400054, 0x34ce3464);
+	if (gr->func->init_400054)
+		gr->func->init_400054(gr);
 
 	gf100_gr_zbc_init(gr);
 
@@ -2162,6 +2169,7 @@ gf100_gr = {
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 02505c020f6a..9a57f1a7fb7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -141,6 +141,7 @@ struct gf100_gr_func {
 	void (*init_tex_hww_esr)(struct gf100_gr *, int gpc, int tpc);
 	void (*init_504430)(struct gf100_gr *, int gpc, int tpc);
 	void (*init_shader_exceptions)(struct gf100_gr *, int gpc, int tpc);
+	void (*init_400054)(struct gf100_gr *);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -167,6 +168,7 @@ void gf100_gr_init_419cc0(struct gf100_gr *);
 void gf100_gr_init_419eb4(struct gf100_gr *);
 void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
 void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
+void gf100_gr_init_400054(struct gf100_gr *);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
@@ -180,6 +182,7 @@ void gk110_gr_init_419eb4(struct gf100_gr *);
 
 void gm107_gr_init_504430(struct gf100_gr *, int, int);
 void gm107_gr_init_shader_exceptions(struct gf100_gr *, int, int);
+void gm107_gr_init_400054(struct gf100_gr *);
 
 int gk20a_gr_init(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index c61544487ed7..d5276cab7f7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -125,6 +125,7 @@ gf104_gr = {
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 8e8a99e54f56..8f22a311dccb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -123,6 +123,7 @@ gf108_gr = {
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 6496411b7dfe..fcbfdc7e9b26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -97,6 +97,7 @@ gf110_gr = {
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 21aa8e227002..f526ccddaf5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -161,6 +161,7 @@ gf117_gr = {
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 7d73a9f8ca48..5d3d22fb5c86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -188,6 +188,7 @@ gf119_gr = {
 	.init_419eb4 = gf100_gr_init_419eb4,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 1822509fca21..36f32e6ef937 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -495,7 +495,7 @@ gk104_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x40011c, 0xffffffff);
 	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nvkm_wr32(device, 0x400054, 0x34ce3464);
+	gr->func->init_400054(gr);
 
 	gf100_gr_zbc_init(gr);
 
@@ -537,6 +537,7 @@ gk104_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
 	.gpccs.ucode = &gk104_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index e52cc2423353..7bcea8181919 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -363,6 +363,7 @@ gk110_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 35303818c4a2..5af2a54e60bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -115,6 +115,7 @@ gk110b_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 4e5cd540a321..dd08373a96b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -173,6 +173,7 @@ gk208_gr = {
 	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
+	.init_400054 = gf100_gr_init_400054,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
 	.gpccs.ucode = &gk208_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 3b8ae137798f..4680d99b59ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -281,6 +281,12 @@ gm107_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
+void
+gm107_gr_init_400054(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x2c350f63);
+}
+
 void
 gm107_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
 {
@@ -431,7 +437,7 @@ gm107_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x40011c, 0xffffffff);
 	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nvkm_wr32(device, 0x400054, 0x2c350f63);
+	gr->func->init_400054(gr);
 
 	gf100_gr_zbc_init(gr);
 
@@ -475,6 +481,7 @@ gm107_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
+	.init_400054 = gm107_gr_init_400054,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 3e2a78dfb1fd..bbe18873aee2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -144,7 +144,7 @@ gm200_gr_init(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x40011c, 0xffffffff);
 	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nvkm_wr32(device, 0x400054, 0x2c350f63);
+	gr->func->init_400054(gr);
 
 	gf100_gr_zbc_init(gr);
 
@@ -206,6 +206,7 @@ gm200_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
+	.init_400054 = gm107_gr_init_400054,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gm200_grctx,

From 525230cb204db5edb0ffc42e324612809c663c75 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1067/1286] drm/nouveau/gr/gf100-: delete duplicated init code

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  2 -
 .../gpu/drm/nouveau/nvkm/engine/gr/gk104.c    | 78 +------------------
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110b.c   |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gk208.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gm107.c    | 77 +-----------------
 .../gpu/drm/nouveau/nvkm/engine/gr/gm200.c    | 76 +-----------------
 .../gpu/drm/nouveau/nvkm/engine/gr/gp100.c    | 77 +-----------------
 .../gpu/drm/nouveau/nvkm/engine/gr/gp102.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gp107.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gp10b.c    |  2 +-
 11 files changed, 10 insertions(+), 312 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 9a57f1a7fb7c..9ed51dc851df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -172,7 +172,6 @@ void gf100_gr_init_400054(struct gf100_gr *);
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
-int gk104_gr_init(struct gf100_gr *);
 void gk104_gr_init_vsc_stream_master(struct gf100_gr *);
 void gk104_gr_init_rop_active_fbps(struct gf100_gr *);
 void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
@@ -190,7 +189,6 @@ int gm200_gr_rops(struct gf100_gr *);
 void gm200_gr_init_num_active_ltcs(struct gf100_gr *);
 void gm200_gr_init_ds_hww_esr_2(struct gf100_gr *);
 
-int gp100_gr_init(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
 void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
 void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 36f32e6ef937..2e5c48b61ac8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -426,82 +426,6 @@ gk104_gr_init_vsc_stream_master(struct gf100_gr *gr)
 	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 }
 
-int
-gk104_gr_init(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int gpc, tpc, rop;
-
-	gr->func->init_gpc_mmu(gr);
-
-	gf100_gr_mmio(gr, gr->func->mmio);
-	if (gr->func->clkgate_pack)
-		nvkm_therm_clkgate_init(gr->base.engine.subdev.device->therm,
-					gr->func->clkgate_pack);
-
-	gr->func->init_vsc_stream_master(gr);
-	gr->func->init_zcull(gr);
-	gr->func->init_num_active_ltcs(gr);
-
-	gr->func->init_rop_active_fbps(gr);
-
-	nvkm_wr32(device, 0x400500, 0x00010001);
-
-	nvkm_wr32(device, 0x400100, 0xffffffff);
-	nvkm_wr32(device, 0x40013c, 0xffffffff);
-	nvkm_wr32(device, 0x400124, 0x00000002);
-
-	gr->func->init_fecs_exceptions(gr);
-	nvkm_wr32(device, 0x404000, 0xc0000000);
-	nvkm_wr32(device, 0x404600, 0xc0000000);
-	nvkm_wr32(device, 0x408030, 0xc0000000);
-	nvkm_wr32(device, 0x404490, 0xc0000000);
-	nvkm_wr32(device, 0x406018, 0xc0000000);
-	gr->func->init_sked_hww_esr(gr);
-	nvkm_wr32(device, 0x405840, 0xc0000000);
-	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	gr->func->init_419cc0(gr);
-	gr->func->init_419eb4(gr);
-
-	gr->func->init_ppc_exceptions(gr);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			gr->func->init_tex_hww_esr(gr, gpc, tpc);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			gr->func->init_shader_exceptions(gr, gpc, tpc);
-		}
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
-	}
-
-	for (rop = 0; rop < gr->rop_nr; rop++) {
-		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
-	}
-
-	nvkm_wr32(device, 0x400108, 0xffffffff);
-	nvkm_wr32(device, 0x400138, 0xffffffff);
-	nvkm_wr32(device, 0x400118, 0xffffffff);
-	nvkm_wr32(device, 0x400130, 0xffffffff);
-	nvkm_wr32(device, 0x40011c, 0xffffffff);
-	nvkm_wr32(device, 0x400134, 0xffffffff);
-
-	gr->func->init_400054(gr);
-
-	gf100_gr_zbc_init(gr);
-
-	return gf100_gr_init_ctxctl(gr);
-}
-
 #include "fuc/hubgk104.fuc3.h"
 
 static struct gf100_gr_ucode
@@ -524,7 +448,7 @@ gk104_gr_gpccs_ucode = {
 
 static const struct gf100_gr_func
 gk104_gr = {
-	.init = gk104_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 7bcea8181919..9adb55f658c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -350,7 +350,7 @@ gk110_gr_init_419eb4(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gk110_gr = {
-	.init = gk104_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 5af2a54e60bc..f848f1578cd3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -102,7 +102,7 @@ gk110b_gr_pack_mmio[] = {
 
 static const struct gf100_gr_func
 gk110b_gr = {
-	.init = gk104_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index dd08373a96b5..9c678f17b2fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -161,7 +161,7 @@ gk208_gr_gpccs_ucode = {
 
 static const struct gf100_gr_func
 gk208_gr = {
-	.init = gk104_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 4680d99b59ba..c598fa5a68e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -369,81 +369,6 @@ gm107_gr_init_gpc_mmu(struct gf100_gr *gr)
 	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
 }
 
-static int
-gm107_gr_init(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int gpc, tpc, rop;
-
-	gr->func->init_gpc_mmu(gr);
-
-	gf100_gr_mmio(gr, gr->func->mmio);
-
-	gr->func->init_bios(gr);
-
-	gr->func->init_vsc_stream_master(gr);
-	gr->func->init_zcull(gr);
-	gr->func->init_num_active_ltcs(gr);
-
-	gr->func->init_rop_active_fbps(gr);
-
-	nvkm_wr32(device, 0x400500, 0x00010001);
-
-	nvkm_wr32(device, 0x400100, 0xffffffff);
-	nvkm_wr32(device, 0x40013c, 0xffffffff);
-	nvkm_wr32(device, 0x400124, 0x00000002);
-	gr->func->init_fecs_exceptions(gr);
-
-	nvkm_wr32(device, 0x404000, 0xc0000000);
-	nvkm_wr32(device, 0x404600, 0xc0000000);
-	nvkm_wr32(device, 0x408030, 0xc0000000);
-	nvkm_wr32(device, 0x404490, 0xc0000000);
-	nvkm_wr32(device, 0x406018, 0xc0000000);
-	gr->func->init_sked_hww_esr(gr);
-	nvkm_wr32(device, 0x405840, 0xc0000000);
-	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	gr->func->init_419cc0(gr);
-
-	gr->func->init_ppc_exceptions(gr);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			gr->func->init_tex_hww_esr(gr, gpc, tpc);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			gr->func->init_504430(gr, gpc, tpc);
-			gr->func->init_shader_exceptions(gr, gpc, tpc);
-		}
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
-	}
-
-	for (rop = 0; rop < gr->rop_nr; rop++) {
-		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
-	}
-
-	nvkm_wr32(device, 0x400108, 0xffffffff);
-	nvkm_wr32(device, 0x400138, 0xffffffff);
-	nvkm_wr32(device, 0x400118, 0xffffffff);
-	nvkm_wr32(device, 0x400130, 0xffffffff);
-	nvkm_wr32(device, 0x40011c, 0xffffffff);
-	nvkm_wr32(device, 0x400134, 0xffffffff);
-
-	gr->func->init_400054(gr);
-
-	gf100_gr_zbc_init(gr);
-
-	return gf100_gr_init_ctxctl(gr);
-}
-
 #include "fuc/hubgm107.fuc5.h"
 
 static struct gf100_gr_ucode
@@ -466,7 +391,7 @@ gm107_gr_gpccs_ucode = {
 
 static const struct gf100_gr_func
 gm107_gr = {
-	.init = gm107_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gm107_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index bbe18873aee2..4dcb56bfbca1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -77,80 +77,6 @@ gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
 	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
-static int
-gm200_gr_init(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int gpc, tpc, rop;
-
-	gr->func->init_gpc_mmu(gr);
-
-	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
-
-	gr->func->init_bios(gr);
-
-	gr->func->init_vsc_stream_master(gr);
-	gr->func->init_zcull(gr);
-	gr->func->init_num_active_ltcs(gr);
-
-	gr->func->init_rop_active_fbps(gr);
-
-	nvkm_wr32(device, 0x400500, 0x00010001);
-	nvkm_wr32(device, 0x400100, 0xffffffff);
-	nvkm_wr32(device, 0x40013c, 0xffffffff);
-	nvkm_wr32(device, 0x400124, 0x00000002);
-	gr->func->init_fecs_exceptions(gr);
-	gr->func->init_ds_hww_esr_2(gr);
-	nvkm_wr32(device, 0x404000, 0xc0000000);
-	nvkm_wr32(device, 0x404600, 0xc0000000);
-	nvkm_wr32(device, 0x408030, 0xc0000000);
-	nvkm_wr32(device, 0x404490, 0xc0000000);
-	nvkm_wr32(device, 0x406018, 0xc0000000);
-	gr->func->init_sked_hww_esr(gr);
-	nvkm_wr32(device, 0x405840, 0xc0000000);
-	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	gr->func->init_419cc0(gr);
-
-	gr->func->init_ppc_exceptions(gr);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			gr->func->init_tex_hww_esr(gr, gpc, tpc);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			gr->func->init_504430(gr, gpc, tpc);
-			gr->func->init_shader_exceptions(gr, gpc, tpc);
-		}
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
-	}
-
-	for (rop = 0; rop < gr->rop_nr; rop++) {
-		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
-	}
-
-	nvkm_wr32(device, 0x400108, 0xffffffff);
-	nvkm_wr32(device, 0x400138, 0xffffffff);
-	nvkm_wr32(device, 0x400118, 0xffffffff);
-	nvkm_wr32(device, 0x400130, 0xffffffff);
-	nvkm_wr32(device, 0x40011c, 0xffffffff);
-	nvkm_wr32(device, 0x400134, 0xffffffff);
-
-	gr->func->init_400054(gr);
-
-	gf100_gr_zbc_init(gr);
-
-	return gf100_gr_init_ctxctl(gr);
-}
-
 int
 gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	      int index, struct nvkm_gr **pgr)
@@ -191,7 +117,7 @@ gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 
 static const struct gf100_gr_func
 gm200_gr = {
-	.init = gm200_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 48a0e144d983..cc507e830511 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -62,84 +62,9 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
-int
-gp100_gr_init(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int gpc, tpc, rop;
-
-	gr->func->init_gpc_mmu(gr);
-
-	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
-
-	gr->func->init_vsc_stream_master(gr);
-	gr->func->init_zcull(gr);
-
-	gr->func->init_num_active_ltcs(gr);
-
-	gr->func->init_rop_active_fbps(gr);
-	if (gr->func->init_swdx_pes_mask)
-		gr->func->init_swdx_pes_mask(gr);
-
-	nvkm_wr32(device, 0x400500, 0x00010001);
-	nvkm_wr32(device, 0x400100, 0xffffffff);
-	nvkm_wr32(device, 0x40013c, 0xffffffff);
-	nvkm_wr32(device, 0x400124, 0x00000002);
-	gr->func->init_fecs_exceptions(gr);
-	gr->func->init_ds_hww_esr_2(gr);
-	nvkm_wr32(device, 0x404000, 0xc0000000);
-	nvkm_wr32(device, 0x404600, 0xc0000000);
-	nvkm_wr32(device, 0x408030, 0xc0000000);
-	nvkm_wr32(device, 0x404490, 0xc0000000);
-	nvkm_wr32(device, 0x406018, 0xc0000000);
-	gr->func->init_sked_hww_esr(gr);
-	nvkm_wr32(device, 0x405840, 0xc0000000);
-	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	gr->func->init_419cc0(gr);
-	if (gr->func->init_419c9c)
-		gr->func->init_419c9c(gr);
-
-	gr->func->init_ppc_exceptions(gr);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			gr->func->init_tex_hww_esr(gr, gpc, tpc);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			gr->func->init_504430(gr, gpc, gpc);
-			gr->func->init_shader_exceptions(gr, gpc, tpc);
-		}
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
-	}
-
-	for (rop = 0; rop < gr->rop_nr; rop++) {
-		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
-	}
-
-	nvkm_wr32(device, 0x400108, 0xffffffff);
-	nvkm_wr32(device, 0x400138, 0xffffffff);
-	nvkm_wr32(device, 0x400118, 0xffffffff);
-	nvkm_wr32(device, 0x400130, 0xffffffff);
-	nvkm_wr32(device, 0x40011c, 0xffffffff);
-	nvkm_wr32(device, 0x400134, 0xffffffff);
-
-	gf100_gr_zbc_init(gr);
-
-	return gf100_gr_init_ctxctl(gr);
-}
-
 static const struct gf100_gr_func
 gp100_gr = {
-	.init = gp100_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 7ce06520fdc9..86d1ff777d67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -42,7 +42,7 @@ gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gp102_gr = {
-	.init = gp100_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index dbb2d4e71442..14007b5d2e41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -28,7 +28,7 @@
 
 static const struct gf100_gr_func
 gp107_gr = {
-	.init = gp100_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 7f23d8bd977f..450a96d1cd07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -27,7 +27,7 @@
 
 static const struct gf100_gr_func
 gp10b_gr = {
-	.init = gp100_gr_init,
+	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
 	.init_zcull = gf117_gr_init_zcull,

From aedc49fd0ebc2aaca2176aae27f170224b139b15 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1068/1286] drm/nouveau/gr/gf100-: support firmware-provided
 sw_ctx everywhere

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
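Note: gf100_grctx_generate_main() now applies a firmware-provided sw_ctx register list when one has been loaded, and falls back to the built-in grctx packs otherwise. A rough standalone sketch of the selection, with simplified stand-in types rather than the nouveau ones:

#include <stdio.h>
#include <stddef.h>

struct pack { const char *name; };

static void gr_mmio(const struct pack *p)
{
	printf("applying %s\n", p->name);
}

int main(void)
{
	static const struct pack hub = { "grctx->hub" }, gpc = { "grctx->gpc" };
	const struct pack *fuc_sw_ctx = NULL;	/* non-NULL once external firmware supplies a list */

	if (!fuc_sw_ctx) {		/* built-in packs, as before */
		gr_mmio(&hub);
		gr_mmio(&gpc);
	} else {			/* firmware-provided list */
		gr_mmio(fuc_sw_ctx);
	}
	return 0;
}
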
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 881015080d83..e2c8077b70df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1239,11 +1239,15 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	nvkm_mc_unk260(device, 0);
 
-	gf100_gr_mmio(gr, grctx->hub);
-	gf100_gr_mmio(gr, grctx->gpc);
-	gf100_gr_mmio(gr, grctx->zcull);
-	gf100_gr_mmio(gr, grctx->tpc);
-	gf100_gr_mmio(gr, grctx->ppc);
+	if (!gr->fuc_sw_ctx) {
+		gf100_gr_mmio(gr, grctx->hub);
+		gf100_gr_mmio(gr, grctx->gpc);
+		gf100_gr_mmio(gr, grctx->zcull);
+		gf100_gr_mmio(gr, grctx->tpc);
+		gf100_gr_mmio(gr, grctx->ppc);
+	} else {
+		gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+	}
 
 	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 

From ea4a2bb530464f9bbd2728a3c3c58dd758fb36c4 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1069/1286] drm/nouveau/gr/gf100-: virtualise patch_ltc, noting
 missing init

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
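Note: the new gk104_grctx_generate_patch_ltc() snapshots the current values of 0x17e91c and 0x17e920 and replays them through the context-generation mmio helpers; the in-code XXX records that the proper way to derive these values is still unknown. A rough sketch of that record-current-value pattern, with stand-in helpers rather than the real nvkm_rd32/mmio_wr32:

#include <stdio.h>

struct patch { unsigned int addr, data; };

static struct patch list[8];
static int entries;

/* stand-in for nvkm_rd32(): pretend to read the live register */
static unsigned int rd32(unsigned int addr)
{
	(void)addr;
	return 0x12345678;
}

/* stand-in for mmio_wr32(): record the write in the patch list */
static void patch_wr32(unsigned int addr, unsigned int data)
{
	list[entries].addr = addr;
	list[entries].data = data;
	entries++;
}

int main(void)
{
	patch_wr32(0x17e91c, rd32(0x17e91c));
	patch_wr32(0x17e920, rd32(0x17e920));

	for (int i = 0; i < entries; i++)
		printf("0x%06x <- 0x%08x\n", list[i].addr, list[i].data);
	return 0;
}
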
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c  |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h  |  3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c  | 12 ++++++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c  |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c  |  1 +
 6 files changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index e2c8077b70df..450059996b95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1254,6 +1254,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->bundle(info);
 	grctx->pagepool(info);
 	grctx->attrib(info);
+	if (grctx->patch_ltc)
+		grctx->patch_ltc(info);
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_tpcid(gr);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 5199e5aa0cb7..94612ca37043 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -48,6 +48,8 @@ struct gf100_grctx_func {
 	u32 attrib_nr;
 	u32 alpha_nr_max;
 	u32 alpha_nr;
+	/* other patch buffer stuff */
+	void (*patch_ltc)(struct gf100_grctx *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -80,6 +82,7 @@ extern const struct gf100_grctx_func gk20a_grctx;
 void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gk104_grctx_generate_bundle(struct gf100_grctx *);
 void gk104_grctx_generate_pagepool(struct gf100_grctx *);
+void gk104_grctx_generate_patch_ltc(struct gf100_grctx *);
 void gk104_grctx_generate_unkn(struct gf100_gr *);
 void gk104_grctx_generate_r418bb8(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 825c8fd500bc..da019c4904ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -840,6 +840,17 @@ gk104_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
+void
+gk104_grctx_generate_patch_ltc(struct gf100_grctx *info)
+{
+	struct nvkm_device *device = info->gr->base.engine.subdev.device;
+	u32 data0 = nvkm_rd32(device, 0x17e91c);
+	u32 data1 = nvkm_rd32(device, 0x17e920);
+	/*XXX: Figure out how to modify this correctly! */
+	mmio_wr32(info, 0x17e91c, data0);
+	mmio_wr32(info, 0x17e920, data1);
+}
+
 void
 gk104_grctx_generate_bundle(struct gf100_grctx *info)
 {
@@ -1005,4 +1016,5 @@ gk104_grctx = {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
+	.patch_ltc = gk104_grctx_generate_patch_ltc,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 7b95ec2fe453..3ad98cd8d531 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -830,4 +830,5 @@ gk110_grctx = {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
+	.patch_ltc = gk104_grctx_generate_patch_ltc,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 048b1152da44..47a4a071a712 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -91,4 +91,5 @@ gk110b_grctx = {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
+	.patch_ltc = gk104_grctx_generate_patch_ltc,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 67b7a1b43617..ba04c86b54a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -552,4 +552,5 @@ gk208_grctx = {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
+	.patch_ltc = gk104_grctx_generate_patch_ltc,
 };

From fc740f545d912b32e26f12e240270e1dc36fa26e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1070/1286] drm/nouveau/gr/gf100-: virtualise sm_id/tpc_nr

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
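Note: the per-chipset tpcid loops are replaced by a common gf100_grctx_generate_floorsweep() that walks TPC-major across GPCs and hands each (gpc, tpc) pair to the chipset's sm_id/tpc_nr hooks. A standalone worked example of the resulting SM-id ordering, using made-up GPC/TPC counts:

#include <stdio.h>

int main(void)
{
	/* hypothetical layout: GPC0 has 3 TPCs, GPC1 has 2 */
	int tpc_nr[] = { 3, 2 };
	int gpc_nr = 2, tpc_max = 3;
	int gpc, tpc, sm = 0;

	/* same loop order as gf100_grctx_generate_floorsweep() */
	for (tpc = 0; tpc < tpc_max; tpc++)
		for (gpc = 0; gpc < gpc_nr; gpc++)
			if (tpc < tpc_nr[gpc])
				printf("GPC%d TPC%d -> SM%d\n", gpc, tpc, sm++);
	return 0;
}

With this layout the ids interleave across GPCs (SM0 and SM1 land on the first TPC of GPC0 and GPC1 respectively) rather than filling one GPC before moving to the next.
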
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 60 ++++++++++++-------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  9 ++-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c |  2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c |  2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c |  2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c |  4 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c |  2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c |  4 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c |  2 +
 .../drm/nouveau/nvkm/engine/gr/ctxgk110b.c    |  2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c |  2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c |  4 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 26 +++-----
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 21 +------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c |  3 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c |  3 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  1 +
 20 files changed, 85 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 450059996b95..14e6bf07535a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1079,28 +1079,6 @@ gf100_grctx_generate_unkn(struct gf100_gr *gr)
 {
 }
 
-void
-gf100_grctx_generate_tpcid(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int gpc, tpc, id;
-
-	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-			if (tpc < gr->tpc_nr[gpc]) {
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), id);
-				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
-				id++;
-			}
-
-			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
-			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
-		}
-	}
-}
-
 void
 gf100_grctx_generate_r406028(struct gf100_gr *gr)
 {
@@ -1230,6 +1208,40 @@ gf100_grctx_generate_r406800(struct gf100_gr *gr)
 	}
 }
 
+void
+gf100_grctx_generate_tpc_nr(struct gf100_gr *gr, int gpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+	nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
+}
+
+void
+gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), sm);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), sm);
+	nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
+}
+
+void
+gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
+{
+	const struct gf100_grctx_func *func = gr->func->grctx;
+	int tpc, gpc, sm;
+
+	for (tpc = 0, sm = 0; tpc < gr->tpc_max; tpc++) {
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc])
+				func->sm_id(gr, gpc, tpc, sm++);
+			if (func->tpc_nr)
+				func->tpc_nr(gr, gpc);
+		}
+	}
+}
+
 void
 gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
@@ -1258,7 +1270,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		grctx->patch_ltc(info);
 	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_floorsweep(gr);
 	gf100_grctx_generate_r406028(gr);
 	gf100_grctx_generate_r4060a8(gr);
 	gf100_grctx_generate_r418bb8(gr);
@@ -1410,4 +1422,6 @@ gf100_grctx = {
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 94612ca37043..6fa9b629367b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -50,6 +50,9 @@ struct gf100_grctx_func {
 	u32 alpha_nr;
 	/* other patch buffer stuff */
 	void (*patch_ltc)(struct gf100_grctx *);
+	/* floorsweeping */
+	void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm);
+	void (*tpc_nr)(struct gf100_gr *, int gpc);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -59,11 +62,13 @@ void gf100_grctx_generate_bundle(struct gf100_grctx *);
 void gf100_grctx_generate_pagepool(struct gf100_grctx *);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
 void gf100_grctx_generate_unkn(struct gf100_gr *);
-void gf100_grctx_generate_tpcid(struct gf100_gr *);
+void gf100_grctx_generate_floorsweep(struct gf100_gr *);
 void gf100_grctx_generate_r406028(struct gf100_gr *);
 void gf100_grctx_generate_r4060a8(struct gf100_gr *);
 void gf100_grctx_generate_r418bb8(struct gf100_gr *);
 void gf100_grctx_generate_r406800(struct gf100_gr *);
+void gf100_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
+void gf100_grctx_generate_tpc_nr(struct gf100_gr *, int);
 
 extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
@@ -89,6 +94,7 @@ void gk104_grctx_generate_r418bb8(struct gf100_gr *);
 void gm107_grctx_generate_bundle(struct gf100_grctx *);
 void gm107_grctx_generate_pagepool(struct gf100_grctx *);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
+void gm107_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
 
 extern const struct gf100_grctx_func gk110_grctx;
 extern const struct gf100_grctx_func gk110b_grctx;
@@ -100,7 +106,6 @@ void gm107_grctx_generate_pagepool(struct gf100_grctx *);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
 
 extern const struct gf100_grctx_func gm200_grctx;
-void gm200_grctx_generate_tpcid(struct gf100_gr *);
 void gm200_grctx_generate_405b60(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gm20b_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index 54fd74e9cca0..19cc84e7d7e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -96,4 +96,6 @@ gf104_grctx = {
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 82f71b10c06e..370373111a39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -794,4 +794,6 @@ gf108_grctx = {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x324,
 	.alpha_nr = 0x218,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index 7df398b53f8f..6d72ae035917 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -347,4 +347,6 @@ gf110_grctx = {
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 19301d88577d..806102a54a0c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -240,7 +240,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->attrib(info);
 	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_floorsweep(gr);
 	gf100_grctx_generate_r406028(gr);
 	gf100_grctx_generate_r4060a8(gr);
 	gk104_grctx_generate_r418bb8(gr);
@@ -275,4 +275,6 @@ gf117_grctx = {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x324,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 605185b078be..784ab94ba267 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -517,4 +517,6 @@ gf119_grctx = {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x324,
 	.alpha_nr = 0x218,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index da019c4904ba..2be72fd58b1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -974,7 +974,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->attrib(info);
 	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_floorsweep(gr);
 	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
@@ -1017,4 +1017,6 @@ gk104_grctx = {
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
 	.patch_ltc = gk104_grctx_generate_patch_ltc,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 3ad98cd8d531..5013fc5c9399 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -831,4 +831,6 @@ gk110_grctx = {
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
 	.patch_ltc = gk104_grctx_generate_patch_ltc,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 47a4a071a712..1dd574232c63 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -92,4 +92,6 @@ gk110b_grctx = {
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
 	.patch_ltc = gk104_grctx_generate_patch_ltc,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index ba04c86b54a2..214f4dad98d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -553,4 +553,6 @@ gk208_grctx = {
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
 	.patch_ltc = gk104_grctx_generate_patch_ltc,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index da7c35a6a3d2..faa5f18a1da5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -42,7 +42,7 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_floorsweep(gr);
 	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
@@ -82,4 +82,6 @@ gk20a_grctx = {
 	.attrib_nr = 0x240,
 	.alpha_nr_max = 0x648 + (0x648 / 2),
 	.alpha_nr = 0x648,
+	.sm_id = gf100_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 9b43d4ce3eaa..e5c0273e0374 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -930,25 +930,13 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
 	}
 }
 
-static void
-gm107_grctx_generate_tpcid(struct gf100_gr *gr)
+void
+gm107_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int gpc, tpc, id;
-
-	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-			if (tpc < gr->tpc_nr[gpc]) {
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
-				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
-				id++;
-			}
-
-			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
-			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
-		}
-	}
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), sm);
+	nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
 }
 
 static void
@@ -972,7 +960,7 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->attrib(info);
 	grctx->unkn(gr);
 
-	gm107_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_floorsweep(gr);
 	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
@@ -1016,4 +1004,6 @@ gm107_grctx = {
 	.attrib_nr = 0xaa0,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
+	.sm_id = gm107_grctx_generate_sm_id,
+	.tpc_nr = gf100_grctx_generate_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index db209d33f486..be5e25ab4361 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -27,24 +27,6 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-void
-gm200_grctx_generate_tpcid(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int gpc, tpc, id;
-
-	for (tpc = 0, id = 0; tpc < TPC_MAX_PER_GPC; tpc++) {
-		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-			if (tpc < gr->tpc_nr[gpc]) {
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
-				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
-				id++;
-			}
-		}
-	}
-}
-
 void
 gm200_grctx_generate_405b60(struct gf100_gr *gr)
 {
@@ -94,7 +76,7 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->attrib(info);
 	grctx->unkn(gr);
 
-	gm200_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_floorsweep(gr);
 	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 
@@ -133,4 +115,5 @@ gm200_grctx = {
 	.attrib_nr = 0x400,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
+	.sm_id = gm107_grctx_generate_sm_id,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index e5702e3e0a5a..363d198dda60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -53,7 +53,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	grctx->unkn(gr);
 
-	gm200_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_floorsweep(gr);
 	gm20b_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 
@@ -98,4 +98,5 @@ gm20b_grctx = {
 	.attrib_nr = 0x400,
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
+	.sm_id = gm107_grctx_generate_sm_id,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 88ea322d956c..ac8618f849a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -138,7 +138,7 @@ gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->attrib(info);
 	grctx->unkn(gr);
 
-	gm200_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_floorsweep(gr);
 	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 
@@ -174,4 +174,5 @@ gp100_grctx = {
 	.attrib_nr = 0x440,
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
+	.sm_id = gm107_grctx_generate_sm_id,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 7a66b4c2eb18..a267abc2976b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -94,4 +94,5 @@ gp102_grctx = {
 	.attrib_nr = 0x320,
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
+	.sm_id = gm107_grctx_generate_sm_id,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index 8da91a0b3bd2..77345b202fbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -44,4 +44,5 @@ gp107_grctx = {
 	.attrib_nr = 0x540,
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
+	.sm_id = gm107_grctx_generate_sm_id,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ac4c0d8478fa..b9519fa5bbca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1675,6 +1675,7 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 	gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
 	for (i = 0; i < gr->gpc_nr; i++) {
 		gr->tpc_nr[i]  = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
+		gr->tpc_max = max(gr->tpc_max, gr->tpc_nr[i]);
 		gr->tpc_total += gr->tpc_nr[i];
 		gr->ppc_nr[i]  = gr->func->ppc_nr;
 		for (j = 0; j < gr->ppc_nr[i]; j++) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 9ed51dc851df..ad352ee8143c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -99,6 +99,7 @@ struct gf100_gr {
 	u8 rop_nr;
 	u8 gpc_nr;
 	u8 tpc_nr[GPC_MAX];
+	u8 tpc_max;
 	u8 tpc_total;
 	u8 ppc_nr[GPC_MAX];
 	u8 ppc_mask[GPC_MAX];

From e51f75d5012e88c90b3a05b6706475d83cb7a6eb Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1071/1286] drm/nouveau/gr/gf100-: virtualise tpc_per_gpc

GM20B now also shares the same code, as NVGPU shows it doesn't need
special treatment.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 25 ++++++++-----------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  1 -
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c |  1 -
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c |  1 -
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c |  1 -
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c |  1 -
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c |  1 -
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c | 15 -----------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c |  1 -
 9 files changed, 10 insertions(+), 37 deletions(-)
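
As an aside for reviewers (below the cut, not part of the patch itself): the
consolidated floorsweep helper packs the per-GPC TPC counts into the
0x406028/0x405870 ranges as one 4-bit field per GPC, eight GPCs per 32-bit
word. A minimal standalone sketch of just that packing, using a made-up
four-GPC configuration and printf() in place of the register writes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up example: four GPCs with 4/4/3/4 TPCs */
	const uint8_t tpc_nr[] = { 4, 4, 3, 4 };
	const int gpc_nr = 4;
	int gpc = 0, i, j;

	for (i = 0; i < 4; i++) {
		uint32_t data = 0;
		for (j = 0; j < 8 && gpc < gpc_nr; j++, gpc++)
			data |= (uint32_t)tpc_nr[gpc] << (j * 4);
		/* the real code issues nvkm_wr32() to both register ranges */
		printf("0x%06x <- 0x%08x\n", 0x406028 + i * 4, data);
		printf("0x%06x <- 0x%08x\n", 0x405870 + i * 4, data);
	}
	return 0;
}

With this configuration the first word comes out as 0x00004344 and the
remaining three words stay zero, matching the behaviour of the old
per-chipset helpers.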

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 14e6bf07535a..a4865f7b9071 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1079,19 +1079,6 @@ gf100_grctx_generate_unkn(struct gf100_gr *gr)
 {
 }
 
-void
-gf100_grctx_generate_r406028(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	u32 tmp[GPC_MAX / 8] = {}, i = 0;
-	for (i = 0; i < gr->gpc_nr; i++)
-		tmp[i / 8] |= gr->tpc_nr[i] << ((i % 8) * 4);
-	for (i = 0; i < 4; i++) {
-		nvkm_wr32(device, 0x406028 + (i * 4), tmp[i]);
-		nvkm_wr32(device, 0x405870 + (i * 4), tmp[i]);
-	}
-}
-
 void
 gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
 {
@@ -1229,8 +1216,10 @@ gf100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
 void
 gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *func = gr->func->grctx;
-	int tpc, gpc, sm;
+	int tpc, gpc, sm, i, j;
+	u32 data;
 
 	for (tpc = 0, sm = 0; tpc < gr->tpc_max; tpc++) {
 		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
@@ -1240,6 +1229,13 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 				func->tpc_nr(gr, gpc);
 		}
 	}
+
+	for (gpc = 0, i = 0; i < 4; i++) {
+		for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++)
+			data |= gr->tpc_nr[gpc] << (j * 4);
+		nvkm_wr32(device, 0x406028 + (i * 4), data);
+		nvkm_wr32(device, 0x405870 + (i * 4), data);
+	}
 }
 
 void
@@ -1271,7 +1267,6 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406028(gr);
 	gf100_grctx_generate_r4060a8(gr);
 	gf100_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 6fa9b629367b..b9e287985e67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -63,7 +63,6 @@ void gf100_grctx_generate_pagepool(struct gf100_grctx *);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
 void gf100_grctx_generate_unkn(struct gf100_gr *);
 void gf100_grctx_generate_floorsweep(struct gf100_gr *);
-void gf100_grctx_generate_r406028(struct gf100_gr *);
 void gf100_grctx_generate_r4060a8(struct gf100_gr *);
 void gf100_grctx_generate_r418bb8(struct gf100_gr *);
 void gf100_grctx_generate_r406800(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 806102a54a0c..a091485426b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -241,7 +241,6 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406028(gr);
 	gf100_grctx_generate_r4060a8(gr);
 	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 2be72fd58b1c..cb6c15686adb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -975,7 +975,6 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index faa5f18a1da5..dda305116772 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -43,7 +43,6 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index e5c0273e0374..77cce9bffd79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -961,7 +961,6 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index be5e25ab4361..ac6724b61e50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -77,7 +77,6 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index 363d198dda60..6420d1391573 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -21,20 +21,6 @@
  */
 #include "ctxgf100.h"
 
-static void
-gm20b_grctx_generate_r406028(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	u32 tpc_per_gpc = 0;
-	int i;
-
-	for (i = 0; i < gr->gpc_nr; i++)
-		tpc_per_gpc |= gr->tpc_nr[i] << (4 * i);
-
-	nvkm_wr32(device, 0x406028, tpc_per_gpc);
-	nvkm_wr32(device, 0x405870, tpc_per_gpc);
-}
-
 static void
 gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
@@ -54,7 +40,6 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gm20b_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index ac8618f849a5..701341d8b39c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -139,7 +139,6 @@ gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)

From 9d8a80df73b58c700e36a0051b2fb44f252693e2 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1072/1286] drm/nouveau/gr/gf100-: virtualise r4060a8 + apply
 fixes from traces

This also fixes some GPUs where we previously wrote too many registers.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 11 ++++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c |  2 +-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c |  1 +
 7 files changed, 15 insertions(+), 5 deletions(-)
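
A note for reviewers, not part of the change itself: 0x4060a8 holds one byte
per TPC slot (the GPC that owns it) and is written a 32-bit word at a time,
so only DIV_ROUND_UP(tpcmax, 4) words are actually needed. For example,
assuming hypothetical reads of 4 from 0x022430 and 2 from 0x022434, tpcmax =
2 * 4 = 8 and DIV_ROUND_UP(8, 4) = 2, so two words are written where the old
loop unconditionally wrote four.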

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index a4865f7b9071..aededb2b9ad3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1083,7 +1083,9 @@ void
 gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	u8  tpcnr[GPC_MAX], data[TPC_MAX];
+	const u8 gpcmax = nvkm_rd32(device, 0x022430);
+	const u8 tpcmax = nvkm_rd32(device, 0x022434) * gpcmax;
+	u8 tpcnr[GPC_MAX], data[TPC_MAX];
 	int gpc, tpc, i;
 
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
@@ -1098,7 +1100,7 @@ gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
 		data[tpc] = gpc;
 	}
 
-	for (i = 0; i < 4; i++)
+	for (i = 0; i < DIV_ROUND_UP(tpcmax, 4); i++)
 		nvkm_wr32(device, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
 }
 
@@ -1236,6 +1238,9 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x406028 + (i * 4), data);
 		nvkm_wr32(device, 0x405870 + (i * 4), data);
 	}
+
+	if (func->r4060a8)
+		func->r4060a8(gr);
 }
 
 void
@@ -1267,7 +1272,6 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r4060a8(gr);
 	gf100_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
@@ -1419,4 +1423,5 @@ gf100_grctx = {
 	.attrib_nr = 0x218,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.r4060a8 = gf100_grctx_generate_r4060a8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index b9e287985e67..8430d8229a2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -53,6 +53,7 @@ struct gf100_grctx_func {
 	/* floorsweeping */
 	void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm);
 	void (*tpc_nr)(struct gf100_gr *, int gpc);
+	void (*r4060a8)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -63,11 +64,11 @@ void gf100_grctx_generate_pagepool(struct gf100_grctx *);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
 void gf100_grctx_generate_unkn(struct gf100_gr *);
 void gf100_grctx_generate_floorsweep(struct gf100_gr *);
-void gf100_grctx_generate_r4060a8(struct gf100_gr *);
 void gf100_grctx_generate_r418bb8(struct gf100_gr *);
 void gf100_grctx_generate_r406800(struct gf100_gr *);
 void gf100_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
 void gf100_grctx_generate_tpc_nr(struct gf100_gr *, int);
+void gf100_grctx_generate_r4060a8(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index 19cc84e7d7e6..471fa9e83324 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -98,4 +98,5 @@ gf104_grctx = {
 	.attrib_nr = 0x218,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.r4060a8 = gf100_grctx_generate_r4060a8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 370373111a39..b472a3e8a589 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -796,4 +796,5 @@ gf108_grctx = {
 	.alpha_nr = 0x218,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.r4060a8 = gf100_grctx_generate_r4060a8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index 6d72ae035917..12a98f67b5c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -349,4 +349,5 @@ gf110_grctx = {
 	.attrib_nr = 0x218,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.r4060a8 = gf100_grctx_generate_r4060a8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index a091485426b0..5c4d6d92a684 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -241,7 +241,6 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r4060a8(gr);
 	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
@@ -276,4 +275,5 @@ gf117_grctx = {
 	.alpha_nr = 0x324,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.r4060a8 = gf100_grctx_generate_r4060a8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 784ab94ba267..cbf6c6a69403 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -519,4 +519,5 @@ gf119_grctx = {
 	.alpha_nr = 0x218,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.r4060a8 = gf100_grctx_generate_r4060a8,
 };

From ff209c235de9c3437e131b39eb976ff4bcc4c516 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1073/1286] drm/nouveau/gr/gf100-: virtualise rop_mapping

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  6 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  5 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 61 ++++++++++++++++++-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 61 +------------------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c |  1 +
 .../drm/nouveau/nvkm/engine/gr/ctxgk110b.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c |  1 +
 18 files changed, 82 insertions(+), 70 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index aededb2b9ad3..d35711b24612 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1105,7 +1105,7 @@ gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
 }
 
 void
-gf100_grctx_generate_r418bb8(struct gf100_gr *gr)
+gf100_grctx_generate_rop_mapping(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
@@ -1241,6 +1241,8 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 
 	if (func->r4060a8)
 		func->r4060a8(gr);
+
+	func->rop_mapping(gr);
 }
 
 void
@@ -1272,7 +1274,6 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
 	gf100_gr_icmd(gr, grctx->icmd);
@@ -1424,4 +1425,5 @@ gf100_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
+	.rop_mapping = gf100_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 8430d8229a2b..0c0d2a55fd11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -54,6 +54,7 @@ struct gf100_grctx_func {
 	void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm);
 	void (*tpc_nr)(struct gf100_gr *, int gpc);
 	void (*r4060a8)(struct gf100_gr *);
+	void (*rop_mapping)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -64,11 +65,11 @@ void gf100_grctx_generate_pagepool(struct gf100_grctx *);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
 void gf100_grctx_generate_unkn(struct gf100_gr *);
 void gf100_grctx_generate_floorsweep(struct gf100_gr *);
-void gf100_grctx_generate_r418bb8(struct gf100_gr *);
 void gf100_grctx_generate_r406800(struct gf100_gr *);
 void gf100_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
 void gf100_grctx_generate_tpc_nr(struct gf100_gr *, int);
 void gf100_grctx_generate_r4060a8(struct gf100_gr *);
+void gf100_grctx_generate_rop_mapping(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
@@ -79,6 +80,7 @@ extern const struct gf100_grctx_func gf110_grctx;
 
 extern const struct gf100_grctx_func gf117_grctx;
 void gf117_grctx_generate_attrib(struct gf100_grctx *);
+void gf117_grctx_generate_rop_mapping(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gf119_grctx;
 
@@ -89,7 +91,6 @@ void gk104_grctx_generate_bundle(struct gf100_grctx *);
 void gk104_grctx_generate_pagepool(struct gf100_grctx *);
 void gk104_grctx_generate_patch_ltc(struct gf100_grctx *);
 void gk104_grctx_generate_unkn(struct gf100_gr *);
-void gk104_grctx_generate_r418bb8(struct gf100_gr *);
 
 void gm107_grctx_generate_bundle(struct gf100_grctx *);
 void gm107_grctx_generate_pagepool(struct gf100_grctx *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index 471fa9e83324..eb0d3778d3fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -99,4 +99,5 @@ gf104_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
+	.rop_mapping = gf100_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index b472a3e8a589..f218d98a1645 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -797,4 +797,5 @@ gf108_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
+	.rop_mapping = gf100_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index 12a98f67b5c5..35c163851477 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -350,4 +350,5 @@ gf110_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
+	.rop_mapping = gf100_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 5c4d6d92a684..7aaf8a26031f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -179,6 +179,65 @@ gf117_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
+void
+gf117_grctx_generate_rop_mapping(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 data[6] = {}, data2[2] = {};
+	u8  tpcnr[GPC_MAX];
+	u8  shift, ntpcv;
+	int gpc, tpc, i;
+
+	/* calculate first set of magics */
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+
+	gpc = -1;
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpcnr[gpc]--;
+
+		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+	}
+
+	for (; tpc < 32; tpc++)
+		data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+	/* and the second... */
+	shift = 0;
+	ntpcv = gr->tpc_total;
+	while (!(ntpcv & (1 << 4))) {
+		ntpcv <<= 1;
+		shift++;
+	}
+
+	data2[0]  = (ntpcv << 16);
+	data2[0] |= (shift << 21);
+	data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+	for (i = 1; i < 7; i++)
+		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+	/* GPC_BROADCAST */
+	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+				     gr->screen_tile_row_offset);
+	for (i = 0; i < 6; i++)
+		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
+
+	/* GPC_BROADCAST.TP_BROADCAST */
+	nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
+				     gr->screen_tile_row_offset | data2[0]);
+	nvkm_wr32(device, 0x41bfe4, data2[1]);
+	for (i = 0; i < 6; i++)
+		nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
+
+	/* UNK78xx */
+	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+				     gr->screen_tile_row_offset);
+	for (i = 0; i < 6; i++)
+		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
+}
+
 void
 gf117_grctx_generate_attrib(struct gf100_grctx *info)
 {
@@ -241,7 +300,6 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
@@ -276,4 +334,5 @@ gf117_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index cbf6c6a69403..4cd5d8615e01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -520,4 +520,5 @@ gf119_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
+	.rop_mapping = gf100_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index cb6c15686adb..25a5209db5e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -892,65 +892,6 @@ gk104_grctx_generate_unkn(struct gf100_gr *gr)
 	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
-void
-gk104_grctx_generate_r418bb8(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	u32 data[6] = {}, data2[2] = {};
-	u8  tpcnr[GPC_MAX];
-	u8  shift, ntpcv;
-	int gpc, tpc, i;
-
-	/* calculate first set of magics */
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-
-	gpc = -1;
-	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpcnr[gpc]--;
-
-		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
-	}
-
-	for (; tpc < 32; tpc++)
-		data[tpc / 6] |= 7 << ((tpc % 6) * 5);
-
-	/* and the second... */
-	shift = 0;
-	ntpcv = gr->tpc_total;
-	while (!(ntpcv & (1 << 4))) {
-		ntpcv <<= 1;
-		shift++;
-	}
-
-	data2[0]  = (ntpcv << 16);
-	data2[0] |= (shift << 21);
-	data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
-	for (i = 1; i < 7; i++)
-		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
-
-	/* GPC_BROADCAST */
-	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
-				     gr->screen_tile_row_offset);
-	for (i = 0; i < 6; i++)
-		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
-
-	/* GPC_BROADCAST.TP_BROADCAST */
-	nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
-				     gr->screen_tile_row_offset | data2[0]);
-	nvkm_wr32(device, 0x41bfe4, data2[1]);
-	for (i = 0; i < 6; i++)
-		nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
-
-	/* UNK78xx */
-	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
-				     gr->screen_tile_row_offset);
-	for (i = 0; i < 6; i++)
-		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
-}
-
 void
 gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
@@ -975,7 +916,6 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
@@ -1018,4 +958,5 @@ gk104_grctx = {
 	.patch_ltc = gk104_grctx_generate_patch_ltc,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 5013fc5c9399..038cc47602c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -833,4 +833,5 @@ gk110_grctx = {
 	.patch_ltc = gk104_grctx_generate_patch_ltc,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 1dd574232c63..8b025e2c338f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -94,4 +94,5 @@ gk110b_grctx = {
 	.patch_ltc = gk104_grctx_generate_patch_ltc,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 214f4dad98d6..9479b4043343 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -555,4 +555,5 @@ gk208_grctx = {
 	.patch_ltc = gk104_grctx_generate_patch_ltc,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index dda305116772..23abbfad1bdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -43,7 +43,6 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
@@ -83,4 +82,5 @@ gk20a_grctx = {
 	.alpha_nr = 0x648,
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 77cce9bffd79..fef6652f471c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -961,7 +961,6 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gk104_grctx_generate_r418bb8(gr);
 	gf100_grctx_generate_r406800(gr);
 
 	nvkm_wr32(device, 0x4064d0, 0x00000001);
@@ -1005,4 +1004,5 @@ gm107_grctx = {
 	.alpha_nr = 0x1000,
 	.sm_id = gm107_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index ac6724b61e50..cfccd75dbc30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -77,7 +77,6 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)
 		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -115,4 +114,5 @@ gm200_grctx = {
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
 	.sm_id = gm107_grctx_generate_sm_id,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index 6420d1391573..3dd4e18d2525 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -40,7 +40,6 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)
 		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -84,4 +83,5 @@ gm20b_grctx = {
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
 	.sm_id = gm107_grctx_generate_sm_id,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 701341d8b39c..e09990785cb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -139,7 +139,6 @@ gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)
 		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -174,4 +173,5 @@ gp100_grctx = {
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
 	.sm_id = gm107_grctx_generate_sm_id,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index a267abc2976b..553a609c4f98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -95,4 +95,5 @@ gp102_grctx = {
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
 	.sm_id = gm107_grctx_generate_sm_id,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index 77345b202fbb..db3fff89bc2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -45,4 +45,5 @@ gp107_grctx = {
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
 	.sm_id = gm107_grctx_generate_sm_id,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
 };

From 43952c6f43106c88b4dcdc99285d92172d8c57cd Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1074/1286] drm/nouveau/gr/gf100-: virtualise alpha_beta_tables
 + improve algorithms

I haven't yet been able to find a fully programmatic way of calculating the
same mapping as NVIDIA for GF100-GF119, so the algorithm partially depends
on data tables for specific configurations.

I couldn't find traces for every possibility, so the algorithm falls back to
a mapping similar to what GK104-GM10x use if it encounters a configuration
without a table entry.  We did the wrong thing before anyway, so this
shouldn't matter too much.

The algorithm used in the GK104 implementation was ported from NVGPU.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 156 +++++++++++++++---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |   5 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c |   2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c |  49 +++++-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c |   1 +
 .../drm/nouveau/nvkm/engine/gr/ctxgk110b.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c |   2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c |   2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    |  10 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |   1 +
 15 files changed, 200 insertions(+), 34 deletions(-)
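
For reviewers, a standalone sketch (not part of the patch) of the fallback
path only, i.e. as if no table entry existed for any row: alpha TPCs are
handed out round-robin across GPCs until the per-row target is met, whatever
remains in each GPC becomes beta, and each side is folded into a mask with
eight bits per GPC. The two-GPC, 4+4-TPC configuration below is made up, and
printf() stands in for the register writes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t tpc_nr[] = { 4, 4 };	/* hypothetical 2-GPC, 4+4-TPC board */
	const int gpc_nr = 2, tpc_total = 8;
	int i, gpc;

	for (i = 0; i < 32; i++) {
		uint32_t abits[2] = { 0, 0 }, amask = 0, bmask = 0;
		uint32_t atarget = tpc_total * i / 32;	/* fallback target */
		if (!atarget)
			atarget = 1;

		/* hand out alpha TPCs one per GPC until the target is met */
		while (atarget) {
			for (gpc = 0; atarget && gpc < gpc_nr; gpc++) {
				if (abits[gpc] < tpc_nr[gpc]) {
					abits[gpc]++;
					atarget--;
				}
			}
		}

		/* whatever is left in each GPC is beta; 8 mask bits per GPC */
		for (gpc = 0; gpc < gpc_nr; gpc++) {
			uint32_t bbits = tpc_nr[gpc] - abits[gpc];
			amask |= ((1u << abits[gpc]) - 1) << (gpc * 8);
			bmask |= ((1u << bbits) - 1) << abits[gpc] << (gpc * 8);
		}

		printf("row %2d: 0x%06x <- 0x%08x  0x%06x <- 0x%08x\n",
		       i, 0x406800 + i * 0x20, amask, 0x406c00 + i * 0x20, bmask);
	}
	return 0;
}

At row 16, for example, this yields amask 0x00000303 and bmask 0x00000c0c,
i.e. two alpha and two beta TPCs in each GPC.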

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index d35711b24612..7a22614a24f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1163,37 +1163,140 @@ gf100_grctx_generate_rop_mapping(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
+static const u32
+gf100_grctx_alpha_beta_map[17][32] = {
+	[1] = {
+		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	},
+	[2] = {
+		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	},
+	//XXX: 3
+	[4] = {
+		1, 1, 1, 1, 1, 1, 1, 1,
+		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+		3, 3, 3, 3, 3, 3, 3, 3,
+	},
+	//XXX: 5
+	//XXX: 6
+	[7] = {
+		1, 1, 1, 1,
+		2, 2, 2, 2, 2, 2,
+		3, 3, 3, 3, 3, 3,
+		4, 4, 4, 4, 4, 4,
+		5, 5, 5, 5, 5, 5,
+		6, 6, 6, 6,
+	},
+	[8] = {
+		1, 1, 1,
+		2, 2, 2, 2, 2,
+		3, 3, 3, 3, 3,
+		4, 4, 4, 4, 4, 4,
+		5, 5, 5, 5, 5,
+		6, 6, 6, 6, 6,
+		7, 7, 7,
+	},
+	//XXX: 9
+	//XXX: 10
+	[11] = {
+		1, 1,
+		2, 2, 2, 2,
+		3, 3, 3,
+		4, 4, 4, 4,
+		5, 5, 5,
+		6, 6, 6,
+		7, 7, 7, 7,
+		8, 8, 8,
+		9, 9, 9, 9,
+		10, 10,
+	},
+	//XXX: 12
+	//XXX: 13
+	[14] = {
+		1, 1,
+		2, 2,
+		3, 3, 3,
+		4, 4, 4,
+		5, 5,
+		6, 6, 6,
+		7, 7,
+		8, 8, 8,
+		9, 9,
+		10, 10, 10,
+		11, 11, 11,
+		12, 12,
+		13, 13,
+	},
+	[15] = {
+		1, 1,
+		2, 2,
+		3, 3,
+		4, 4, 4,
+		5, 5,
+		6, 6, 6,
+		7, 7,
+		8, 8,
+		9, 9, 9,
+		10, 10,
+		11, 11, 11,
+		12, 12,
+		13, 13,
+		14, 14,
+	},
+	[16] = {
+		1, 1,
+		2, 2,
+		3, 3,
+		4, 4,
+		5, 5,
+		6, 6, 6,
+		7, 7,
+		8, 8,
+		9, 9,
+		10, 10, 10,
+		11, 11,
+		12, 12,
+		13, 13,
+		14, 14,
+		15, 15,
+	},
+};
+
 void
-gf100_grctx_generate_r406800(struct gf100_gr *gr)
+gf100_grctx_generate_alpha_beta_tables(struct gf100_gr *gr)
 {
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	u64 tpc_mask = 0, tpc_set = 0;
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc;
-	int i, a, b;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int i, gpc;
 
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++)
-		tpc_mask |= ((1ULL << gr->tpc_nr[gpc]) - 1) << (gpc * 8);
+	for (i = 0; i < 32; i++) {
+		u32 atarget = gf100_grctx_alpha_beta_map[gr->tpc_total][i];
+		u32 abits[GPC_MAX] = {}, amask = 0, bmask = 0;
 
-	for (i = 0, gpc = -1, b = -1; i < 32; i++) {
-		a = (i * (gr->tpc_total - 1)) / 32;
-		if (a != b) {
-			b = a;
-			do {
-				gpc = (gpc + 1) % gr->gpc_nr;
-			} while (!tpcnr[gpc]);
-			tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-			tpc_set |= 1ULL << ((gpc * 8) + tpc);
+		if (!atarget) {
+			nvkm_warn(subdev, "missing alpha/beta mapping table\n");
+			atarget = max_t(u32, gr->tpc_total * i / 32, 1);
 		}
 
-		nvkm_wr32(device, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
-		nvkm_wr32(device, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
-		if (gr->gpc_nr > 4) {
-			nvkm_wr32(device, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
-			nvkm_wr32(device, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
+		while (atarget) {
+			for (gpc = 0; atarget && gpc < gr->gpc_nr; gpc++) {
+				if (abits[gpc] < gr->tpc_nr[gpc]) {
+					abits[gpc]++;
+					atarget--;
+				}
+			}
 		}
+
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			u32 bbits = gr->tpc_nr[gpc] - abits[gpc];
+			amask |= ((1 << abits[gpc]) - 1) << (gpc * 8);
+			bmask |= ((1 << bbits) - 1) << abits[gpc] << (gpc * 8);
+		}
+
+		nvkm_wr32(device, 0x406800 + (i * 0x20), amask);
+		nvkm_wr32(device, 0x406c00 + (i * 0x20), bmask);
 	}
 }
 
@@ -1243,6 +1346,9 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 		func->r4060a8(gr);
 
 	func->rop_mapping(gr);
+
+	if (func->alpha_beta_tables)
+		func->alpha_beta_tables(gr);
 }
 
 void
@@ -1274,7 +1380,6 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406800(gr);
 
 	gf100_gr_icmd(gr, grctx->icmd);
 	nvkm_wr32(device, 0x404154, idle_timeout);
@@ -1426,4 +1531,5 @@ gf100_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 0c0d2a55fd11..a531076aa360 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -55,6 +55,7 @@ struct gf100_grctx_func {
 	void (*tpc_nr)(struct gf100_gr *, int gpc);
 	void (*r4060a8)(struct gf100_gr *);
 	void (*rop_mapping)(struct gf100_gr *);
+	void (*alpha_beta_tables)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -65,11 +66,11 @@ void gf100_grctx_generate_pagepool(struct gf100_grctx *);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
 void gf100_grctx_generate_unkn(struct gf100_gr *);
 void gf100_grctx_generate_floorsweep(struct gf100_gr *);
-void gf100_grctx_generate_r406800(struct gf100_gr *);
 void gf100_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
 void gf100_grctx_generate_tpc_nr(struct gf100_gr *, int);
 void gf100_grctx_generate_r4060a8(struct gf100_gr *);
 void gf100_grctx_generate_rop_mapping(struct gf100_gr *);
+void gf100_grctx_generate_alpha_beta_tables(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
@@ -85,6 +86,8 @@ void gf117_grctx_generate_rop_mapping(struct gf100_gr *);
 extern const struct gf100_grctx_func gf119_grctx;
 
 extern const struct gf100_grctx_func gk104_grctx;
+void gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *);
+
 extern const struct gf100_grctx_func gk20a_grctx;
 void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gk104_grctx_generate_bundle(struct gf100_grctx *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index eb0d3778d3fe..edeb36942b80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -100,4 +100,5 @@ gf104_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index f218d98a1645..267cfdf9b001 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -798,4 +798,5 @@ gf108_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index 35c163851477..1fb934f899f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -351,4 +351,5 @@ gf110_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 7aaf8a26031f..f159b550807f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -300,7 +300,6 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
 		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -335,4 +334,5 @@ gf117_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 4cd5d8615e01..3720afde9e9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -521,4 +521,5 @@ gf119_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 25a5209db5e8..25576c1ea9cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -916,7 +916,6 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
 		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -933,6 +932,53 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
 }
 
+void
+gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int i, j, gpc, ppc;
+
+	for (i = 0; i < 32; i++) {
+		u32 atarget = max_t(u32, gr->tpc_total * i / 32, 1);
+		u32 btarget = gr->tpc_total - atarget;
+		bool alpha = atarget < btarget;
+		u64 amask = 0, bmask = 0;
+
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			for (ppc = 0; ppc < gr->func->ppc_nr; ppc++) {
+				u32 ppc_tpcs = gr->ppc_tpc_nr[gpc][ppc];
+				u32 abits, bbits, pmask;
+
+				if (alpha) {
+					abits = atarget ? ppc_tpcs : 0;
+					bbits = ppc_tpcs - abits;
+				} else {
+					bbits = btarget ? ppc_tpcs : 0;
+					abits = ppc_tpcs - bbits;
+				}
+
+				pmask = gr->ppc_tpc_mask[gpc][ppc];
+				while (ppc_tpcs-- > abits)
+					pmask &= pmask - 1;
+				amask |= (u64)pmask << (gpc * 8);
+
+				pmask ^= gr->ppc_tpc_mask[gpc][ppc];
+				bmask |= (u64)pmask << (gpc * 8);
+
+				atarget -= min(abits, atarget);
+				btarget -= min(bbits, btarget);
+				if ((abits > 0) || (bbits > 0))
+					alpha = !alpha;
+			}
+		}
+
+		for (j = 0; j < gr->gpc_nr; j += 4, amask >>= 32, bmask >>= 32) {
+			nvkm_wr32(device, 0x406800 + (i * 0x20) + j, amask);
+			nvkm_wr32(device, 0x406c00 + (i * 0x20) + j, bmask);
+		}
+	}
+}
+
 const struct gf100_grctx_func
 gk104_grctx = {
 	.main  = gk104_grctx_generate_main,
@@ -959,4 +1005,5 @@ gk104_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 038cc47602c6..284570a0b5cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -834,4 +834,5 @@ gk110_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 8b025e2c338f..ffd8cf989309 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -95,4 +95,5 @@ gk110b_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 9479b4043343..e5e4d4dce86e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -556,4 +556,5 @@ gk208_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 23abbfad1bdb..896d473dcc0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -43,7 +43,6 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
 		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
@@ -83,4 +82,5 @@ gk20a_grctx = {
 	.sm_id = gf100_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index fef6652f471c..c209bf38b5d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -961,7 +961,6 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	grctx->unkn(gr);
 
 	gf100_grctx_generate_floorsweep(gr);
-	gf100_grctx_generate_r406800(gr);
 
 	nvkm_wr32(device, 0x4064d0, 0x00000001);
 	for (i = 1; i < 8; i++)
@@ -1005,4 +1004,5 @@ gm107_grctx = {
 	.sm_id = gm107_grctx_generate_sm_id,
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index b9519fa5bbca..fe3b44d18a67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1679,10 +1679,12 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 		gr->tpc_total += gr->tpc_nr[i];
 		gr->ppc_nr[i]  = gr->func->ppc_nr;
 		for (j = 0; j < gr->ppc_nr[i]; j++) {
-			u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
-			if (mask)
-				gr->ppc_mask[i] |= (1 << j);
-			gr->ppc_tpc_nr[i][j] = hweight8(mask);
+			gr->ppc_tpc_mask[i][j] =
+				nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+			if (gr->ppc_tpc_mask[i][j] == 0)
+				continue;
+			gr->ppc_mask[i] |= (1 << j);
+			gr->ppc_tpc_nr[i][j] = hweight8(gr->ppc_tpc_mask[i][j]);
 		}
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index ad352ee8143c..6f7a7864d66f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -103,6 +103,7 @@ struct gf100_gr {
 	u8 tpc_total;
 	u8 ppc_nr[GPC_MAX];
 	u8 ppc_mask[GPC_MAX];
+	u8 ppc_tpc_mask[GPC_MAX][4];
 	u8 ppc_tpc_nr[GPC_MAX][4];
 
 	struct gf100_gr_data mmio_data[4];

From c4a2b6385dd4a32759acf8e7884acd1115054887 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1075/1286] drm/nouveau/gr/gf100-gf119: modify max_ways_evict
 where required

I don't think this is done after Fermi; NVGPU used to do it but removed
the code, and I've not seen RM traces touching it either.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 12 ++++++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c |  1 +
 7 files changed, 19 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 7a22614a24f7..cdf74f31d4be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1163,6 +1163,15 @@ gf100_grctx_generate_rop_mapping(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
+void
+gf100_grctx_generate_max_ways_evict(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 fbps = nvkm_rd32(device, 0x121c74);
+	if (fbps == 1)
+		nvkm_mask(device, 0x17e91c, 0x001f0000, 0x00090000);
+}
+
 static const u32
 gf100_grctx_alpha_beta_map[17][32] = {
 	[1] = {
@@ -1349,6 +1358,8 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 
 	if (func->alpha_beta_tables)
 		func->alpha_beta_tables(gr);
+	if (func->max_ways_evict)
+		func->max_ways_evict(gr);
 }
 
 void
@@ -1532,4 +1543,5 @@ gf100_grctx = {
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
+	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index a531076aa360..41cb875464de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -56,6 +56,7 @@ struct gf100_grctx_func {
 	void (*r4060a8)(struct gf100_gr *);
 	void (*rop_mapping)(struct gf100_gr *);
 	void (*alpha_beta_tables)(struct gf100_gr *);
+	void (*max_ways_evict)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -71,6 +72,7 @@ void gf100_grctx_generate_tpc_nr(struct gf100_gr *, int);
 void gf100_grctx_generate_r4060a8(struct gf100_gr *);
 void gf100_grctx_generate_rop_mapping(struct gf100_gr *);
 void gf100_grctx_generate_alpha_beta_tables(struct gf100_gr *);
+void gf100_grctx_generate_max_ways_evict(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index edeb36942b80..d246bb62ab02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -101,4 +101,5 @@ gf104_grctx = {
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
+	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 267cfdf9b001..3b6e645b46a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -799,4 +799,5 @@ gf108_grctx = {
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
+	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index 1fb934f899f4..b65a1f329ebd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -352,4 +352,5 @@ gf110_grctx = {
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
+	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index f159b550807f..423b09753bb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -335,4 +335,5 @@ gf117_grctx = {
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
+	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 3720afde9e9f..6b2fcfe44df2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -522,4 +522,5 @@ gf119_grctx = {
 	.r4060a8 = gf100_grctx_generate_r4060a8,
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
+	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
 };

From 60770fa28bd7d69097d3a186fe8cfa1ec21c9c1d Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1076/1286] drm/nouveau/gr/gf100-: virtualise dist_skip_table +
 improve algorithm

The algorithm for GM200 and newer matches RM on all the boards I have
access to, but I don't have enough data to derive something sensible for
earlier boards, so those will continue to write zeroes to the table as
before.

The code in NVGPU isn't helpful here; it appears to handle only specific cases.
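
For illustration only (not part of the patch), a minimal userspace sketch
of the per-PPC skip-mask math that gm200_grctx_generate_dist_skip_table()
below performs; the TPC mask, TPC count and minimum are invented example
values:

  #include <stdio.h>

  int main(void)
  {
          unsigned char ppc_tpc_mask = 0x07; /* this PPC owns TPCs 0,1,2 */
          unsigned char ppc_tpc_nr   = 3;    /* hweight8(ppc_tpc_mask) */
          unsigned char ppc_tpc_min  = 2;    /* smallest TPC count of any PPC */
          unsigned char tpcs = ppc_tpc_nr, tpcm = ppc_tpc_mask;

          /* Clear the lowest set bit until only ppc_tpc_min TPCs remain. */
          while (tpcs-- > ppc_tpc_min)
                  tpcm &= tpcm - 1;

          /* The bits that were cleared are the TPCs to skip for this PPC. */
          tpcm ^= ppc_tpc_mask;
          printf("skip mask: 0x%02x\n", tpcm); /* prints 0x01 */
          return 0;
  }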

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  3 +++
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 15 ++++++++---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c |  5 +---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c |  1 +
 .../drm/nouveau/nvkm/engine/gr/ctxgk110b.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c |  5 +---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 25 +++++++++++++++++--
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c |  3 +--
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    |  3 +++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  1 +
 14 files changed, 51 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index cdf74f31d4be..176be7124f29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1360,6 +1360,8 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 		func->alpha_beta_tables(gr);
 	if (func->max_ways_evict)
 		func->max_ways_evict(gr);
+	if (func->dist_skip_table)
+		func->dist_skip_table(gr);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 41cb875464de..dd1c73b725cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -57,6 +57,7 @@ struct gf100_grctx_func {
 	void (*rop_mapping)(struct gf100_gr *);
 	void (*alpha_beta_tables)(struct gf100_gr *);
 	void (*max_ways_evict)(struct gf100_gr *);
+	void (*dist_skip_table)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -84,6 +85,7 @@ extern const struct gf100_grctx_func gf110_grctx;
 extern const struct gf100_grctx_func gf117_grctx;
 void gf117_grctx_generate_attrib(struct gf100_grctx *);
 void gf117_grctx_generate_rop_mapping(struct gf100_gr *);
+void gf117_grctx_generate_dist_skip_table(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gf119_grctx;
 
@@ -112,6 +114,7 @@ void gm107_grctx_generate_pagepool(struct gf100_grctx *);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
 
 extern const struct gf100_grctx_func gm200_grctx;
+void gm200_grctx_generate_dist_skip_table(struct gf100_gr *);
 void gm200_grctx_generate_405b60(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gm20b_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 423b09753bb7..b3f4127f7520 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -179,6 +179,16 @@ gf117_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
+void
+gf117_grctx_generate_dist_skip_table(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+}
+
 void
 gf117_grctx_generate_rop_mapping(struct gf100_gr *gr)
 {
@@ -282,7 +292,6 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	u32 idle_timeout;
-	int i;
 
 	nvkm_mc_unk260(device, 0);
 
@@ -301,9 +310,6 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	for (i = 0; i < 8; i++)
-		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
-
 	gf100_gr_icmd(gr, grctx->icmd);
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, grctx->mthd);
@@ -336,4 +342,5 @@ gf117_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
+	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 25576c1ea9cc..12169314f3e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -898,7 +898,6 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	u32 idle_timeout;
-	int i;
 
 	nvkm_mc_unk260(device, 0);
 
@@ -917,9 +916,6 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	for (i = 0; i < 8; i++)
-		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
-
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
 
@@ -1006,4 +1002,5 @@ gk104_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 284570a0b5cc..e6a54dc1a01a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -835,4 +835,5 @@ gk110_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index ffd8cf989309..ef82ebee82c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -96,4 +96,5 @@ gk110b_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index e5e4d4dce86e..226f8aa9e7f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -557,4 +557,5 @@ gk208_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index c209bf38b5d9..cdf9d60683e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -945,7 +945,6 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	u32 idle_timeout;
-	int i;
 
 	gf100_gr_mmio(gr, grctx->hub);
 	gf100_gr_mmio(gr, grctx->gpc);
@@ -962,9 +961,6 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_wr32(device, 0x4064d0, 0x00000001);
-	for (i = 1; i < 8; i++)
-		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 	nvkm_wr32(device, 0x406500, 0x00000001);
 
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
@@ -1005,4 +1001,5 @@ gm107_grctx = {
 	.tpc_nr = gf100_grctx_generate_tpc_nr,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index cfccd75dbc30..689120683fb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -78,8 +78,6 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	for (i = 0; i < 8; i++)
-		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 	nvkm_wr32(device, 0x406500, 0x00000000);
 
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
@@ -98,6 +96,28 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
 }
 
+void
+gm200_grctx_generate_dist_skip_table(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 data[8] = {};
+	int gpc, ppc, i;
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
+			u8 ppc_tpcs = gr->ppc_tpc_nr[gpc][ppc];
+			u8 ppc_tpcm = gr->ppc_tpc_mask[gpc][ppc];
+			while (ppc_tpcs-- > gr->ppc_tpc_min)
+				ppc_tpcm &= ppc_tpcm - 1;
+			ppc_tpcm ^= gr->ppc_tpc_mask[gpc][ppc];
+			((u8 *)data)[gpc] |= ppc_tpcm;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(data); i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), data[i]);
+}
+
 const struct gf100_grctx_func
 gm200_grctx = {
 	.main  = gm200_grctx_generate_main,
@@ -115,4 +135,5 @@ gm200_grctx = {
 	.alpha_nr = 0x1000,
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index e09990785cb9..1a3d0c566fea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -140,8 +140,6 @@ gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	for (i = 0; i < 8; i++)
-		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 	nvkm_wr32(device, 0x406500, 0x00000000);
 
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
@@ -174,4 +172,5 @@ gp100_grctx = {
 	.alpha_nr = 0x800,
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 553a609c4f98..2aeabb362447 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -96,4 +96,5 @@ gp102_grctx = {
 	.alpha_nr = 0x800,
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index db3fff89bc2f..4aea2f6552cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -46,4 +46,5 @@ gp107_grctx = {
 	.alpha_nr = 0x800,
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index fe3b44d18a67..dd4a4104306c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1685,6 +1685,9 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 				continue;
 			gr->ppc_mask[i] |= (1 << j);
 			gr->ppc_tpc_nr[i][j] = hweight8(gr->ppc_tpc_mask[i][j]);
+			if (gr->ppc_tpc_min == 0 ||
+			    gr->ppc_tpc_min > gr->ppc_tpc_nr[i][j])
+				gr->ppc_tpc_min = gr->ppc_tpc_nr[i][j];
 		}
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 6f7a7864d66f..c2a1b2adff36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -105,6 +105,7 @@ struct gf100_gr {
 	u8 ppc_mask[GPC_MAX];
 	u8 ppc_tpc_mask[GPC_MAX][4];
 	u8 ppc_tpc_nr[GPC_MAX][4];
+	u8 ppc_tpc_min;
 
 	struct gf100_gr_data mmio_data[4];
 	struct gf100_gr_mmio mmio_list[4096/8];

From e7163b192226206bc350a09a52603bd103a5ff6a Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1077/1286] drm/nouveau/gr/gf100-: virtualise r406500

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 9 +++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 9 +++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 3 +--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c | 1 +
 7 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 176be7124f29..7bd28c1bb289 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1362,6 +1362,8 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 		func->max_ways_evict(gr);
 	if (func->dist_skip_table)
 		func->dist_skip_table(gr);
+	if (func->r406500)
+		func->r406500(gr);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index dd1c73b725cd..7d949a54f958 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -58,6 +58,7 @@ struct gf100_grctx_func {
 	void (*alpha_beta_tables)(struct gf100_gr *);
 	void (*max_ways_evict)(struct gf100_gr *);
 	void (*dist_skip_table)(struct gf100_gr *);
+	void (*r406500)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -115,6 +116,7 @@ void gm107_grctx_generate_attrib(struct gf100_grctx *);
 
 extern const struct gf100_grctx_func gm200_grctx;
 void gm200_grctx_generate_dist_skip_table(struct gf100_gr *);
+void gm200_grctx_generate_r406500(struct gf100_gr *);
 void gm200_grctx_generate_405b60(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gm20b_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index cdf9d60683e0..aa5fff3c0d74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -930,6 +930,12 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
 	}
 }
 
+static void
+gm107_grctx_generate_r406500(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x406500, 0x00000001);
+}
+
 void
 gm107_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
 {
@@ -961,8 +967,6 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_wr32(device, 0x406500, 0x00000001);
-
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
 	gf100_gr_icmd(gr, grctx->icmd);
@@ -1002,4 +1006,5 @@ gm107_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
+	.r406500 = gm107_grctx_generate_r406500,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index 689120683fb4..9c4db049f9c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -59,6 +59,12 @@ gm200_grctx_generate_405b60(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
 
+void
+gm200_grctx_generate_r406500(struct gf100_gr *gr)
+{
+	nvkm_wr32(gr->base.engine.subdev.device, 0x406500, 0x00000000);
+}
+
 static void
 gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
@@ -78,8 +84,6 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_wr32(device, 0x406500, 0x00000000);
-
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
 	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
@@ -136,4 +140,5 @@ gm200_grctx = {
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
+	.r406500 = gm200_grctx_generate_r406500,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 1a3d0c566fea..3bad1a573ee3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -140,8 +140,6 @@ gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_wr32(device, 0x406500, 0x00000000);
-
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
 	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
@@ -173,4 +171,5 @@ gp100_grctx = {
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
+	.r406500 = gm200_grctx_generate_r406500,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 2aeabb362447..dea009702cc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -97,4 +97,5 @@ gp102_grctx = {
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
+	.r406500 = gm200_grctx_generate_r406500,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index 4aea2f6552cc..a97c4b02acb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -47,4 +47,5 @@ gp107_grctx = {
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
+	.r406500 = gm200_grctx_generate_r406500,
 };

From 60c0264a667fe80ac48d746d073e9d869a5d52f0 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1078/1286] drm/nouveau/gr/gf100-: virtualise gpc_tpc_nr

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c  | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h  | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c  | 9 ++++++++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c  | 3 +--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c  | 3 +--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c  | 3 +--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c  | 1 +
 11 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 7bd28c1bb289..c1c83e5bf0a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1364,6 +1364,8 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 		func->dist_skip_table(gr);
 	if (func->r406500)
 		func->r406500(gr);
+	if (func->gpc_tpc_nr)
+		func->gpc_tpc_nr(gr);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 7d949a54f958..c91904d11b24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -59,6 +59,7 @@ struct gf100_grctx_func {
 	void (*max_ways_evict)(struct gf100_gr *);
 	void (*dist_skip_table)(struct gf100_gr *);
 	void (*r406500)(struct gf100_gr *);
+	void (*gpc_tpc_nr)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -92,6 +93,7 @@ extern const struct gf100_grctx_func gf119_grctx;
 
 extern const struct gf100_grctx_func gk104_grctx;
 void gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *);
+void gk104_grctx_generate_gpc_tpc_nr(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk20a_grctx;
 void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 12169314f3e2..302b8c7b68a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -892,6 +892,13 @@ gk104_grctx_generate_unkn(struct gf100_gr *gr)
 	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
+void
+gk104_grctx_generate_gpc_tpc_nr(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+}
+
 void
 gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
@@ -916,7 +923,6 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
 
 	gf100_gr_icmd(gr, grctx->icmd);
@@ -1003,4 +1009,5 @@ gk104_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index e6a54dc1a01a..64e00d7dffdd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -836,4 +836,5 @@ gk110_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index ef82ebee82c9..f800ed5562a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -97,4 +97,5 @@ gk110b_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 226f8aa9e7f6..494d9a9a200a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -558,4 +558,5 @@ gk208_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index aa5fff3c0d74..6b279bb49401 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -967,8 +967,6 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
-
 	gf100_gr_icmd(gr, grctx->icmd);
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, grctx->mthd);
@@ -1007,4 +1005,5 @@ gm107_grctx = {
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.r406500 = gm107_grctx_generate_r406500,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index 9c4db049f9c9..3ba5e95d8c15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -84,8 +84,6 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
-
 	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
 		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
 	nvkm_wr32(device, 0x4041c4, tmp);
@@ -141,4 +139,5 @@ gm200_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 	.r406500 = gm200_grctx_generate_r406500,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 3bad1a573ee3..c48617b74d8a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -140,8 +140,6 @@ gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
-
 	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
 		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
 	nvkm_wr32(device, 0x4041c4, tmp);
@@ -172,4 +170,5 @@ gp100_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 	.r406500 = gm200_grctx_generate_r406500,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index dea009702cc4..ec4fbe87facf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -98,4 +98,5 @@ gp102_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 	.r406500 = gm200_grctx_generate_r406500,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index a97c4b02acb4..84c98cd5cad9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -48,4 +48,5 @@ gp107_grctx = {
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 	.r406500 = gm200_grctx_generate_r406500,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 };

From aa5e38dc9fdf0a11724561777d712bfdf0d6ad99 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1079/1286] drm/nouveau/gr/gf100-: virtualise r419f78 + apply
 fixes from traces

Removed from GK110[B]/GK208, as RM traces show the register is not touched
on those chipsets.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 10 ++++++++--
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index c1c83e5bf0a9..3793d481e851 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1366,6 +1366,8 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 		func->r406500(gr);
 	if (func->gpc_tpc_nr)
 		func->gpc_tpc_nr(gr);
+	if (func->r419f78)
+		func->r419f78(gr);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index c91904d11b24..e84b46f6210d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -60,6 +60,7 @@ struct gf100_grctx_func {
 	void (*dist_skip_table)(struct gf100_gr *);
 	void (*r406500)(struct gf100_gr *);
 	void (*gpc_tpc_nr)(struct gf100_gr *);
+	void (*r419f78)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 302b8c7b68a8..f527bca30a02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -892,6 +892,13 @@ gk104_grctx_generate_unkn(struct gf100_gr *gr)
 	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
+static void
+gk104_grctx_generate_r419f78(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
+}
+
 void
 gk104_grctx_generate_gpc_tpc_nr(struct gf100_gr *gr)
 {
@@ -923,8 +930,6 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
-
 	gf100_gr_icmd(gr, grctx->icmd);
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, grctx->mthd);
@@ -1010,4 +1015,5 @@ gk104_grctx = {
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.r419f78 = gk104_grctx_generate_r419f78,
 };

From fc36076441bae141893bd79899d19aa1b5fdf524 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:46 +1000
Subject: [PATCH 1080/1286] drm/nouveau/gr/gf100-: virtualise tpc_mask + apply
 fixes from traces

On some configurations, higher TPC IDs were not being placed at the correct
positions.
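
For illustration only (not part of the patch), a small userspace sketch of
the 0x4041c4 packing that this change virtualises: gm200 previously
hard-coded a per-GPC field width of 4 and gp100 duplicated the loop with 5,
whereas the new gm200_grctx_generate_tpc_mask() uses the chipset's maximum
TPCs per GPC (gr->func->tpc_nr). The GPC/TPC counts below are invented
example values showing why the width matters for a chipset with five TPCs
per GPC:

  #include <stdio.h>

  static unsigned int pack_tpc_mask(const int *tpc_nr, int gpcs, int width)
  {
          unsigned int tmp = 0;
          int i;

          for (i = 0; i < gpcs; i++)
                  tmp |= ((1u << tpc_nr[i]) - 1) << (i * width);
          return tmp;
  }

  int main(void)
  {
          const int tpc_nr[2] = { 5, 5 }; /* e.g. a GP100-like layout */

          /* Hard-coded width 4: GPC1's field overlaps GPC0's fifth TPC. */
          printf("width 4: 0x%08x\n", pack_tpc_mask(tpc_nr, 2, 4)); /* 0x000001ff */
          /* Width 5 (gr->func->tpc_nr): each GPC keeps its own field. */
          printf("width 5: 0x%08x\n", pack_tpc_mask(tpc_nr, 2, 5)); /* 0x000003ff */
          return 0;
  }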

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  4 ++++
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 12 +++++-----
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 22 +++++++++++--------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 22 +++++++------------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gm200.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp100.c    |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gp102.c    |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gp107.c    |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gp10b.c    |  2 ++
 13 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 3793d481e851..a52f27f1e5a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1368,6 +1368,10 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 		func->gpc_tpc_nr(gr);
 	if (func->r419f78)
 		func->r419f78(gr);
+	if (func->tpc_mask)
+		func->tpc_mask(gr);
+	if (func->smid_config)
+		func->smid_config(gr);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index e84b46f6210d..d319e76fbfbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -61,6 +61,8 @@ struct gf100_grctx_func {
 	void (*r406500)(struct gf100_gr *);
 	void (*gpc_tpc_nr)(struct gf100_gr *);
 	void (*r419f78)(struct gf100_gr *);
+	void (*tpc_mask)(struct gf100_gr *);
+	void (*smid_config)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -103,11 +105,6 @@ void gk104_grctx_generate_pagepool(struct gf100_grctx *);
 void gk104_grctx_generate_patch_ltc(struct gf100_grctx *);
 void gk104_grctx_generate_unkn(struct gf100_gr *);
 
-void gm107_grctx_generate_bundle(struct gf100_grctx *);
-void gm107_grctx_generate_pagepool(struct gf100_grctx *);
-void gm107_grctx_generate_attrib(struct gf100_grctx *);
-void gm107_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
-
 extern const struct gf100_grctx_func gk110_grctx;
 extern const struct gf100_grctx_func gk110b_grctx;
 extern const struct gf100_grctx_func gk208_grctx;
@@ -116,17 +113,20 @@ extern const struct gf100_grctx_func gm107_grctx;
 void gm107_grctx_generate_bundle(struct gf100_grctx *);
 void gm107_grctx_generate_pagepool(struct gf100_grctx *);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
+void gm107_grctx_generate_sm_id(struct gf100_gr *, int, int, int);
 
 extern const struct gf100_grctx_func gm200_grctx;
 void gm200_grctx_generate_dist_skip_table(struct gf100_gr *);
 void gm200_grctx_generate_r406500(struct gf100_gr *);
-void gm200_grctx_generate_405b60(struct gf100_gr *);
+void gm200_grctx_generate_tpc_mask(struct gf100_gr *);
+void gm200_grctx_generate_smid_config(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gm20b_grctx;
 
 extern const struct gf100_grctx_func gp100_grctx;
 void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gp100_grctx_generate_pagepool(struct gf100_grctx *);
+void gp100_grctx_generate_smid_config(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gp102_grctx;
 void gp102_grctx_generate_attrib(struct gf100_grctx *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index 3ba5e95d8c15..f1e87b97480d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -28,7 +28,7 @@
  ******************************************************************************/
 
 void
-gm200_grctx_generate_405b60(struct gf100_gr *gr)
+gm200_grctx_generate_smid_config(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
@@ -59,6 +59,15 @@ gm200_grctx_generate_405b60(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
 
+void
+gm200_grctx_generate_tpc_mask(struct gf100_gr *gr)
+{
+	u32 tmp, i;
+	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * gr->func->tpc_nr);
+	nvkm_wr32(gr->base.engine.subdev.device, 0x4041c4, tmp);
+}
+
 void
 gm200_grctx_generate_r406500(struct gf100_gr *gr)
 {
@@ -70,8 +79,7 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 idle_timeout, tmp;
-	int i;
+	u32 idle_timeout;
 
 	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
 
@@ -84,12 +92,6 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
-		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
-	nvkm_wr32(device, 0x4041c4, tmp);
-
-	gm200_grctx_generate_405b60(gr);
-
 	gf100_gr_icmd(gr, gr->fuc_bundle);
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, gr->fuc_method);
@@ -140,4 +142,6 @@ gm200_grctx = {
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 	.r406500 = gm200_grctx_generate_r406500,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.tpc_mask = gm200_grctx_generate_tpc_mask,
+	.smid_config = gm200_grctx_generate_smid_config,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index 3dd4e18d2525..a1d9e114ebeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -52,7 +52,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
 	nvkm_wr32(device, 0x4041c4, tmp);
 
-	gm200_grctx_generate_405b60(gr);
+	gm200_grctx_generate_smid_config(gr);
 
 	gf100_gr_wait_idle(gr);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index c48617b74d8a..821219a04197 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -89,13 +89,12 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
 	mmio_wr32(info, 0x41befc, 0x00000000);
 }
 
-static void
-gp100_grctx_generate_405b60(struct gf100_gr *gr)
+void
+gp100_grctx_generate_smid_config(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
-	u32 dist[TPC_MAX / 4] = {};
-	u32 gpcs[GPC_MAX * 2] = {};
+	u32 dist[TPC_MAX / 4] = {}, gpcs[16] = {};
 	u8  tpcnr[GPC_MAX];
 	int tpc, gpc, i;
 
@@ -112,12 +111,12 @@ gp100_grctx_generate_405b60(struct gf100_gr *gr)
 		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
-		gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
+		gpcs[gpc + (gr->func->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
 	}
 
 	for (i = 0; i < dist_nr; i++)
 		nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
-	for (i = 0; i < gr->gpc_nr * 2; i++)
+	for (i = 0; i < ARRAY_SIZE(gpcs); i++)
 		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
 
@@ -126,8 +125,7 @@ gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 idle_timeout, tmp;
-	int i;
+	u32 idle_timeout;
 
 	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
 
@@ -140,12 +138,6 @@ gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
-		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
-	nvkm_wr32(device, 0x4041c4, tmp);
-
-	gp100_grctx_generate_405b60(gr);
-
 	gf100_gr_icmd(gr, gr->fuc_bundle);
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, gr->fuc_method);
@@ -171,4 +163,6 @@ gp100_grctx = {
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 	.r406500 = gm200_grctx_generate_r406500,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.tpc_mask = gm200_grctx_generate_tpc_mask,
+	.smid_config = gp100_grctx_generate_smid_config,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index ec4fbe87facf..611819ffb1f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -99,4 +99,6 @@ gp102_grctx = {
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 	.r406500 = gm200_grctx_generate_r406500,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.tpc_mask = gm200_grctx_generate_tpc_mask,
+	.smid_config = gp100_grctx_generate_smid_config,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index 84c98cd5cad9..d908317079e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -49,4 +49,6 @@ gp107_grctx = {
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
 	.r406500 = gm200_grctx_generate_r406500,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.tpc_mask = gm200_grctx_generate_tpc_mask,
+	.smid_config = gp100_grctx_generate_smid_config,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c2a1b2adff36..31109cec5a76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -154,6 +154,8 @@ struct gf100_gr_func {
 		struct gf100_gr_ucode *ucode;
 	} gpccs;
 	int (*rops)(struct gf100_gr *);
+	int gpc_nr;
+	int tpc_nr;
 	int ppc_nr;
 	const struct gf100_grctx_func *grctx;
 	const struct nvkm_therm_clkgate_pack *clkgate_pack;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 4dcb56bfbca1..ae0eaf8e6d71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -134,6 +134,7 @@ gm200_gr = {
 	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
 	.init_400054 = gm107_gr_init_400054,
 	.rops = gm200_gr_rops,
+	.tpc_nr = 4,
 	.ppc_nr = 2,
 	.grctx = &gm200_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index cc507e830511..3addbc1d62c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -80,6 +80,8 @@ gp100_gr = {
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
+	.gpc_nr = 6,
+	.tpc_nr = 5,
 	.ppc_nr = 2,
 	.grctx = &gp100_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 86d1ff777d67..ea99c15487ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -58,6 +58,8 @@ gp102_gr = {
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
+	.gpc_nr = 6,
+	.tpc_nr = 5,
 	.ppc_nr = 3,
 	.grctx = &gp102_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 14007b5d2e41..09cba537b8b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -44,6 +44,8 @@ gp107_gr = {
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
+	.gpc_nr = 2,
+	.tpc_nr = 3,
 	.ppc_nr = 1,
 	.grctx = &gp107_grctx,
 	.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 450a96d1cd07..4972bf8d2530 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -42,6 +42,8 @@ gp10b_gr = {
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
 	.rops = gm200_gr_rops,
+	.gpc_nr = 1,
+	.tpc_nr = 2,
 	.ppc_nr = 1,
 	.grctx = &gp102_grctx,
 	.sclass = {

From 0e5a5e86f3edb0845f6caf8f9819eebb26ec040a Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1081/1286] drm/nouveau/gr/gf100-: support firmware-provided
 bundle/method everywhere

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index a52f27f1e5a6..5350ab0a58f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1404,9 +1404,17 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
-	gf100_gr_icmd(gr, grctx->icmd);
+	if (gr->fuc_bundle)
+		gf100_gr_icmd(gr, gr->fuc_bundle);
+	else
+		gf100_gr_icmd(gr, grctx->icmd);
+
 	nvkm_wr32(device, 0x404154, idle_timeout);
-	gf100_gr_mthd(gr, grctx->mthd);
+
+	if (gr->fuc_method)
+		gf100_gr_mthd(gr, gr->fuc_method);
+	else
+		gf100_gr_mthd(gr, grctx->mthd);
 	nvkm_mc_unk260(device, 1);
 }
 

From 99a3c67e84c955736a109e645371c7132c1188f2 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1082/1286] drm/nouveau/gr/gf100-gf119: update 419cb8 where
 required

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 11 +++++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c |  1 +
 7 files changed, 19 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 5350ab0a58f2..e29ac88841ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1024,6 +1024,13 @@ gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
 	nvkm_wr32(device, addr, data);
 }
 
+void
+gf100_grctx_generate_r419cb8(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419cb8, 0x00007c00, 0x00000000);
+}
+
 void
 gf100_grctx_generate_bundle(struct gf100_grctx *info)
 {
@@ -1416,6 +1423,9 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	else
 		gf100_gr_mthd(gr, grctx->mthd);
 	nvkm_mc_unk260(device, 1);
+
+	if (grctx->r419cb8)
+		grctx->r419cb8(gr);
 }
 
 #define CB_RESERVED 0x80000
@@ -1564,4 +1574,5 @@ gf100_grctx = {
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
+	.r419cb8 = gf100_grctx_generate_r419cb8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index d319e76fbfbd..67628b9a742d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -63,6 +63,8 @@ struct gf100_grctx_func {
 	void (*r419f78)(struct gf100_gr *);
 	void (*tpc_mask)(struct gf100_gr *);
 	void (*smid_config)(struct gf100_gr *);
+	/* misc other things */
+	void (*r419cb8)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -79,6 +81,7 @@ void gf100_grctx_generate_r4060a8(struct gf100_gr *);
 void gf100_grctx_generate_rop_mapping(struct gf100_gr *);
 void gf100_grctx_generate_alpha_beta_tables(struct gf100_gr *);
 void gf100_grctx_generate_max_ways_evict(struct gf100_gr *);
+void gf100_grctx_generate_r419cb8(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index d246bb62ab02..7f3b9289a66b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -102,4 +102,5 @@ gf104_grctx = {
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
+	.r419cb8 = gf100_grctx_generate_r419cb8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 3b6e645b46a7..369d64f867c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -800,4 +800,5 @@ gf108_grctx = {
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
+	.r419cb8 = gf100_grctx_generate_r419cb8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index b65a1f329ebd..d59c2480f04d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -353,4 +353,5 @@ gf110_grctx = {
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
+	.r419cb8 = gf100_grctx_generate_r419cb8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index b3f4127f7520..e922e3e983cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -343,4 +343,5 @@ gf117_grctx = {
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
+	.r419cb8 = gf100_grctx_generate_r419cb8,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 6b2fcfe44df2..cc1a9354fecc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -523,4 +523,5 @@ gf119_grctx = {
 	.rop_mapping = gf100_grctx_generate_rop_mapping,
 	.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
 	.max_ways_evict = gf100_grctx_generate_max_ways_evict,
+	.r419cb8 = gf100_grctx_generate_r419cb8,
 };

From 5b54b5b92543cac6b9f3728935869fafdb3bf0d9 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1083/1286] drm/nouveau/gr/gf100-: note missing 418800
 modifications

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 19 +++++++++++++++++--
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c |  1 +
 .../drm/nouveau/nvkm/engine/gr/ctxgk110b.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c |  1 +
 6 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index e29ac88841ad..0dcb227c59f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1426,6 +1426,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	if (grctx->r419cb8)
 		grctx->r419cb8(gr);
+	if (grctx->r418800)
+		grctx->r418800(gr);
 }
 
 #define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 67628b9a742d..0d05664f9ee6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -65,6 +65,7 @@ struct gf100_grctx_func {
 	void (*smid_config)(struct gf100_gr *);
 	/* misc other things */
 	void (*r419cb8)(struct gf100_gr *);
+	void (*r418800)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -107,6 +108,7 @@ void gk104_grctx_generate_bundle(struct gf100_grctx *);
 void gk104_grctx_generate_pagepool(struct gf100_grctx *);
 void gk104_grctx_generate_patch_ltc(struct gf100_grctx *);
 void gk104_grctx_generate_unkn(struct gf100_gr *);
+void gk104_grctx_generate_r418800(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk110_grctx;
 extern const struct gf100_grctx_func gk110b_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index f527bca30a02..a4fe36c136fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -840,6 +840,21 @@ gk104_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
+void
+gk104_grctx_generate_r418800(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	/*XXX: Not real sure where to apply these, there doesn't seem
+	 *     to be any pattern to which chipsets it's done on.
+	 *
+	 *     Perhaps a VBIOS tweak?
+	 */
+	if (0) {
+		nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
+		nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
+	}
+}
+
 void
 gk104_grctx_generate_patch_ltc(struct gf100_grctx *info)
 {
@@ -935,8 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	gf100_gr_mthd(gr, grctx->mthd);
 	nvkm_mc_unk260(device, 1);
 
-	nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
-	nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
+	grctx->r418800(gr);
 }
 
 void
@@ -1016,4 +1030,5 @@ gk104_grctx = {
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r419f78 = gk104_grctx_generate_r419f78,
+	.r418800 = gk104_grctx_generate_r418800,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 64e00d7dffdd..7102a24a934e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -837,4 +837,5 @@ gk110_grctx = {
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.r418800 = gk104_grctx_generate_r418800,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index f800ed5562a1..049de07d7bc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -98,4 +98,5 @@ gk110b_grctx = {
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.r418800 = gk104_grctx_generate_r418800,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 494d9a9a200a..c69494f7418e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -559,4 +559,5 @@ gk208_grctx = {
 	.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.r418800 = gk104_grctx_generate_r418800,
 };

From ad45a92b9a104285dd3c95b6a8a4d5e0b50b6929 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1084/1286] drm/nouveau/gr/gf100-: update 419eb0 where required

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c  | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h  | 3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c  | 8 ++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c | 1 +
 4 files changed, 14 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 0dcb227c59f1..e8be2fbb6c75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1428,6 +1428,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		grctx->r419cb8(gr);
 	if (grctx->r418800)
 		grctx->r418800(gr);
+	if (grctx->r419eb0)
+		grctx->r419eb0(gr);
 }
 
 #define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 0d05664f9ee6..4a6d47010cab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -66,6 +66,7 @@ struct gf100_grctx_func {
 	/* misc other things */
 	void (*r419cb8)(struct gf100_gr *);
 	void (*r418800)(struct gf100_gr *);
+	void (*r419eb0)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -111,6 +112,8 @@ void gk104_grctx_generate_unkn(struct gf100_gr *);
 void gk104_grctx_generate_r418800(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk110_grctx;
+void gk110_grctx_generate_r419eb0(struct gf100_gr *);
+
 extern const struct gf100_grctx_func gk110b_grctx;
 extern const struct gf100_grctx_func gk208_grctx;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 7102a24a934e..f00e7afef9c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -808,6 +808,13 @@ gk110_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
+void
+gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
+}
+
 const struct gf100_grctx_func
 gk110_grctx = {
 	.main  = gk104_grctx_generate_main,
@@ -838,4 +845,5 @@ gk110_grctx = {
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
+	.r419eb0 = gk110_grctx_generate_r419eb0,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 049de07d7bc5..ed09cfdfb024 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -99,4 +99,5 @@ gk110b_grctx = {
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
+	.r419eb0 = gk110_grctx_generate_r419eb0,
 };

From 18d17221dd58741a8590ba0a40a9ded82aa5d619 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1085/1286] drm/nouveau/gr/gf100-: virtualise r419e00

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c    |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c    | 16 ++++++++++++----
 3 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index e8be2fbb6c75..d2b78ed6556a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1430,6 +1430,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		grctx->r418800(gr);
 	if (grctx->r419eb0)
 		grctx->r419eb0(gr);
+	if (grctx->r419e00)
+		grctx->r419e00(gr);
 }
 
 #define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 4a6d47010cab..474b5ddba4ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -67,6 +67,7 @@ struct gf100_grctx_func {
 	void (*r419cb8)(struct gf100_gr *);
 	void (*r418800)(struct gf100_gr *);
 	void (*r419eb0)(struct gf100_gr *);
+	void (*r419e00)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 6b279bb49401..12a9431dc8b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -860,6 +860,16 @@ gm107_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
+static void
+gm107_grctx_generate_r419e00(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
+	nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
+}
+
 void
 gm107_grctx_generate_bundle(struct gf100_grctx *info)
 {
@@ -971,10 +981,7 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, grctx->mthd);
 
-	nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
-	nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
-	nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
-	nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
+	grctx->r419e00(gr);
 }
 
 const struct gf100_grctx_func
@@ -1006,4 +1013,5 @@ gm107_grctx = {
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.r406500 = gm107_grctx_generate_r406500,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.r419e00 = gm107_grctx_generate_r419e00,
 };

From c2592adea7a81857bf27f5b820640e67a0c6b664 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1086/1286] drm/nouveau/gr/gf100-: virtualise r418e94

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 12 ++++++++++--
 3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index d2b78ed6556a..24792be61f9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1432,6 +1432,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		grctx->r419eb0(gr);
 	if (grctx->r419e00)
 		grctx->r419e00(gr);
+	if (grctx->r418e94)
+		grctx->r418e94(gr);
 }
 
 #define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 474b5ddba4ed..ac7f4f52b707 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -68,6 +68,7 @@ struct gf100_grctx_func {
 	void (*r418800)(struct gf100_gr *);
 	void (*r419eb0)(struct gf100_gr *);
 	void (*r419e00)(struct gf100_gr *);
+	void (*r418e94)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index f1e87b97480d..13951aa41ba4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -27,6 +27,14 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
+static void
+gm200_grctx_generate_r418e94(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
+	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
+}
+
 void
 gm200_grctx_generate_smid_config(struct gf100_gr *gr)
 {
@@ -96,8 +104,7 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 	nvkm_wr32(device, 0x404154, idle_timeout);
 	gf100_gr_mthd(gr, gr->fuc_method);
 
-	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
-	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
+	grctx->r418e94(gr);
 }
 
 void
@@ -144,4 +151,5 @@ gm200_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.tpc_mask = gm200_grctx_generate_tpc_mask,
 	.smid_config = gm200_grctx_generate_smid_config,
+	.r418e94 = gm200_grctx_generate_r418e94,
 };

From 8d56fc48d3563cb3767c1d3dd243974168067f99 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1087/1286] drm/nouveau/gr/gf100-: update 419a3c where required

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 8 ++++++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c | 1 +
 6 files changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 24792be61f9b..1d1d0ccd1ddc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1434,6 +1434,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		grctx->r419e00(gr);
 	if (grctx->r418e94)
 		grctx->r418e94(gr);
+	if (grctx->r419a3c)
+		grctx->r419a3c(gr);
 }
 
 #define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index ac7f4f52b707..bff24e66d39e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -69,6 +69,7 @@ struct gf100_grctx_func {
 	void (*r419eb0)(struct gf100_gr *);
 	void (*r419e00)(struct gf100_gr *);
 	void (*r418e94)(struct gf100_gr *);
+	void (*r419a3c)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -130,6 +131,7 @@ void gm200_grctx_generate_dist_skip_table(struct gf100_gr *);
 void gm200_grctx_generate_r406500(struct gf100_gr *);
 void gm200_grctx_generate_tpc_mask(struct gf100_gr *);
 void gm200_grctx_generate_smid_config(struct gf100_gr *);
+void gm200_grctx_generate_r419a3c(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gm20b_grctx;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index 13951aa41ba4..7a9be045e684 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -27,6 +27,13 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
+void
+gm200_grctx_generate_r419a3c(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419a3c, 0x00000014, 0x00000000);
+}
+
 static void
 gm200_grctx_generate_r418e94(struct gf100_gr *gr)
 {
@@ -152,4 +159,5 @@ gm200_grctx = {
 	.tpc_mask = gm200_grctx_generate_tpc_mask,
 	.smid_config = gm200_grctx_generate_smid_config,
 	.r418e94 = gm200_grctx_generate_r418e94,
+	.r419a3c = gm200_grctx_generate_r419a3c,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 821219a04197..c60f9244fd7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -165,4 +165,5 @@ gp100_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.tpc_mask = gm200_grctx_generate_tpc_mask,
 	.smid_config = gp100_grctx_generate_smid_config,
+	.r419a3c = gm200_grctx_generate_r419a3c,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 611819ffb1f8..3af42c14c8f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -101,4 +101,5 @@ gp102_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.tpc_mask = gm200_grctx_generate_tpc_mask,
 	.smid_config = gp100_grctx_generate_smid_config,
+	.r419a3c = gm200_grctx_generate_r419a3c,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index d908317079e0..1864674e5824 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -51,4 +51,5 @@ gp107_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.tpc_mask = gm200_grctx_generate_tpc_mask,
 	.smid_config = gp100_grctx_generate_smid_config,
+	.r419a3c = gm200_grctx_generate_r419a3c,
 };

From a5537f980e4aba64ce1a0b14ee8fb27d0fd10362 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1088/1286] drm/nouveau/gr/gf100-: update r408840 where
 required

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/include/nvkm/engine/gr.h  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |  4 +-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild |  2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  3 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c |  8 +++
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c | 47 ++++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gp104.c    | 62 +++++++++++++++++++
 8 files changed, 127 insertions(+), 2 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index fb18f105fc43..33b2f2e543ee 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -45,6 +45,7 @@ int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 379e701962a7..16e8090082ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2256,7 +2256,7 @@ nv134_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
-	.gr = gp102_gr_new,
+	.gr = gp104_gr_new,
 	.nvdec = gp102_nvdec_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
@@ -2292,7 +2292,7 @@ nv136_chipset = {
 	.disp = gp102_disp_new,
 	.dma = gf119_dma_new,
 	.fifo = gp100_fifo_new,
-	.gr = gp102_gr_new,
+	.gr = gp104_gr_new,
 	.nvdec = gp102_nvdec_new,
 	.sec2 = gp102_sec2_new,
 	.sw = gf100_sw_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 8a22558b7b52..42342b4a9abe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -33,6 +33,7 @@ nvkm-y += nvkm/engine/gr/gm200.o
 nvkm-y += nvkm/engine/gr/gm20b.o
 nvkm-y += nvkm/engine/gr/gp100.o
 nvkm-y += nvkm/engine/gr/gp102.o
+nvkm-y += nvkm/engine/gr/gp104.o
 nvkm-y += nvkm/engine/gr/gp107.o
 nvkm-y += nvkm/engine/gr/gp10b.o
 
@@ -54,4 +55,5 @@ nvkm-y += nvkm/engine/gr/ctxgm200.o
 nvkm-y += nvkm/engine/gr/ctxgm20b.o
 nvkm-y += nvkm/engine/gr/ctxgp100.o
 nvkm-y += nvkm/engine/gr/ctxgp102.o
+nvkm-y += nvkm/engine/gr/ctxgp104.o
 nvkm-y += nvkm/engine/gr/ctxgp107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 1d1d0ccd1ddc..55603766c7f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1436,6 +1436,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		grctx->r418e94(gr);
 	if (grctx->r419a3c)
 		grctx->r419a3c(gr);
+	if (grctx->r408840)
+		grctx->r408840(gr);
 }
 
 #define CB_RESERVED 0x80000
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index bff24e66d39e..668b2c71ff42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -70,6 +70,7 @@ struct gf100_grctx_func {
 	void (*r419e00)(struct gf100_gr *);
 	void (*r418e94)(struct gf100_gr *);
 	void (*r419a3c)(struct gf100_gr *);
+	void (*r408840)(struct gf100_gr *);
 };
 
 extern const struct gf100_grctx_func gf100_grctx;
@@ -143,6 +144,8 @@ void gp100_grctx_generate_smid_config(struct gf100_gr *);
 extern const struct gf100_grctx_func gp102_grctx;
 void gp102_grctx_generate_attrib(struct gf100_grctx *);
 
+extern const struct gf100_grctx_func gp104_grctx;
+
 extern const struct gf100_grctx_func gp107_grctx;
 
 /* context init value lists */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 3af42c14c8f3..3c78a6d1b1ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -29,6 +29,13 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
+static void
+gp102_grctx_generate_r408840(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x408840, 0x00000003, 0x00000000);
+}
+
 void
 gp102_grctx_generate_attrib(struct gf100_grctx *info)
 {
@@ -102,4 +109,5 @@ gp102_grctx = {
 	.tpc_mask = gm200_grctx_generate_tpc_mask,
 	.smid_config = gp100_grctx_generate_smid_config,
 	.r419a3c = gm200_grctx_generate_r419a3c,
+	.r408840 = gp102_grctx_generate_r408840,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
new file mode 100644
index 000000000000..020cb041c5de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+const struct gf100_grctx_func
+gp104_grctx = {
+	.main = gp100_grctx_generate_main,
+	.unkn = gk104_grctx_generate_unkn,
+	.bundle = gm107_grctx_generate_bundle,
+	.bundle_size = 0x3000,
+	.bundle_min_gpm_fifo_depth = 0x180,
+	.bundle_token_limit = 0x900,
+	.pagepool = gp100_grctx_generate_pagepool,
+	.pagepool_size = 0x20000,
+	.attrib = gp102_grctx_generate_attrib,
+	.attrib_nr_max = 0x5d4,
+	.attrib_nr = 0x320,
+	.alpha_nr_max = 0xc00,
+	.alpha_nr = 0x800,
+	.sm_id = gm107_grctx_generate_sm_id,
+	.rop_mapping = gf117_grctx_generate_rop_mapping,
+	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
+	.r406500 = gm200_grctx_generate_r406500,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.tpc_mask = gm200_grctx_generate_tpc_mask,
+	.smid_config = gp100_grctx_generate_smid_config,
+	.r419a3c = gm200_grctx_generate_r419a3c,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
new file mode 100644
index 000000000000..289d8b272b42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+gp104_gr = {
+	.init = gf100_gr_init,
+	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
+	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
+	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_419cc0 = gf100_gr_init_419cc0,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
+	.init_504430 = gm107_gr_init_504430,
+	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
+	.rops = gm200_gr_rops,
+	.gpc_nr = 6,
+	.tpc_nr = 5,
+	.ppc_nr = 3,
+	.grctx = &gp104_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, PASCAL_B, &gf100_fermi },
+		{ -1, -1, PASCAL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gm200_gr_new_(&gp104_gr, device, index, pgr);
+}

From 201ed6f651e72d886d9066237a830aa49d2ffcd1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1089/1286] drm/nouveau/gr/gf100-: delete duplicated grctx init
 code

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  2 --
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 32 +----------------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 34 +------------------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c |  2 +-
 .../drm/nouveau/nvkm/engine/gr/ctxgk110b.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 31 +----------------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 27 +--------------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 25 +-------------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c |  2 +-
 12 files changed, 11 insertions(+), 152 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 668b2c71ff42..be57ff086022 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -108,7 +108,6 @@ void gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *);
 void gk104_grctx_generate_gpc_tpc_nr(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk20a_grctx;
-void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gk104_grctx_generate_bundle(struct gf100_grctx *);
 void gk104_grctx_generate_pagepool(struct gf100_grctx *);
 void gk104_grctx_generate_patch_ltc(struct gf100_grctx *);
@@ -137,7 +136,6 @@ void gm200_grctx_generate_r419a3c(struct gf100_gr *);
 extern const struct gf100_grctx_func gm20b_grctx;
 
 extern const struct gf100_grctx_func gp100_grctx;
-void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gp100_grctx_generate_pagepool(struct gf100_grctx *);
 void gp100_grctx_generate_smid_config(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index e922e3e983cf..4b7b4f8f75fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -286,39 +286,9 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
 	}
 }
 
-static void
-gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 idle_timeout;
-
-	nvkm_mc_unk260(device, 0);
-
-	gf100_gr_mmio(gr, grctx->hub);
-	gf100_gr_mmio(gr, grctx->gpc);
-	gf100_gr_mmio(gr, grctx->zcull);
-	gf100_gr_mmio(gr, grctx->tpc);
-	gf100_gr_mmio(gr, grctx->ppc);
-
-	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
-
-	grctx->bundle(info);
-	grctx->pagepool(info);
-	grctx->attrib(info);
-	grctx->unkn(gr);
-
-	gf100_grctx_generate_floorsweep(gr);
-
-	gf100_gr_icmd(gr, grctx->icmd);
-	nvkm_wr32(device, 0x404154, idle_timeout);
-	gf100_gr_mthd(gr, grctx->mthd);
-	nvkm_mc_unk260(device, 1);
-}
-
 const struct gf100_grctx_func
 gf117_grctx = {
-	.main  = gf117_grctx_generate_main,
+	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gf117_grctx_pack_hub,
 	.gpc   = gf117_grctx_pack_gpc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index a4fe36c136fd..bdf2a1e6d3b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -921,38 +921,6 @@ gk104_grctx_generate_gpc_tpc_nr(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 }
 
-void
-gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 idle_timeout;
-
-	nvkm_mc_unk260(device, 0);
-
-	gf100_gr_mmio(gr, grctx->hub);
-	gf100_gr_mmio(gr, grctx->gpc);
-	gf100_gr_mmio(gr, grctx->zcull);
-	gf100_gr_mmio(gr, grctx->tpc);
-	gf100_gr_mmio(gr, grctx->ppc);
-
-	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
-
-	grctx->bundle(info);
-	grctx->pagepool(info);
-	grctx->attrib(info);
-	grctx->unkn(gr);
-
-	gf100_grctx_generate_floorsweep(gr);
-
-	gf100_gr_icmd(gr, grctx->icmd);
-	nvkm_wr32(device, 0x404154, idle_timeout);
-	gf100_gr_mthd(gr, grctx->mthd);
-	nvkm_mc_unk260(device, 1);
-
-	grctx->r418800(gr);
-}
-
 void
 gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *gr)
 {
@@ -1002,7 +970,7 @@ gk104_grctx_generate_alpha_beta_tables(struct gf100_gr *gr)
 
 const struct gf100_grctx_func
 gk104_grctx = {
-	.main  = gk104_grctx_generate_main,
+	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk104_grctx_pack_hub,
 	.gpc   = gk104_grctx_pack_gpc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index f00e7afef9c4..2ba35d727dc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -817,7 +817,7 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
 
 const struct gf100_grctx_func
 gk110_grctx = {
-	.main  = gk104_grctx_generate_main,
+	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
 	.gpc   = gk110_grctx_pack_gpc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index ed09cfdfb024..1112f8dc70e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -71,7 +71,7 @@ gk110b_grctx_pack_tpc[] = {
 
 const struct gf100_grctx_func
 gk110b_grctx = {
-	.main  = gk104_grctx_generate_main,
+	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
 	.gpc   = gk110_grctx_pack_gpc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index c69494f7418e..613c5cf8b3bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -532,7 +532,7 @@ gk208_grctx_pack_ppc[] = {
 
 const struct gf100_grctx_func
 gk208_grctx = {
-	.main  = gk104_grctx_generate_main,
+	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk208_grctx_pack_hub,
 	.gpc   = gk208_grctx_pack_gpc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 12a9431dc8b4..7816dcb7c974 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -955,38 +955,9 @@ gm107_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
 	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
 }
 
-static void
-gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 idle_timeout;
-
-	gf100_gr_mmio(gr, grctx->hub);
-	gf100_gr_mmio(gr, grctx->gpc);
-	gf100_gr_mmio(gr, grctx->zcull);
-	gf100_gr_mmio(gr, grctx->tpc);
-	gf100_gr_mmio(gr, grctx->ppc);
-
-	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
-
-	grctx->bundle(info);
-	grctx->pagepool(info);
-	grctx->attrib(info);
-	grctx->unkn(gr);
-
-	gf100_grctx_generate_floorsweep(gr);
-
-	gf100_gr_icmd(gr, grctx->icmd);
-	nvkm_wr32(device, 0x404154, idle_timeout);
-	gf100_gr_mthd(gr, grctx->mthd);
-
-	grctx->r419e00(gr);
-}
-
 const struct gf100_grctx_func
 gm107_grctx = {
-	.main  = gm107_grctx_generate_main,
+	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm107_grctx_pack_hub,
 	.gpc   = gm107_grctx_pack_gpc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index 7a9be045e684..7107ec429778 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -89,31 +89,6 @@ gm200_grctx_generate_r406500(struct gf100_gr *gr)
 	nvkm_wr32(gr->base.engine.subdev.device, 0x406500, 0x00000000);
 }
 
-static void
-gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 idle_timeout;
-
-	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
-
-	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
-
-	grctx->bundle(info);
-	grctx->pagepool(info);
-	grctx->attrib(info);
-	grctx->unkn(gr);
-
-	gf100_grctx_generate_floorsweep(gr);
-
-	gf100_gr_icmd(gr, gr->fuc_bundle);
-	nvkm_wr32(device, 0x404154, idle_timeout);
-	gf100_gr_mthd(gr, gr->fuc_method);
-
-	grctx->r418e94(gr);
-}
-
 void
 gm200_grctx_generate_dist_skip_table(struct gf100_gr *gr)
 {
@@ -138,7 +113,7 @@ gm200_grctx_generate_dist_skip_table(struct gf100_gr *gr)
 
 const struct gf100_grctx_func
 gm200_grctx = {
-	.main  = gm200_grctx_generate_main,
+	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.bundle = gm107_grctx_generate_bundle,
 	.bundle_size = 0x3000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index c60f9244fd7c..af6330c73872 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -120,32 +120,9 @@ gp100_grctx_generate_smid_config(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
 
-void
-gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 idle_timeout;
-
-	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
-
-	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
-
-	grctx->pagepool(info);
-	grctx->bundle(info);
-	grctx->attrib(info);
-	grctx->unkn(gr);
-
-	gf100_grctx_generate_floorsweep(gr);
-
-	gf100_gr_icmd(gr, gr->fuc_bundle);
-	nvkm_wr32(device, 0x404154, idle_timeout);
-	gf100_gr_mthd(gr, gr->fuc_method);
-}
-
 const struct gf100_grctx_func
 gp100_grctx = {
-	.main  = gp100_grctx_generate_main,
+	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.bundle = gm107_grctx_generate_bundle,
 	.bundle_size = 0x3000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 3c78a6d1b1ba..8a438c2efc3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -88,7 +88,7 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
 
 const struct gf100_grctx_func
 gp102_grctx = {
-	.main = gp100_grctx_generate_main,
+	.main = gf100_grctx_generate_main,
 	.unkn = gk104_grctx_generate_unkn,
 	.bundle = gm107_grctx_generate_bundle,
 	.bundle_size = 0x3000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
index 020cb041c5de..5f799c7369bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
@@ -23,7 +23,7 @@
 
 const struct gf100_grctx_func
 gp104_grctx = {
-	.main = gp100_grctx_generate_main,
+	.main = gf100_grctx_generate_main,
 	.unkn = gk104_grctx_generate_unkn,
 	.bundle = gm107_grctx_generate_bundle,
 	.bundle_size = 0x3000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index 1864674e5824..a69e824676c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -31,7 +31,7 @@
 
 const struct gf100_grctx_func
 gp107_grctx = {
-	.main = gp100_grctx_generate_main,
+	.main = gf100_grctx_generate_main,
 	.unkn = gk104_grctx_generate_unkn,
 	.bundle = gm107_grctx_generate_bundle,
 	.bundle_size = 0x3000,

From 74b6068bd660a806e801ae039dbab58dc284301e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1090/1286] drm/nouveau/gr/gf100-: add missing reset sequence
 before golden context init

RM and NVGPU both have a variant of this, so we probably should too.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 20 +++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 55603766c7f1..4c25389fe80a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1456,6 +1456,26 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	int ret, i;
 	u64 addr;
 
+	/* NV_PGRAPH_FE_PWR_MODE_FORCE_ON. */
+	nvkm_wr32(device, 0x404170, 0x00000012);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x404170) & 0x00000010))
+			break;
+	);
+
+	/* Reset FECS. */
+	nvkm_wr32(device, 0x409614, 0x00000070);
+	nvkm_usec(device, 10, NVKM_DELAY);
+	nvkm_mask(device, 0x409614, 0x00000700, 0x00000700);
+	nvkm_usec(device, 10, NVKM_DELAY);
+	nvkm_rd32(device, 0x409614);
+
+	/* NV_PGRAPH_FE_PWR_MODE_AUTO. */
+	nvkm_wr32(device, 0x404170, 0x00000010);
+
+	/* Init SCC RAM. */
+	nvkm_wr32(device, 0x40802c, 0x00000001);
+
 	/* Allocate memory to for a "channel", which we'll use to generate
 	 * the default context values.
 	 */

From 5c05a589856ad5f79c22b0500340291c591c3050 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1091/1286] drm/nouveau/gr/gf100-: virtualise trap_mp

Required to support Volta.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  | 5 +++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  | 2 ++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c  | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c  | 1 +
 20 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index dd4a4104306c..f05d9d4c6e5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1010,7 +1010,7 @@ static const struct nvkm_bitfield gf100_mp_global_error[] = {
 	{}
 };
 
-static void
+void
 gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
 {
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
@@ -1046,7 +1046,7 @@ gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
 	}
 
 	if (stat & 0x00000002) {
-		gf100_gr_trap_mp(gr, gpc, tpc);
+		gr->func->trap_mp(gr, gpc, tpc);
 		stat &= ~0x00000002;
 	}
 
@@ -2176,6 +2176,7 @@ gf100_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 31109cec5a76..c25b93a0cb03 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -145,6 +145,7 @@ struct gf100_gr_func {
 	void (*init_504430)(struct gf100_gr *, int gpc, int tpc);
 	void (*init_shader_exceptions)(struct gf100_gr *, int gpc, int tpc);
 	void (*init_400054)(struct gf100_gr *);
+	void (*trap_mp)(struct gf100_gr *, int gpc, int tpc);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
 	struct {
@@ -301,6 +302,7 @@ extern const struct gf100_gr_init gf100_gr_init_be_0[];
 extern const struct gf100_gr_init gf100_gr_init_fe_1[];
 extern const struct gf100_gr_init gf100_gr_init_pe_1[];
 void gf100_gr_init_gpc_mmu(struct gf100_gr *);
+void gf100_gr_trap_mp(struct gf100_gr *, int, int);
 
 extern const struct gf100_gr_init gf104_gr_init_ds_0[];
 extern const struct gf100_gr_init gf104_gr_init_tex_0[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index d5276cab7f7b..df9cbed7ce50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -126,6 +126,7 @@ gf104_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 8f22a311dccb..8ffa0fd1134f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -124,6 +124,7 @@ gf108_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index fcbfdc7e9b26..0d4293e3e4ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -98,6 +98,7 @@ gf110_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index f526ccddaf5e..e3c1dbbfbf34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -162,6 +162,7 @@ gf117_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 5d3d22fb5c86..1ed70b93a10a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -189,6 +189,7 @@ gf119_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 2e5c48b61ac8..86819ab7f9a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -462,6 +462,7 @@ gk104_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
 	.gpccs.ucode = &gk104_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 9adb55f658c6..e30d94ff23d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -364,6 +364,7 @@ gk110_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index f848f1578cd3..253b98181ac4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -116,6 +116,7 @@ gk110b_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 9c678f17b2fe..702e9094c1c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -174,6 +174,7 @@ gk208_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_shader_exceptions = gf100_gr_init_shader_exceptions,
 	.init_400054 = gf100_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
 	.gpccs.ucode = &gk208_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index a806643ede7e..95f7d859e634 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -285,6 +285,7 @@ gk20a_gr = {
 	.init = gk20a_gr_init,
 	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.trap_mp = gf100_gr_trap_mp,
 	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
 	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index c598fa5a68e0..d67bf9465baa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -407,6 +407,7 @@ gm107_gr = {
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
 	.init_400054 = gm107_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index ae0eaf8e6d71..03b255e9b812 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -133,6 +133,7 @@ gm200_gr = {
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
 	.init_400054 = gm107_gr_init_400054,
+	.trap_mp = gf100_gr_trap_mp,
 	.rops = gm200_gr_rops,
 	.tpc_nr = 4,
 	.ppc_nr = 2,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index fcf86d5cf26f..d2f9c7bf9f03 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -68,6 +68,7 @@ gm20b_gr = {
 	.init_zcull = gf117_gr_init_zcull,
 	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+	.trap_mp = gf100_gr_trap_mp,
 	.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 3addbc1d62c7..e5f941f81e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -79,6 +79,7 @@ gp100_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
+	.trap_mp = gf100_gr_trap_mp,
 	.rops = gm200_gr_rops,
 	.gpc_nr = 6,
 	.tpc_nr = 5,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index ea99c15487ef..09e2665e4988 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -57,6 +57,7 @@ gp102_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
+	.trap_mp = gf100_gr_trap_mp,
 	.rops = gm200_gr_rops,
 	.gpc_nr = 6,
 	.tpc_nr = 5,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index 289d8b272b42..844fc9d63e5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -41,6 +41,7 @@ gp104_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
+	.trap_mp = gf100_gr_trap_mp,
 	.rops = gm200_gr_rops,
 	.gpc_nr = 6,
 	.tpc_nr = 5,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 09cba537b8b9..674385da3d43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -43,6 +43,7 @@ gp107_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
+	.trap_mp = gf100_gr_trap_mp,
 	.rops = gm200_gr_rops,
 	.gpc_nr = 2,
 	.tpc_nr = 3,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 4972bf8d2530..6103186a3724 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -41,6 +41,7 @@ gp10b_gr = {
 	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
 	.init_504430 = gm107_gr_init_504430,
 	.init_shader_exceptions = gp100_gr_init_shader_exceptions,
+	.trap_mp = gf100_gr_trap_mp,
 	.rops = gm200_gr_rops,
 	.gpc_nr = 1,
 	.tpc_nr = 2,

From 5f6474a4e6ce3291abb1843b279a23a0bb050d37 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1092/1286] drm/nouveau/gr/gf100-: port tile mapping
 calculations from NVGPU

There are also a couple of hardcoded tables covering the very specific
configurations that NVGPU's algorithm didn't work for.
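
As an illustration only (a hypothetical user-space sketch, not part of the
patch), the error-diffusion loop being ported can be mirrored outside the
kernel to see what tile map it produces for a made-up configuration of
three GPCs carrying 4/3/2 TPCs:

  #include <stdio.h>

  int main(void)
  {
          /* Made-up configuration: 3 GPCs with 4, 3 and 2 TPCs. */
          const int gpc_nr = 3, tpc_max = 4, tpc_total = 9;
          const int tpc_nr[3] = { 4, 3, 2 };   /* already sorted high->low */
          const int gpc_map[3] = { 0, 1, 2 };
          int init_frac[3], init_err[3], run_err[3];
          int tile[32] = { 0 };  /* headroom, like the kernel's tile[TPC_MAX] */
          int mul_factor, comm_denom, i, j;

          /* Same scaling as gf100_gr_oneinit_tiles() in the diff below. */
          mul_factor = gpc_nr * tpc_max;
          mul_factor = (mul_factor & 1) ? 2 : 1;
          comm_denom = gpc_nr * tpc_max * mul_factor;

          for (i = 0; i < gpc_nr; i++) {
                  init_frac[i] = tpc_nr[gpc_map[i]] * gpc_nr * mul_factor;
                  init_err[i] = i * tpc_max * mul_factor - comm_denom / 2;
                  run_err[i] = init_frac[i] + init_err[i];
          }

          /* Error diffusion: on each pass a GPC gets a tile whenever its
           * accumulated error reaches half the common denominator. */
          for (i = 0; i < tpc_total;) {
                  for (j = 0; j < gpc_nr; j++) {
                          if (run_err[j] * 2 >= comm_denom) {
                                  tile[i++] = gpc_map[j];
                                  run_err[j] += init_frac[j] - comm_denom;
                          } else {
                                  run_err[j] += init_frac[j];
                          }
                  }
          }

          for (i = 0; i < tpc_total; i++)
                  printf("%d ", tile[i]);  /* prints: 0 1 2 0 0 1 2 0 1 */
          printf("\n");
          return 0;
  }

Each GPC appears in the tile map once per TPC it owns, interleaved about as
evenly as the integer arithmetic allows.  For the same made-up case,
tpc_total = 9 falls through to the prime search in the default branch and
yields screen_tile_row_offset = 0x05 (5 is the first listed prime that does
not divide 9).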

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  23 +---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c |  23 +---
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 118 ++++++++++++------
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |   8 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gf104.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf108.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf110.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf117.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf119.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk104.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110b.c   |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk208.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk20a.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm107.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm200.c    |  41 ++++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gm20b.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp100.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp102.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp104.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp107.c    |   1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp10b.c    |   1 +
 22 files changed, 153 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 4c25389fe80a..949e1216b8ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1116,27 +1116,14 @@ gf100_grctx_generate_rop_mapping(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
-	u8  tpcnr[GPC_MAX];
 	u8  shift, ntpcv;
-	int gpc, tpc, i;
+	int i;
 
-	/* calculate first set of magics */
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	/* Pack tile map into register format. */
+	for (i = 0; i < 32; i++)
+		data[i / 6] |= (gr->tile[i] & 0x07) << ((i % 6) * 5);
 
-	gpc = -1;
-	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpcnr[gpc]--;
-
-		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
-	}
-
-	for (; tpc < 32; tpc++)
-		data[tpc / 6] |= 7 << ((tpc % 6) * 5);
-
-	/* and the second... */
+	/* Magic. */
 	shift = 0;
 	ntpcv = gr->tpc_total;
 	while (!(ntpcv & (1 << 4))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 4b7b4f8f75fd..bc4e86bbb9d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -194,27 +194,14 @@ gf117_grctx_generate_rop_mapping(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
-	u8  tpcnr[GPC_MAX];
 	u8  shift, ntpcv;
-	int gpc, tpc, i;
+	int i;
 
-	/* calculate first set of magics */
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	/* Pack tile map into register format. */
+	for (i = 0; i < 32; i++)
+		data[i / 6] |= (gr->tile[i] & 0x07) << ((i % 6) * 5);
 
-	gpc = -1;
-	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpcnr[gpc]--;
-
-		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
-	}
-
-	for (; tpc < 32; tpc++)
-		data[tpc / 6] |= 7 << ((tpc % 6) * 5);
-
-	/* and the second... */
+	/* Magic. */
 	shift = 0;
 	ntpcv = gr->tpc_total;
 	while (!(ntpcv & (1 << 4))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index f05d9d4c6e5c..519b109f40d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1652,6 +1652,82 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 	return ret;
 }
 
+void
+gf100_gr_oneinit_tiles(struct gf100_gr *gr)
+{
+	static const u8 primes[] = {
+		3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61
+	};
+	int init_frac[GPC_MAX], init_err[GPC_MAX], run_err[GPC_MAX], i, j;
+	u32 mul_factor, comm_denom;
+	u8  gpc_map[GPC_MAX];
+	bool sorted;
+
+	switch (gr->tpc_total) {
+	case 15: gr->screen_tile_row_offset = 0x06; break;
+	case 14: gr->screen_tile_row_offset = 0x05; break;
+	case 13: gr->screen_tile_row_offset = 0x02; break;
+	case 11: gr->screen_tile_row_offset = 0x07; break;
+	case 10: gr->screen_tile_row_offset = 0x06; break;
+	case  7:
+	case  5: gr->screen_tile_row_offset = 0x01; break;
+	case  3: gr->screen_tile_row_offset = 0x02; break;
+	case  2:
+	case  1: gr->screen_tile_row_offset = 0x01; break;
+	default: gr->screen_tile_row_offset = 0x03;
+		for (i = 0; i < ARRAY_SIZE(primes); i++) {
+			if (gr->tpc_total % primes[i]) {
+				gr->screen_tile_row_offset = primes[i];
+				break;
+			}
+		}
+		break;
+	}
+
+	/* Sort GPCs by TPC count, highest-to-lowest. */
+	for (i = 0; i < gr->gpc_nr; i++)
+		gpc_map[i] = i;
+	sorted = false;
+
+	while (!sorted) {
+		for (sorted = true, i = 0; i < gr->gpc_nr - 1; i++) {
+			if (gr->tpc_nr[gpc_map[i + 1]] >
+			    gr->tpc_nr[gpc_map[i + 0]]) {
+				u8 swap = gpc_map[i];
+				gpc_map[i + 0] = gpc_map[i + 1];
+				gpc_map[i + 1] = swap;
+				sorted = false;
+			}
+		}
+	}
+
+	/* Determine tile->GPC mapping */
+	mul_factor = gr->gpc_nr * gr->tpc_max;
+	if (mul_factor & 1)
+		mul_factor = 2;
+	else
+		mul_factor = 1;
+
+	comm_denom = gr->gpc_nr * gr->tpc_max * mul_factor;
+
+	for (i = 0; i < gr->gpc_nr; i++) {
+		init_frac[i] = gr->tpc_nr[gpc_map[i]] * gr->gpc_nr * mul_factor;
+		 init_err[i] = i * gr->tpc_max * mul_factor - comm_denom/2;
+		  run_err[i] = init_frac[i] + init_err[i];
+	}
+
+	for (i = 0; i < gr->tpc_total;) {
+		for (j = 0; j < gr->gpc_nr; j++) {
+			if ((run_err[j] * 2) >= comm_denom) {
+				gr->tile[i++] = gpc_map[j];
+				run_err[j] += init_frac[j] - comm_denom;
+			} else {
+				run_err[j] += init_frac[j];
+			}
+		}
+	}
+}
+
 static int
 gf100_gr_oneinit(struct nvkm_gr *base)
 {
@@ -1691,45 +1767,8 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 		}
 	}
 
-	/*XXX: these need figuring out... though it might not even matter */
-	switch (device->chipset) {
-	case 0xc0:
-		if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
-			gr->screen_tile_row_offset = 0x07;
-		} else
-		if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
-			gr->screen_tile_row_offset = 0x05;
-		} else
-		if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
-			gr->screen_tile_row_offset = 0x06;
-		}
-		break;
-	case 0xc3: /* 450, 4/0/0/0, 2 */
-		gr->screen_tile_row_offset = 0x03;
-		break;
-	case 0xc4: /* 460, 3/4/0/0, 4 */
-		gr->screen_tile_row_offset = 0x01;
-		break;
-	case 0xc1: /* 2/0/0/0, 1 */
-		gr->screen_tile_row_offset = 0x01;
-		break;
-	case 0xc8: /* 4/4/3/4, 5 */
-		gr->screen_tile_row_offset = 0x06;
-		break;
-	case 0xce: /* 4/4/0/0, 4 */
-		gr->screen_tile_row_offset = 0x03;
-		break;
-	case 0xcf: /* 4/0/0/0, 3 */
-		gr->screen_tile_row_offset = 0x03;
-		break;
-	case 0xd7:
-	case 0xd9: /* 1/0/0/0, 1 */
-	case 0xea: /* gk20a */
-	case 0x12b: /* gm20b */
-		gr->screen_tile_row_offset = 0x01;
-		break;
-	}
-
+	memset(gr->tile, 0xff, sizeof(gr->tile));
+	gr->func->oneinit_tiles(gr);
 	return 0;
 }
 
@@ -2164,6 +2203,7 @@ gf100_gr_gpccs_ucode = {
 
 static const struct gf100_gr_func
 gf100_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c25b93a0cb03..53a173e023b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -107,12 +107,13 @@ struct gf100_gr {
 	u8 ppc_tpc_nr[GPC_MAX][4];
 	u8 ppc_tpc_min;
 
+	u8 screen_tile_row_offset;
+	u8 tile[TPC_MAX];
+
 	struct gf100_gr_data mmio_data[4];
 	struct gf100_gr_mmio mmio_list[4096/8];
 	u32  size;
 	u32 *data;
-
-	u8 screen_tile_row_offset;
 };
 
 int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
@@ -123,6 +124,7 @@ void *gf100_gr_dtor(struct nvkm_gr *);
 
 struct gf100_gr_func {
 	void (*dtor)(struct gf100_gr *);
+	void (*oneinit_tiles)(struct gf100_gr *);
 	int (*init)(struct gf100_gr *);
 	void (*init_gpc_mmu)(struct gf100_gr *);
 	void (*init_r405a14)(struct gf100_gr *);
@@ -164,6 +166,7 @@ struct gf100_gr_func {
 };
 
 int gf100_gr_rops(struct gf100_gr *);
+void gf100_gr_oneinit_tiles(struct gf100_gr *);
 int gf100_gr_init(struct gf100_gr *);
 void gf100_gr_init_vsc_stream_master(struct gf100_gr *);
 void gf100_gr_init_zcull(struct gf100_gr *);
@@ -191,6 +194,7 @@ void gm107_gr_init_400054(struct gf100_gr *);
 
 int gk20a_gr_init(struct gf100_gr *);
 
+void gm200_gr_oneinit_tiles(struct gf100_gr *);
 int gm200_gr_rops(struct gf100_gr *);
 void gm200_gr_init_num_active_ltcs(struct gf100_gr *);
 void gm200_gr_init_ds_hww_esr_2(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index df9cbed7ce50..8b49b8fe6d2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -114,6 +114,7 @@ gf104_gr_pack_mmio[] = {
 
 static const struct gf100_gr_func
 gf104_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 8ffa0fd1134f..6432aeba0a14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -111,6 +111,7 @@ gf108_gr_init_r405a14(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gf108_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_r405a14 = gf108_gr_init_r405a14,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 0d4293e3e4ea..4e007c945233 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -86,6 +86,7 @@ gf110_gr_pack_mmio[] = {
 
 static const struct gf100_gr_func
 gf110_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index e3c1dbbfbf34..2ddb728fb7ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -150,6 +150,7 @@ gf117_gr_init_zcull(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gf117_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 1ed70b93a10a..f0f10a4d8a26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -177,6 +177,7 @@ gf119_gr_pack_mmio[] = {
 
 static const struct gf100_gr_func
 gf119_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 86819ab7f9a4..d57fb5ff1fe9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -448,6 +448,7 @@ gk104_gr_gpccs_ucode = {
 
 static const struct gf100_gr_func
 gk104_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index e30d94ff23d7..41997ebda719 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -350,6 +350,7 @@ gk110_gr_init_419eb4(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gk110_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 253b98181ac4..b7a6479c6ec2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -102,6 +102,7 @@ gk110b_gr_pack_mmio[] = {
 
 static const struct gf100_gr_func
 gk110b_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 702e9094c1c8..5f1e71abe504 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -161,6 +161,7 @@ gk208_gr_gpccs_ucode = {
 
 static const struct gf100_gr_func
 gk208_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 95f7d859e634..ab4e5380eba2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -282,6 +282,7 @@ gk20a_gr_init(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gk20a_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gk20a_gr_init,
 	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index d67bf9465baa..98f74fe7007d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -391,6 +391,7 @@ gm107_gr_gpccs_ucode = {
 
 static const struct gf100_gr_func
 gm107_gr = {
+	.oneinit_tiles = gf100_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm107_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 03b255e9b812..3e017fdd23ed 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -77,6 +77,46 @@ gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
 	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
+static u8
+gm200_gr_tile_map_6_24[] = {
+	0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2,
+};
+
+static u8
+gm200_gr_tile_map_4_16[] = {
+	0, 1, 2, 3, 2, 3, 0, 1, 3, 0, 1, 2, 1, 2, 3, 0,
+};
+
+static u8
+gm200_gr_tile_map_2_8[] = {
+	0, 1, 1, 0, 0, 1, 1, 0,
+};
+
+void
+gm200_gr_oneinit_tiles(struct gf100_gr *gr)
+{
+	/*XXX: Not sure what this is about.  The algorithm from NVGPU
+	 *     seems to work for all boards I tried from earlier (and
+	 *     later) GPUs except in these specific configurations.
+	 *
+	 *     Let's just hardcode them for now.
+	 */
+	if (gr->gpc_nr == 2 && gr->tpc_total == 8) {
+		memcpy(gr->tile, gm200_gr_tile_map_2_8, gr->tpc_total);
+		gr->screen_tile_row_offset = 1;
+	} else
+	if (gr->gpc_nr == 4 && gr->tpc_total == 16) {
+		memcpy(gr->tile, gm200_gr_tile_map_4_16, gr->tpc_total);
+		gr->screen_tile_row_offset = 4;
+	} else
+	if (gr->gpc_nr == 6 && gr->tpc_total == 24) {
+		memcpy(gr->tile, gm200_gr_tile_map_6_24, gr->tpc_total);
+		gr->screen_tile_row_offset = 5;
+	} else {
+		gf100_gr_oneinit_tiles(gr);
+	}
+}
+
 int
 gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 	      int index, struct nvkm_gr **pgr)
@@ -117,6 +157,7 @@ gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 
 static const struct gf100_gr_func
 gm200_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index d2f9c7bf9f03..29d3b9445cf6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -64,6 +64,7 @@ gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gm20b_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
 	.init = gk20a_gr_init,
 	.init_zcull = gf117_gr_init_zcull,
 	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index e5f941f81e07..70d0aa2c1076 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -64,6 +64,7 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gp100_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 09e2665e4988..71bfe2d8c3f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -42,6 +42,7 @@ gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
 
 static const struct gf100_gr_func
 gp102_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index 844fc9d63e5c..234c970bb0dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -26,6 +26,7 @@
 
 static const struct gf100_gr_func
 gp104_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 674385da3d43..6c5724017c71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -28,6 +28,7 @@
 
 static const struct gf100_gr_func
 gp107_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 6103186a3724..aaaa2844ec20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -27,6 +27,7 @@
 
 static const struct gf100_gr_func
 gp10b_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,

From d00ffc0c403784c9f88d8da357f9f33f855289a4 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1093/1286] drm/nouveau/gr/gf100-: port zcull tile mapping
 calculations from NVGPU

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 32 +++++++++----------
 .../gpu/drm/nouveau/nvkm/engine/gr/gf117.c    | 32 +++++++++----------
 2 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 519b109f40d2..f6d884156b75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2042,25 +2042,25 @@ gf100_gr_init_zcull(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc;
-	int i;
+	const u8 tile_nr = ALIGN(gr->tpc_total, 32);
+	u8 bank[GPC_MAX] = {}, gpc, i, j;
+	u32 data;
 
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
+	for (i = 0; i < tile_nr; i += 8) {
+		for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
+			data |= bank[gr->tile[i + j]] << (j * 4);
+			bank[gr->tile[i + j]]++;
+		}
+		nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
 	}
 
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+							 gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
 
 	nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 2ddb728fb7ca..d6831b41df39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -125,25 +125,25 @@ gf117_gr_init_zcull(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc;
-	int i;
+	const u8 tile_nr = ALIGN(gr->tpc_total, 32);
+	u8 bank[GPC_MAX] = {}, gpc, i, j;
+	u32 data;
 
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
+	for (i = 0; i < tile_nr; i += 8) {
+		for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) {
+			data |= bank[gr->tile[i + j]] << (j * 4);
+			bank[gr->tile[i + j]]++;
+		}
+		nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data);
 	}
 
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			  gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+							 gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
 
 	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
 }

From 068cae743c2ad08a082d6fef007e6b38f5fb3b16 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1094/1286] drm/nouveau/gr/gf100-: calculate and use sm mapping
 table

There are a number of places that require this data, so let's separate out
the calculations to ensure they remain consistent.

This is incorrect for GM200 and newer, but produces the same results as
before.
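
As a stand-alone illustration (not part of the patch; the GPC/TPC counts
below are made-up example values), the idea is to fill the per-SM table
once, in the round-robin order gf100_gr_oneinit_sm_id() uses below, and
have every consumer walk that table instead of re-deriving the mapping:

  #include <stdio.h>
  #include <stdint.h>

  #define GPC_NR  4
  #define TPC_MAX 4

  struct sm_map { uint8_t gpc, tpc; };

  int main(void)
  {
  	const uint8_t tpc_nr[GPC_NR] = { 4, 3, 4, 4 }; /* example config */
  	struct sm_map sm[GPC_NR * TPC_MAX];
  	uint32_t dist[(GPC_NR * TPC_MAX + 3) / 4] = { 0 };
  	int sm_nr = 0, gpc, tpc, i;

  	/* Build the table once: TPC index outer, GPC inner, skipping
  	 * GPCs that have fewer TPCs than the current index. */
  	for (tpc = 0; tpc < TPC_MAX; tpc++) {
  		for (gpc = 0; gpc < GPC_NR; gpc++) {
  			if (tpc < tpc_nr[gpc]) {
  				sm[sm_nr].gpc = gpc;
  				sm[sm_nr].tpc = tpc;
  				sm_nr++;
  			}
  		}
  	}

  	/* Consumers just walk the table, e.g. packing (gpc,tpc) pairs
  	 * four per 32-bit word as the gm200 smid_config hunk does. */
  	for (i = 0; i < sm_nr; i++)
  		dist[i / 4] |= ((sm[i].gpc << 4) | sm[i].tpc) << ((i % 4) * 8);

  	for (i = 0; i < (sm_nr + 3) / 4; i++)
  		printf("dist[%d] = 0x%08x\n", i, dist[i]);
  	return 0;
  }

The real consumers in the patch (grctx floorsweep and the gm200/gp100
smid_config hunks) perform exactly this walk against gr->sm[]/gr->sm_nr.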

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 38 ++++++++-----------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 22 +++--------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 22 +++--------
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 17 +++++++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  9 +++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf104.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf108.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf110.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf117.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf119.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk104.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110b.c   |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk208.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk20a.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm107.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm200.c    |  8 ++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gm20b.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp100.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp102.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp104.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp107.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp10b.c    |  1 +
 23 files changed, 78 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 949e1216b8ba..c2dcc01379c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1092,23 +1092,18 @@ gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const u8 gpcmax = nvkm_rd32(device, 0x022430);
 	const u8 tpcmax = nvkm_rd32(device, 0x022434) * gpcmax;
-	u8 tpcnr[GPC_MAX], data[TPC_MAX];
-	int gpc, tpc, i;
+	int i, j, sm = 0;
+	u32 data;
 
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	memset(data, 0x1f, sizeof(data));
-
-	gpc = -1;
-	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpcnr[gpc]--;
-		data[tpc] = gpc;
+	for (i = 0; i < DIV_ROUND_UP(tpcmax, 4); i++) {
+		for (data = 0, j = 0; j < 4; j++) {
+			if (sm < gr->sm_nr)
+				data |= gr->sm[sm++].gpc << (j * 8);
+			else
+				data |= 0x1f << (j * 8);
+		}
+		nvkm_wr32(device, 0x4060a8 + (i * 4), data);
 	}
-
-	for (i = 0; i < DIV_ROUND_UP(tpcmax, 4); i++)
-		nvkm_wr32(device, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
 }
 
 void
@@ -1326,16 +1321,13 @@ gf100_grctx_generate_floorsweep(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_grctx_func *func = gr->func->grctx;
-	int tpc, gpc, sm, i, j;
+	int gpc, sm, i, j;
 	u32 data;
 
-	for (tpc = 0, sm = 0; tpc < gr->tpc_max; tpc++) {
-		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-			if (tpc < gr->tpc_nr[gpc])
-				func->sm_id(gr, gpc, tpc, sm++);
-			if (func->tpc_nr)
-				func->tpc_nr(gr, gpc);
-		}
+	for (sm = 0; sm < gr->sm_nr; sm++) {
+		func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm);
+		if (func->tpc_nr)
+			func->tpc_nr(gr, gr->sm[sm].gpc);
 	}
 
 	for (gpc = 0, i = 0; i < 4; i++) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index 7107ec429778..013d05a0f0f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -49,23 +49,13 @@ gm200_grctx_generate_smid_config(struct gf100_gr *gr)
 	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
 	u32 dist[TPC_MAX / 4] = {};
 	u32 gpcs[GPC_MAX] = {};
-	u8  tpcnr[GPC_MAX];
-	int tpc, gpc, i;
+	u8  sm, i;
 
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-
-	/* won't result in the same distribution as the binary driver where
-	 * some of the gpcs have more tpcs than others, but this shall do
-	 * for the moment.  the code for earlier gpus has this issue too.
-	 */
-	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while(!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
-		gpcs[gpc] |= i << (tpc * 8);
+	for (sm = 0; sm < gr->sm_nr; sm++) {
+		const u8 gpc = gr->sm[sm].gpc;
+		const u8 tpc = gr->sm[sm].tpc;
+		dist[sm / 4] |= ((gpc << 4) | tpc) << ((sm % 4) * 8);
+		gpcs[gpc] |= sm << (tpc * 8);
 	}
 
 	for (i = 0; i < dist_nr; i++)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index af6330c73872..2344fd8086f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -95,23 +95,13 @@ gp100_grctx_generate_smid_config(struct gf100_gr *gr)
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
 	u32 dist[TPC_MAX / 4] = {}, gpcs[16] = {};
-	u8  tpcnr[GPC_MAX];
-	int tpc, gpc, i;
+	u8  sm, i;
 
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-
-	/* won't result in the same distribution as the binary driver where
-	 * some of the gpcs have more tpcs than others, but this shall do
-	 * for the moment.  the code for earlier gpus has this issue too.
-	 */
-	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while(!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
-		gpcs[gpc + (gr->func->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
+	for (sm = 0; sm < gr->sm_nr; sm++) {
+		const u8 gpc = gr->sm[sm].gpc;
+		const u8 tpc = gr->sm[sm].tpc;
+		dist[sm / 4] |= ((gpc << 4) | tpc) << ((sm % 4) * 8);
+		gpcs[gpc + (gr->func->gpc_nr * (tpc / 4))] |= sm << ((tpc % 4) * 8);
 	}
 
 	for (i = 0; i < dist_nr; i++)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index f6d884156b75..b2070c87c91c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1652,6 +1652,21 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 	return ret;
 }
 
+void
+gf100_gr_oneinit_sm_id(struct gf100_gr *gr)
+{
+	int tpc, gpc;
+	for (tpc = 0; tpc < gr->tpc_max; tpc++) {
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				gr->sm[gr->sm_nr].gpc = gpc;
+				gr->sm[gr->sm_nr].tpc = tpc;
+				gr->sm_nr++;
+			}
+		}
+	}
+}
+
 void
 gf100_gr_oneinit_tiles(struct gf100_gr *gr)
 {
@@ -1769,6 +1784,7 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 
 	memset(gr->tile, 0xff, sizeof(gr->tile));
 	gr->func->oneinit_tiles(gr);
+	gr->func->oneinit_sm_id(gr);
 	return 0;
 }
 
@@ -2204,6 +2220,7 @@ gf100_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gf100_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 53a173e023b4..55dedd87fc38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -110,6 +110,12 @@ struct gf100_gr {
 	u8 screen_tile_row_offset;
 	u8 tile[TPC_MAX];
 
+	struct {
+		u8 gpc;
+		u8 tpc;
+	} sm[TPC_MAX];
+	u8 sm_nr;
+
 	struct gf100_gr_data mmio_data[4];
 	struct gf100_gr_mmio mmio_list[4096/8];
 	u32  size;
@@ -125,6 +131,7 @@ void *gf100_gr_dtor(struct nvkm_gr *);
 struct gf100_gr_func {
 	void (*dtor)(struct gf100_gr *);
 	void (*oneinit_tiles)(struct gf100_gr *);
+	void (*oneinit_sm_id)(struct gf100_gr *);
 	int (*init)(struct gf100_gr *);
 	void (*init_gpc_mmu)(struct gf100_gr *);
 	void (*init_r405a14)(struct gf100_gr *);
@@ -167,6 +174,7 @@ struct gf100_gr_func {
 
 int gf100_gr_rops(struct gf100_gr *);
 void gf100_gr_oneinit_tiles(struct gf100_gr *);
+void gf100_gr_oneinit_sm_id(struct gf100_gr *);
 int gf100_gr_init(struct gf100_gr *);
 void gf100_gr_init_vsc_stream_master(struct gf100_gr *);
 void gf100_gr_init_zcull(struct gf100_gr *);
@@ -195,6 +203,7 @@ void gm107_gr_init_400054(struct gf100_gr *);
 int gk20a_gr_init(struct gf100_gr *);
 
 void gm200_gr_oneinit_tiles(struct gf100_gr *);
+void gm200_gr_oneinit_sm_id(struct gf100_gr *);
 int gm200_gr_rops(struct gf100_gr *);
 void gm200_gr_init_num_active_ltcs(struct gf100_gr *);
 void gm200_gr_init_ds_hww_esr_2(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 8b49b8fe6d2c..1d8e16a57136 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -115,6 +115,7 @@ gf104_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gf104_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 6432aeba0a14..a5a74df4edff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -112,6 +112,7 @@ gf108_gr_init_r405a14(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gf108_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_r405a14 = gf108_gr_init_r405a14,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 4e007c945233..45fada099009 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -87,6 +87,7 @@ gf110_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gf110_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index d6831b41df39..5ee167d0f5aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -151,6 +151,7 @@ gf117_gr_init_zcull(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gf117_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index f0f10a4d8a26..2096552fc537 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -178,6 +178,7 @@ gf119_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gf119_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gf100_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index d57fb5ff1fe9..cce250a85ba6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -449,6 +449,7 @@ gk104_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gk104_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 41997ebda719..558b497692ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -351,6 +351,7 @@ gk110_gr_init_419eb4(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gk110_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index b7a6479c6ec2..c8c48a26f435 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -103,6 +103,7 @@ gk110b_gr_pack_mmio[] = {
 static const struct gf100_gr_func
 gk110b_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 5f1e71abe504..477a7dea79f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -162,6 +162,7 @@ gk208_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gk208_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gf100_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index ab4e5380eba2..11a32fa01586 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -283,6 +283,7 @@ gk20a_gr_init(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gk20a_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gk20a_gr_init,
 	.init_zcull = gf117_gr_init_zcull,
 	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 98f74fe7007d..a6937f8c3cfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -392,6 +392,7 @@ gm107_gr_gpccs_ucode = {
 static const struct gf100_gr_func
 gm107_gr = {
 	.oneinit_tiles = gf100_gr_oneinit_tiles,
+	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm107_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 3e017fdd23ed..8966d2a7235c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -92,6 +92,13 @@ gm200_gr_tile_map_2_8[] = {
 	0, 1, 1, 0, 0, 1, 1, 0,
 };
 
+void
+gm200_gr_oneinit_sm_id(struct gf100_gr *gr)
+{
+	/*XXX: There's a different algorithm here I've not yet figured out. */
+	gf100_gr_oneinit_sm_id(gr);
+}
+
 void
 gm200_gr_oneinit_tiles(struct gf100_gr *gr)
 {
@@ -158,6 +165,7 @@ gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
 static const struct gf100_gr_func
 gm200_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_bios = gm107_gr_init_bios,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 29d3b9445cf6..afa1c6e32230 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -65,6 +65,7 @@ gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gm20b_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
 	.init = gk20a_gr_init,
 	.init_zcull = gf117_gr_init_zcull,
 	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 70d0aa2c1076..1d9d8760e13e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -65,6 +65,7 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gp100_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 71bfe2d8c3f6..2d9a2c3ec261 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -43,6 +43,7 @@ gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
 static const struct gf100_gr_func
 gp102_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index 234c970bb0dc..e466ae460d3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -27,6 +27,7 @@
 static const struct gf100_gr_func
 gp104_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 6c5724017c71..2fa046a1da60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -29,6 +29,7 @@
 static const struct gf100_gr_func
 gp107_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index aaaa2844ec20..0a01a306da2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -28,6 +28,7 @@
 static const struct gf100_gr_func
 gp10b_gr = {
 	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
 	.init = gf100_gr_init,
 	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
 	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,

From 6f0233329bacd8de54959e0c0b9b6c46bf5118b8 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1095/1286] drm/nouveau/gr/gf100-: swap bundle and pagepool

Makes it easier to diff against RM traces.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index c2dcc01379c9..4096e2d23527 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1381,8 +1381,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
-	grctx->bundle(info);
 	grctx->pagepool(info);
+	grctx->bundle(info);
 	grctx->attrib(info);
 	if (grctx->patch_ltc)
 		grctx->patch_ltc(info);

From 191e323278dd7025ecd5fef02fbb984cfc91eebb Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1096/1286] drm/nouveau/gr/gf100-gm10x: update register lists

There are differences on GM200 and newer too, but we can't fix them there
as they come from firmware packages.

A request has been made to NVIDIA to release updated firmware.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c  | 13 ++++++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h  | 12 ++++++++----
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c  |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c  | 10 ++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c  |  8 +++-----
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c  | 10 ++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c  |  8 +++-----
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c  | 12 +++++++++---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c  | 10 ++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c  | 10 ++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c  | 10 ++++++++--
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c     |  3 ++-
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h     |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c     |  7 +++++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c     |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c    |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c     |  1 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c     |  9 ++++++---
 19 files changed, 96 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 4096e2d23527..1ed63ed1a283 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -850,12 +850,17 @@ gf100_grctx_init_gcc_0[] = {
 };
 
 const struct gf100_gr_pack
-gf100_grctx_pack_gpc[] = {
+gf100_grctx_pack_gpc_0[] = {
 	{ gf100_grctx_init_gpc_unk_0 },
 	{ gf100_grctx_init_prop_0 },
 	{ gf100_grctx_init_gpc_unk_1 },
 	{ gf100_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
+	{}
+};
+
+const struct gf100_gr_pack
+gf100_grctx_pack_gpc_1[] = {
 	{ gf100_grctx_init_crstr_0 },
 	{ gf100_grctx_init_gpm_0 },
 	{ gf100_grctx_init_gcc_0 },
@@ -1371,8 +1376,9 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	if (!gr->fuc_sw_ctx) {
 		gf100_gr_mmio(gr, grctx->hub);
-		gf100_gr_mmio(gr, grctx->gpc);
+		gf100_gr_mmio(gr, grctx->gpc_0);
 		gf100_gr_mmio(gr, grctx->zcull);
+		gf100_gr_mmio(gr, grctx->gpc_1);
 		gf100_gr_mmio(gr, grctx->tpc);
 		gf100_gr_mmio(gr, grctx->ppc);
 	} else {
@@ -1567,7 +1573,8 @@ gf100_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
-	.gpc   = gf100_grctx_pack_gpc,
+	.gpc_0 = gf100_grctx_pack_gpc_0,
+	.gpc_1 = gf100_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gf100_grctx_pack_tpc,
 	.icmd  = gf100_grctx_pack_icmd,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index be57ff086022..1c06c675f09e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -27,7 +27,8 @@ struct gf100_grctx_func {
 	void  (*unkn)(struct gf100_gr *);
 	/* mmio context data */
 	const struct gf100_gr_pack *hub;
-	const struct gf100_gr_pack *gpc;
+	const struct gf100_gr_pack *gpc_0;
+	const struct gf100_gr_pack *gpc_1;
 	const struct gf100_gr_pack *zcull;
 	const struct gf100_gr_pack *tpc;
 	const struct gf100_gr_pack *ppc;
@@ -163,7 +164,8 @@ extern const struct gf100_gr_init gf100_grctx_init_memfmt_0[];
 extern const struct gf100_gr_init gf100_grctx_init_rstr2d_0[];
 extern const struct gf100_gr_init gf100_grctx_init_scc_0[];
 
-extern const struct gf100_gr_pack gf100_grctx_pack_gpc[];
+extern const struct gf100_gr_pack gf100_grctx_pack_gpc_0[];
+extern const struct gf100_gr_pack gf100_grctx_pack_gpc_1[];
 extern const struct gf100_gr_init gf100_grctx_init_gpc_unk_0[];
 extern const struct gf100_gr_init gf100_grctx_init_prop_0[];
 extern const struct gf100_gr_init gf100_grctx_init_gpc_unk_1[];
@@ -212,6 +214,8 @@ extern const struct gf100_gr_init gf117_grctx_init_pe_0[];
 
 extern const struct gf100_gr_init gf117_grctx_init_wwdx_0[];
 
+extern const struct gf100_gr_pack gf117_grctx_pack_gpc_1[];
+
 extern const struct gf100_gr_init gk104_grctx_init_memfmt_0[];
 extern const struct gf100_gr_init gk104_grctx_init_ds_0[];
 extern const struct gf100_gr_init gk104_grctx_init_scc_0[];
@@ -221,7 +225,6 @@ extern const struct gf100_gr_init gk104_grctx_init_gpm_0[];
 extern const struct gf100_gr_init gk104_grctx_init_pes_0[];
 
 extern const struct gf100_gr_pack gk104_grctx_pack_hub[];
-extern const struct gf100_gr_pack gk104_grctx_pack_gpc[];
 extern const struct gf100_gr_pack gk104_grctx_pack_tpc[];
 extern const struct gf100_gr_pack gk104_grctx_pack_ppc[];
 extern const struct gf100_gr_pack gk104_grctx_pack_icmd[];
@@ -235,7 +238,8 @@ extern const struct gf100_gr_pack gk110_grctx_pack_hub[];
 extern const struct gf100_gr_init gk110_grctx_init_pri_0[];
 extern const struct gf100_gr_init gk110_grctx_init_cwd_0[];
 
-extern const struct gf100_gr_pack gk110_grctx_pack_gpc[];
+extern const struct gf100_gr_pack gk110_grctx_pack_gpc_0[];
+extern const struct gf100_gr_pack gk110_grctx_pack_gpc_1[];
 extern const struct gf100_gr_init gk110_grctx_init_gpc_unk_2[];
 
 extern const struct gf100_gr_init gk110_grctx_init_tex_0[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index 7f3b9289a66b..7a0564b6e3c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -84,7 +84,8 @@ gf104_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
-	.gpc   = gf100_grctx_pack_gpc,
+	.gpc_0 = gf100_grctx_pack_gpc_0,
+	.gpc_1 = gf100_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gf104_grctx_pack_tpc,
 	.icmd  = gf100_grctx_pack_icmd,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 369d64f867c7..dda2c32e6232 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -667,12 +667,17 @@ gf108_grctx_init_gpm_0[] = {
 };
 
 static const struct gf100_gr_pack
-gf108_grctx_pack_gpc[] = {
+gf108_grctx_pack_gpc_0[] = {
 	{ gf100_grctx_init_gpc_unk_0 },
 	{ gf100_grctx_init_prop_0 },
 	{ gf100_grctx_init_gpc_unk_1 },
 	{ gf108_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
+	{}
+};
+
+static const struct gf100_gr_pack
+gf108_grctx_pack_gpc_1[] = {
 	{ gf100_grctx_init_crstr_0 },
 	{ gf108_grctx_init_gpm_0 },
 	{ gf100_grctx_init_gcc_0 },
@@ -780,7 +785,8 @@ gf108_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf108_grctx_generate_unkn,
 	.hub   = gf108_grctx_pack_hub,
-	.gpc   = gf108_grctx_pack_gpc,
+	.gpc_0 = gf108_grctx_pack_gpc_0,
+	.gpc_1 = gf108_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gf108_grctx_pack_tpc,
 	.icmd  = gf108_grctx_pack_icmd,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index d59c2480f04d..f5cca5e6a4f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -314,15 +314,12 @@ gf110_grctx_init_setup_0[] = {
 };
 
 static const struct gf100_gr_pack
-gf110_grctx_pack_gpc[] = {
+gf110_grctx_pack_gpc_0[] = {
 	{ gf100_grctx_init_gpc_unk_0 },
 	{ gf100_grctx_init_prop_0 },
 	{ gf100_grctx_init_gpc_unk_1 },
 	{ gf110_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
-	{ gf100_grctx_init_crstr_0 },
-	{ gf100_grctx_init_gpm_0 },
-	{ gf100_grctx_init_gcc_0 },
 	{}
 };
 
@@ -335,7 +332,8 @@ gf110_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
-	.gpc   = gf110_grctx_pack_gpc,
+	.gpc_0 = gf110_grctx_pack_gpc_0,
+	.gpc_1 = gf100_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gf100_grctx_pack_tpc,
 	.icmd  = gf110_grctx_pack_icmd,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index bc4e86bbb9d4..276c282d19aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -84,12 +84,17 @@ gf117_grctx_init_setup_0[] = {
 };
 
 static const struct gf100_gr_pack
-gf117_grctx_pack_gpc[] = {
+gf117_grctx_pack_gpc_0[] = {
 	{ gf100_grctx_init_gpc_unk_0 },
 	{ gf119_grctx_init_prop_0 },
 	{ gf119_grctx_init_gpc_unk_1 },
 	{ gf117_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
+	{}
+};
+
+const struct gf100_gr_pack
+gf117_grctx_pack_gpc_1[] = {
 	{ gf119_grctx_init_crstr_0 },
 	{ gf108_grctx_init_gpm_0 },
 	{ gf100_grctx_init_gcc_0 },
@@ -278,7 +283,8 @@ gf117_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gf117_grctx_pack_hub,
-	.gpc   = gf117_grctx_pack_gpc,
+	.gpc_0 = gf117_grctx_pack_gpc_0,
+	.gpc_1 = gf117_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gf117_grctx_pack_tpc,
 	.ppc   = gf117_grctx_pack_ppc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index cc1a9354fecc..0cfe46366af6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -431,15 +431,12 @@ gf119_grctx_init_crstr_0[] = {
 };
 
 static const struct gf100_gr_pack
-gf119_grctx_pack_gpc[] = {
+gf119_grctx_pack_gpc_0[] = {
 	{ gf100_grctx_init_gpc_unk_0 },
 	{ gf119_grctx_init_prop_0 },
 	{ gf119_grctx_init_gpc_unk_1 },
 	{ gf119_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
-	{ gf119_grctx_init_crstr_0 },
-	{ gf108_grctx_init_gpm_0 },
-	{ gf100_grctx_init_gcc_0 },
 	{}
 };
 
@@ -503,7 +500,8 @@ gf119_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf108_grctx_generate_unkn,
 	.hub   = gf119_grctx_pack_hub,
-	.gpc   = gf119_grctx_pack_gpc,
+	.gpc_0 = gf119_grctx_pack_gpc_0,
+	.gpc_1 = gf117_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gf119_grctx_pack_tpc,
 	.icmd  = gf119_grctx_pack_icmd,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index bdf2a1e6d3b6..304e9d268bad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -739,13 +739,18 @@ gk104_grctx_init_gpm_0[] = {
 	{}
 };
 
-const struct gf100_gr_pack
-gk104_grctx_pack_gpc[] = {
+static const struct gf100_gr_pack
+gk104_grctx_pack_gpc_0[] = {
 	{ gf100_grctx_init_gpc_unk_0 },
 	{ gf119_grctx_init_prop_0 },
 	{ gf119_grctx_init_gpc_unk_1 },
 	{ gk104_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
+	{}
+};
+
+static const struct gf100_gr_pack
+gk104_grctx_pack_gpc_1[] = {
 	{ gf119_grctx_init_crstr_0 },
 	{ gk104_grctx_init_gpm_0 },
 	{ gf100_grctx_init_gcc_0 },
@@ -973,7 +978,8 @@ gk104_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk104_grctx_pack_hub,
-	.gpc   = gk104_grctx_pack_gpc,
+	.gpc_0 = gk104_grctx_pack_gpc_0,
+	.gpc_1 = gk104_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gk104_grctx_pack_tpc,
 	.ppc   = gk104_grctx_pack_ppc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 2ba35d727dc1..86547cfc38dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -704,12 +704,17 @@ gk110_grctx_init_gpc_unk_2[] = {
 };
 
 const struct gf100_gr_pack
-gk110_grctx_pack_gpc[] = {
+gk110_grctx_pack_gpc_0[] = {
 	{ gf100_grctx_init_gpc_unk_0 },
 	{ gf119_grctx_init_prop_0 },
 	{ gf119_grctx_init_gpc_unk_1 },
 	{ gk110_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
+	{}
+};
+
+const struct gf100_gr_pack
+gk110_grctx_pack_gpc_1[] = {
 	{ gf119_grctx_init_crstr_0 },
 	{ gk104_grctx_init_gpm_0 },
 	{ gk110_grctx_init_gpc_unk_2 },
@@ -820,7 +825,8 @@ gk110_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
-	.gpc   = gk110_grctx_pack_gpc,
+	.gpc_0 = gk110_grctx_pack_gpc_0,
+	.gpc_1 = gk110_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gk110_grctx_pack_tpc,
 	.ppc   = gk110_grctx_pack_ppc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 1112f8dc70e2..ebb947bd1446 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -74,7 +74,8 @@ gk110b_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
-	.gpc   = gk110_grctx_pack_gpc,
+	.gpc_0 = gk110_grctx_pack_gpc_0,
+	.gpc_1 = gk110_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gk110b_grctx_pack_tpc,
 	.ppc   = gk110_grctx_pack_ppc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 613c5cf8b3bf..4d40512b5c99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -443,12 +443,17 @@ gk208_grctx_init_gpm_0[] = {
 };
 
 static const struct gf100_gr_pack
-gk208_grctx_pack_gpc[] = {
+gk208_grctx_pack_gpc_0[] = {
 	{ gf100_grctx_init_gpc_unk_0 },
 	{ gk208_grctx_init_prop_0 },
 	{ gk208_grctx_init_gpc_unk_1 },
 	{ gk208_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
+	{}
+};
+
+static const struct gf100_gr_pack
+gk208_grctx_pack_gpc_1[] = {
 	{ gk208_grctx_init_crstr_0 },
 	{ gk208_grctx_init_gpm_0 },
 	{ gk110_grctx_init_gpc_unk_2 },
@@ -535,7 +540,8 @@ gk208_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk208_grctx_pack_hub,
-	.gpc   = gk208_grctx_pack_gpc,
+	.gpc_0 = gk208_grctx_pack_gpc_0,
+	.gpc_1 = gk208_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gk208_grctx_pack_tpc,
 	.ppc   = gk208_grctx_pack_ppc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 7816dcb7c974..0b3964e6b36e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -744,12 +744,17 @@ gm107_grctx_init_gpc_unk_2[] = {
 };
 
 static const struct gf100_gr_pack
-gm107_grctx_pack_gpc[] = {
+gm107_grctx_pack_gpc_0[] = {
 	{ gm107_grctx_init_gpc_unk_0 },
 	{ gk208_grctx_init_prop_0 },
 	{ gm107_grctx_init_gpc_unk_1 },
 	{ gm107_grctx_init_setup_0 },
 	{ gf100_grctx_init_zcull_0 },
+	{}
+};
+
+static const struct gf100_gr_pack
+gm107_grctx_pack_gpc_1[] = {
 	{ gk208_grctx_init_crstr_0 },
 	{ gk104_grctx_init_gpm_0 },
 	{ gm107_grctx_init_gpc_unk_2 },
@@ -960,7 +965,8 @@ gm107_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm107_grctx_pack_hub,
-	.gpc   = gm107_grctx_pack_gpc,
+	.gpc_0 = gm107_grctx_pack_gpc_0,
+	.gpc_1 = gm107_grctx_pack_gpc_1,
 	.zcull = gf100_grctx_pack_zcull,
 	.tpc   = gm107_grctx_pack_tpc,
 	.ppc   = gm107_grctx_pack_ppc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index b2070c87c91c..69418619dc79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1612,7 +1612,8 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
 
 	/* load register lists */
 	gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
-	gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
+	gf100_gr_init_csdata(gr, grctx->gpc_0, 0x41a000, 0x000, 0x418000);
+	gf100_gr_init_csdata(gr, grctx->gpc_1, 0x41a000, 0x000, 0x418000);
 	gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
 	gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 55dedd87fc38..9e608dff2846 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -338,6 +338,7 @@ extern const struct gf100_gr_init gf117_gr_init_wwdx_0[];
 extern const struct gf100_gr_init gf117_gr_init_cbm_0[];
 
 extern const struct gf100_gr_init gk104_gr_init_main_0[];
+extern const struct gf100_gr_init gk104_gr_init_gpc_unk_2[];
 extern const struct gf100_gr_init gk104_gr_init_tpccs_0[];
 extern const struct gf100_gr_init gk104_gr_init_pe_0[];
 extern const struct gf100_gr_init gk104_gr_init_be_0[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index cce250a85ba6..9abacb218361 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -82,6 +82,12 @@ gk104_gr_init_gpc_unk_1[] = {
 	{}
 };
 
+const struct gf100_gr_init
+gk104_gr_init_gpc_unk_2[] = {
+	{ 0x418884,   1, 0x04, 0x00000000 },
+	{}
+};
+
 const struct gf100_gr_init
 gk104_gr_init_tpccs_0[] = {
 	{ 0x419d0c,   1, 0x04, 0x00000000 },
@@ -160,6 +166,7 @@ gk104_gr_pack_mmio[] = {
 	{ gf119_gr_init_gpm_0 },
 	{ gk104_gr_init_gpc_unk_1 },
 	{ gf100_gr_init_gcc_0 },
+	{ gk104_gr_init_gpc_unk_2 },
 	{ gk104_gr_init_tpccs_0 },
 	{ gf119_gr_init_tex_0 },
 	{ gk104_gr_init_pe_0 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 558b497692ab..dba70d50bb8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -143,6 +143,7 @@ gk110_gr_pack_mmio[] = {
 	{ gf119_gr_init_gpm_0 },
 	{ gk110_gr_init_gpc_unk_1 },
 	{ gf100_gr_init_gcc_0 },
+	{ gk104_gr_init_gpc_unk_2 },
 	{ gk104_gr_init_tpccs_0 },
 	{ gk110_gr_init_tex_0 },
 	{ gk104_gr_init_pe_0 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index c8c48a26f435..48bc8d85dbb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -82,6 +82,7 @@ gk110b_gr_pack_mmio[] = {
 	{ gf119_gr_init_gpm_0 },
 	{ gk110_gr_init_gpc_unk_1 },
 	{ gf100_gr_init_gcc_0 },
+	{ gk104_gr_init_gpc_unk_2 },
 	{ gk104_gr_init_tpccs_0 },
 	{ gk110_gr_init_tex_0 },
 	{ gk104_gr_init_pe_0 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 477a7dea79f5..7f45b122dcb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -121,6 +121,7 @@ gk208_gr_pack_mmio[] = {
 	{ gf119_gr_init_gpm_0 },
 	{ gk110_gr_init_gpc_unk_1 },
 	{ gf100_gr_init_gcc_0 },
+	{ gk104_gr_init_gpc_unk_2 },
 	{ gk104_gr_init_tpccs_0 },
 	{ gk208_gr_init_tex_0 },
 	{ gk104_gr_init_pe_0 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index a6937f8c3cfd..f5411aff0e44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -38,6 +38,10 @@
 
 static const struct gf100_gr_init
 gm107_gr_init_main_0[] = {
+	{ 0x40880c,   1, 0x04, 0x00000000 },
+	{ 0x408910,   1, 0x04, 0x00000000 },
+	{ 0x408984,   1, 0x04, 0x00000000 },
+	{ 0x41a8a0,   1, 0x04, 0x00000000 },
 	{ 0x400080,   1, 0x04, 0x003003c2 },
 	{ 0x400088,   1, 0x04, 0x0001bfe7 },
 	{ 0x40008c,   1, 0x04, 0x00060000 },
@@ -212,14 +216,13 @@ gm107_gr_init_cbm_0[] = {
 static const struct gf100_gr_init
 gm107_gr_init_be_0[] = {
 	{ 0x408890,   1, 0x04, 0x000000ff },
-	{ 0x40880c,   1, 0x04, 0x00000000 },
 	{ 0x408850,   1, 0x04, 0x00000004 },
 	{ 0x408878,   1, 0x04, 0x00c81603 },
 	{ 0x40887c,   1, 0x04, 0x80543432 },
 	{ 0x408880,   1, 0x04, 0x0010581e },
 	{ 0x408884,   1, 0x04, 0x00001205 },
 	{ 0x408974,   1, 0x04, 0x000000ff },
-	{ 0x408910,   9, 0x04, 0x00000000 },
+	{ 0x408914,   8, 0x04, 0x00000000 },
 	{ 0x408950,   1, 0x04, 0x00000000 },
 	{ 0x408954,   1, 0x04, 0x0000ffff },
 	{ 0x408958,   1, 0x04, 0x00000034 },
@@ -229,7 +232,6 @@ gm107_gr_init_be_0[] = {
 	{ 0x408968,   1, 0x04, 0x02808833 },
 	{ 0x40896c,   1, 0x04, 0x01f02438 },
 	{ 0x408970,   1, 0x04, 0x00012c00 },
-	{ 0x408984,   1, 0x04, 0x00000000 },
 	{ 0x408988,   1, 0x04, 0x08040201 },
 	{ 0x40898c,   1, 0x04, 0x80402010 },
 	{}
@@ -262,6 +264,7 @@ gm107_gr_pack_mmio[] = {
 	{ gf100_gr_init_gpm_0 },
 	{ gm107_gr_init_gpc_unk_1 },
 	{ gf100_gr_init_gcc_0 },
+	{ gk104_gr_init_gpc_unk_2 },
 	{ gm107_gr_init_tpccs_0 },
 	{ gm107_gr_init_tex_0 },
 	{ gm107_gr_init_pe_0 },

From 17f2d4df32104b327178771fb323391fbf53921c Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1097/1286] drm/nouveau/gr/gp100-: fix pagepool setup

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 2344fd8086f7..c51e3a27af14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -36,7 +36,7 @@ gp100_grctx_generate_pagepool(struct gf100_grctx *info)
 	const int s = 8;
 	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
-	mmio_wr32(info, 0x408010, 0x80000000);
+	mmio_wr32(info, 0x408010, 0x8007d800);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
 	mmio_wr32(info, 0x419008, 0x00000000);
 }

From 7a058a900ccb010c32ca1f29f6f9728a3654a519 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1098/1286] drm/nouveau/gr/gp100-: fix attrib cb setup

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 17 +++++++------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c | 24 ++++++++++++-------
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c |  3 ++-
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    |  2 ++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |  1 +
 7 files changed, 32 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 1c06c675f09e..9ce3d0075573 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -49,6 +49,7 @@ struct gf100_grctx_func {
 	u32 attrib_nr;
 	u32 alpha_nr_max;
 	u32 alpha_nr;
+	u32 gfxp_nr;
 	/* other patch buffer stuff */
 	void (*patch_ltc)(struct gf100_grctx *);
 	/* floorsweeping */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index c51e3a27af14..0b3326262e12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -48,14 +48,17 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	const u32  alpha = grctx->alpha_nr;
 	const u32 attrib = grctx->attrib_nr;
-	const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-	const u32   size = roundup(gr->tpc_total * pertpc, 0x80);
 	const int s = 12;
-	const int b = mmio_vram(info, size, (1 << s), false);
 	const int max_batches = 0xffff;
+	u32 size = grctx->alpha_nr_max * gr->tpc_total;
 	u32 ao = 0;
-	u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
-	int gpc, ppc, n = 0;
+	u32 bo = ao + size;
+	int gpc, ppc, b, n = 0;
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+		size += grctx->attrib_nr_max * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
+	size = ((size * 0x20) + 128) & ~127;
+	b = mmio_vram(info, size, (1 << s), false);
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
 	mmio_refn(info, 0x419848, 0x10000000, s, b);
@@ -69,7 +72,7 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
 			const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
-			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 bs = attrib * gr->ppc_tpc_max;
 			const u32 u = 0x418ea0 + (n * 0x04);
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
@@ -77,7 +80,7 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
 			mmio_wr32(info, o + 0xc0, bs);
 			mmio_wr32(info, o + 0xf4, bo);
 			mmio_wr32(info, o + 0xf0, bs);
-			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+			bo += grctx->attrib_nr_max * gr->ppc_tpc_max;
 			mmio_wr32(info, o + 0xe4, as);
 			mmio_wr32(info, o + 0xf8, ao);
 			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 8a438c2efc3e..daee17bf7d0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -43,14 +43,18 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	const u32  alpha = grctx->alpha_nr;
 	const u32 attrib = grctx->attrib_nr;
-	const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-	const u32   size = roundup(gr->tpc_total * pertpc, 0x80);
+	const u32   gfxp = grctx->gfxp_nr;
 	const int s = 12;
-	const int b = mmio_vram(info, size, (1 << s), false);
 	const int max_batches = 0xffff;
+	u32 size = grctx->alpha_nr_max * gr->tpc_total;
 	u32 ao = 0;
-	u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
-	int gpc, ppc, n = 0;
+	u32 bo = ao + size;
+	int gpc, ppc, b, n = 0;
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+		size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
+	size = ((size * 0x20) + 128) & ~127;
+	b = mmio_vram(info, size, (1 << s), false);
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
 	mmio_refn(info, 0x419848, 0x10000000, s, b);
@@ -64,17 +68,18 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
 	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
 		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
 			const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
-			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 bs = attrib * gr->ppc_tpc_max;
+			const u32 gs =   gfxp * gr->ppc_tpc_max;
 			const u32 u = 0x418ea0 + (n * 0x04);
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			const u32 p = GPC_UNIT(gpc, 0xc44 + (ppc * 4));
 			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
 				continue;
-			mmio_wr32(info, o + 0xc0, bs);
+			mmio_wr32(info, o + 0xc0, gs);
 			mmio_wr32(info, p, bs);
 			mmio_wr32(info, o + 0xf4, bo);
 			mmio_wr32(info, o + 0xf0, bs);
-			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+			bo += gs;
 			mmio_wr32(info, o + 0xe4, as);
 			mmio_wr32(info, o + 0xf8, ao);
 			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
@@ -97,10 +102,11 @@ gp102_grctx = {
 	.pagepool = gp100_grctx_generate_pagepool,
 	.pagepool_size = 0x20000,
 	.attrib = gp102_grctx_generate_attrib,
-	.attrib_nr_max = 0x5d4,
+	.attrib_nr_max = 0x4b0,
 	.attrib_nr = 0x320,
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
+	.gfxp_nr = 0xba8,
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
index 5f799c7369bb..3b85e3d326b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp104.c
@@ -32,10 +32,11 @@ gp104_grctx = {
 	.pagepool = gp100_grctx_generate_pagepool,
 	.pagepool_size = 0x20000,
 	.attrib = gp102_grctx_generate_attrib,
-	.attrib_nr_max = 0x5d4,
+	.attrib_nr_max = 0x4b0,
 	.attrib_nr = 0x320,
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
+	.gfxp_nr = 0xba8,
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
index a69e824676c9..5060c5ee5ce0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -44,6 +44,7 @@ gp107_grctx = {
 	.attrib_nr = 0x540,
 	.alpha_nr_max = 0xc00,
 	.alpha_nr = 0x800,
+	.gfxp_nr = 0xe94,
 	.sm_id = gm107_grctx_generate_sm_id,
 	.rop_mapping = gf117_grctx_generate_rop_mapping,
 	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 69418619dc79..084a5d1dcf9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1780,6 +1780,8 @@ gf100_gr_oneinit(struct nvkm_gr *base)
 			if (gr->ppc_tpc_min == 0 ||
 			    gr->ppc_tpc_min > gr->ppc_tpc_nr[i][j])
 				gr->ppc_tpc_min = gr->ppc_tpc_nr[i][j];
+			if (gr->ppc_tpc_max < gr->ppc_tpc_nr[i][j])
+				gr->ppc_tpc_max = gr->ppc_tpc_nr[i][j];
 		}
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 9e608dff2846..390bcc16f91f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -106,6 +106,7 @@ struct gf100_gr {
 	u8 ppc_tpc_mask[GPC_MAX][4];
 	u8 ppc_tpc_nr[GPC_MAX][4];
 	u8 ppc_tpc_min;
+	u8 ppc_tpc_max;
 
 	u8 screen_tile_row_offset;
 	u8 tile[TPC_MAX];

From e9d03335f604a1123b8de3103ce8e06db4ad777a Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1099/1286] drm/nouveau/gr/gp100-: use correct registers for
 zbc colour/depth setup

These were missed the first time around because the driver version I traced
was still using the older registers.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 33 +++++++++-------
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    | 10 +++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf104.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf108.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf110.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf117.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gf119.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk104.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk110b.c   |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk208.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gk20a.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm107.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm200.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gm20b.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp100.c    | 39 +++++++++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gp102.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp104.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp107.c    |  1 +
 .../gpu/drm/nouveau/nvkm/engine/gr/gp10b.c    |  1 +
 20 files changed, 86 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 084a5d1dcf9c..0cffafb0130f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -92,7 +92,7 @@ gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
 	memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
 	gr->zbc_color[zbc].format = format;
 	nvkm_ltc_zbc_color_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_color(gr, zbc);
+	gr->func->zbc->clear_color(gr, zbc);
 	return zbc;
 }
 
@@ -137,10 +137,16 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 	gr->zbc_depth[zbc].ds = ds;
 	gr->zbc_depth[zbc].l2 = l2;
 	nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_depth(gr, zbc);
+	gr->func->zbc->clear_depth(gr, zbc);
 	return zbc;
 }
 
+const struct gf100_gr_func_zbc
+gf100_gr_zbc = {
+	.clear_color = gf100_gr_zbc_clear_color,
+	.clear_depth = gf100_gr_zbc_clear_depth,
+};
+
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
@@ -744,21 +750,21 @@ gf100_gr_zbc_init(struct gf100_gr *gr)
 	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
 			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
 	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
-	int index;
+	int index, c = ltc->zbc_min, d = ltc->zbc_min;
 
 	if (!gr->zbc_color[0].format) {
-		gf100_gr_zbc_color_get(gr, 1,  & zero[0],   &zero[4]);
-		gf100_gr_zbc_color_get(gr, 2,  &  one[0],    &one[4]);
-		gf100_gr_zbc_color_get(gr, 4,  &f32_0[0],  &f32_0[4]);
-		gf100_gr_zbc_color_get(gr, 4,  &f32_1[0],  &f32_1[4]);
-		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000);
-		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000);
+		gf100_gr_zbc_color_get(gr, 1,  & zero[0],   &zero[4]); c++;
+		gf100_gr_zbc_color_get(gr, 2,  &  one[0],    &one[4]); c++;
+		gf100_gr_zbc_color_get(gr, 4,  &f32_0[0],  &f32_0[4]); c++;
+		gf100_gr_zbc_color_get(gr, 4,  &f32_1[0],  &f32_1[4]); c++;
+		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); d++;
+		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); d++;
 	}
 
-	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_color(gr, index);
-	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_depth(gr, index);
+	for (index = c; index <= ltc->zbc_max; index++)
+		gr->func->zbc->clear_color(gr, index);
+	for (index = d; index <= ltc->zbc_max; index++)
+		gr->func->zbc->clear_depth(gr, index);
 }
 
 /**
@@ -2242,6 +2248,7 @@ gf100_gr = {
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
 	.rops = gf100_gr_rops,
 	.grctx = &gf100_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 390bcc16f91f..d82951ab5ef1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -129,6 +129,11 @@ int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
 		  int, struct nvkm_gr **);
 void *gf100_gr_dtor(struct nvkm_gr *);
 
+struct gf100_gr_func_zbc {
+	void (*clear_color)(struct gf100_gr *, int zbc);
+	void (*clear_depth)(struct gf100_gr *, int zbc);
+};
+
 struct gf100_gr_func {
 	void (*dtor)(struct gf100_gr *);
 	void (*oneinit_tiles)(struct gf100_gr *);
@@ -170,6 +175,7 @@ struct gf100_gr_func {
 	int ppc_nr;
 	const struct gf100_grctx_func *grctx;
 	const struct nvkm_therm_clkgate_pack *clkgate_pack;
+	const struct gf100_gr_func_zbc *zbc;
 	struct nvkm_sclass sclass[];
 };
 
@@ -187,6 +193,7 @@ void gf100_gr_init_419eb4(struct gf100_gr *);
 void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int);
 void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
 void gf100_gr_init_400054(struct gf100_gr *);
+extern const struct gf100_gr_func_zbc gf100_gr_zbc;
 
 void gf117_gr_init_zcull(struct gf100_gr *);
 
@@ -212,6 +219,9 @@ void gm200_gr_init_ds_hww_esr_2(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
 void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
 void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
+extern const struct gf100_gr_func_zbc gp100_gr_zbc;
+void gp100_gr_zbc_clear_color(struct gf100_gr *, int);
+void gp100_gr_zbc_clear_depth(struct gf100_gr *, int);
 
 void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 1d8e16a57136..42c2fd9fc04e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -134,6 +134,7 @@ gf104_gr = {
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
 	.rops = gf100_gr_rops,
 	.grctx = &gf104_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index a5a74df4edff..4731a460adc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -132,6 +132,7 @@ gf108_gr = {
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
 	.rops = gf100_gr_rops,
 	.grctx = &gf108_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index 45fada099009..cdf759c8cd7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -106,6 +106,7 @@ gf110_gr = {
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
 	.rops = gf100_gr_rops,
 	.grctx = &gf110_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 5ee167d0f5aa..a4158f84c649 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -171,6 +171,7 @@ gf117_gr = {
 	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gf117_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 2096552fc537..4197844870b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -197,6 +197,7 @@ gf119_gr = {
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
 	.rops = gf100_gr_rops,
 	.grctx = &gf119_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 9abacb218361..477fee3e3715 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -479,6 +479,7 @@ gk104_gr = {
 	.ppc_nr = 1,
 	.grctx = &gk104_grctx,
 	.clkgate_pack = gk104_clkgate_pack,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index dba70d50bb8e..7cd628c84e07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -375,6 +375,7 @@ gk110_gr = {
 	.ppc_nr = 2,
 	.grctx = &gk110_grctx,
 	.clkgate_pack = gk110_clkgate_pack,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 48bc8d85dbb8..a38faa215635 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -126,6 +126,7 @@ gk110b_gr = {
 	.rops = gf100_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gk110b_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 7f45b122dcb0..58456660e603 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -184,6 +184,7 @@ gk208_gr = {
 	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gk208_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 11a32fa01586..500cb08dd608 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -292,6 +292,7 @@ gk20a_gr = {
 	.rops = gf100_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gk20a_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index f5411aff0e44..92e31d397207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -419,6 +419,7 @@ gm107_gr = {
 	.rops = gf100_gr_rops,
 	.ppc_nr = 2,
 	.grctx = &gm107_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 8966d2a7235c..eff30662b984 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -187,6 +187,7 @@ gm200_gr = {
 	.tpc_nr = 4,
 	.ppc_nr = 2,
 	.grctx = &gm200_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index afa1c6e32230..a667770ce3cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -75,6 +75,7 @@ gm20b_gr = {
 	.rops = gm200_gr_rops,
 	.ppc_nr = 1,
 	.grctx = &gm20b_grctx,
+	.zbc = &gf100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 1d9d8760e13e..ef16fee61327 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -29,6 +29,44 @@
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
+void
+gp100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const int znum =  zbc - 1;
+	const u32 zoff = znum * 4;
+
+	if (gr->zbc_color[zbc].format) {
+		nvkm_wr32(device, 0x418010 + zoff, gr->zbc_color[zbc].ds[0]);
+		nvkm_wr32(device, 0x41804c + zoff, gr->zbc_color[zbc].ds[1]);
+		nvkm_wr32(device, 0x418088 + zoff, gr->zbc_color[zbc].ds[2]);
+		nvkm_wr32(device, 0x4180c4 + zoff, gr->zbc_color[zbc].ds[3]);
+	}
+
+	nvkm_mask(device, 0x418100 + ((znum / 4) * 4),
+			  0x0000007f << ((znum % 4) * 7),
+			  gr->zbc_color[zbc].format << ((znum % 4) * 7));
+}
+
+void
+gp100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const int znum =  zbc - 1;
+	const u32 zoff = znum * 4;
+
+	if (gr->zbc_depth[zbc].format)
+		nvkm_wr32(device, 0x418110 + zoff, gr->zbc_depth[zbc].ds);
+	nvkm_mask(device, 0x41814c + ((znum / 4) * 4),
+			  0x0000007f << ((znum % 4) * 7),
+			  gr->zbc_depth[zbc].format << ((znum % 4) * 7));
+}
+
+const struct gf100_gr_func_zbc
+gp100_gr_zbc = {
+	.clear_color = gp100_gr_zbc_clear_color,
+	.clear_depth = gp100_gr_zbc_clear_depth,
+};
 
 void
 gp100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
@@ -87,6 +125,7 @@ gp100_gr = {
 	.tpc_nr = 5,
 	.ppc_nr = 2,
 	.grctx = &gp100_grctx,
+	.zbc = &gp100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 2d9a2c3ec261..8fc95a015e13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -65,6 +65,7 @@ gp102_gr = {
 	.tpc_nr = 5,
 	.ppc_nr = 3,
 	.grctx = &gp102_grctx,
+	.zbc = &gp100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index e466ae460d3c..56f92c723504 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -49,6 +49,7 @@ gp104_gr = {
 	.tpc_nr = 5,
 	.ppc_nr = 3,
 	.grctx = &gp104_grctx,
+	.zbc = &gp100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 2fa046a1da60..4bb0340e3fda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -51,6 +51,7 @@ gp107_gr = {
 	.tpc_nr = 3,
 	.ppc_nr = 1,
 	.grctx = &gp107_grctx,
+	.zbc = &gp100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index 0a01a306da2a..b2a39ad2fa44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -49,6 +49,7 @@ gp10b_gr = {
 	.tpc_nr = 2,
 	.ppc_nr = 1,
 	.grctx = &gp102_grctx,
+	.zbc = &gp100_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },

From 4b2c71edf0d7832ef4d2fe5b17402d1130b415dc Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1100/1286] drm/nouveau/gr/gp102-: setup stencil zbc

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/include/nvkm/subdev/ltc.h |  3 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c | 12 ++--
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 12 +++-
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    | 12 +++-
 .../gpu/drm/nouveau/nvkm/engine/gr/gp100.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gp102.c    | 58 ++++++++++++++++++-
 .../gpu/drm/nouveau/nvkm/engine/gr/gp104.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gp107.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gp10b.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild    |  1 +
 .../gpu/drm/nouveau/nvkm/subdev/ltc/base.c    | 10 ++++
 .../gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c   |  6 +-
 .../gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c   | 51 ++++++++++++++++
 .../gpu/drm/nouveau/nvkm/subdev/ltc/priv.h    |  5 ++
 14 files changed, 162 insertions(+), 16 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 95b611554d53..9db5f8293198 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -21,12 +21,14 @@ struct nvkm_ltc {
 	int zbc_max;
 	u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
 	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
+	u32 zbc_stencil[NVKM_LTC_MAX_ZBC_CNT];
 };
 
 void nvkm_ltc_tags_clear(struct nvkm_device *, u32 first, u32 count);
 
 int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
 int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
+int nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *, int index, const u32);
 
 void nvkm_ltc_invalidate(struct nvkm_ltc *);
 void nvkm_ltc_flush(struct nvkm_ltc *);
@@ -37,4 +39,5 @@ int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 16e8090082ab..5c79c795acaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2204,7 +2204,7 @@ nv132_chipset = {
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
-	.ltc = gp100_ltc_new,
+	.ltc = gp102_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
@@ -2240,7 +2240,7 @@ nv134_chipset = {
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
-	.ltc = gp100_ltc_new,
+	.ltc = gp102_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
@@ -2276,7 +2276,7 @@ nv136_chipset = {
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
-	.ltc = gp100_ltc_new,
+	.ltc = gp102_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
@@ -2312,7 +2312,7 @@ nv137_chipset = {
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
-	.ltc = gp100_ltc_new,
+	.ltc = gp102_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
@@ -2348,7 +2348,7 @@ nv138_chipset = {
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
-	.ltc = gp100_ltc_new,
+	.ltc = gp102_ltc_new,
 	.mc = gp100_mc_new,
 	.mmu = gp100_mmu_new,
 	.therm = gp100_therm_new,
@@ -2380,7 +2380,7 @@ nv13b_chipset = {
 	.fuse = gm107_fuse_new,
 	.ibus = gp10b_ibus_new,
 	.imem = gk20a_instmem_new,
-	.ltc = gp100_ltc_new,
+	.ltc = gp102_ltc_new,
 	.mc = gp10b_mc_new,
 	.mmu = gp10b_mmu_new,
 	.secboot = gp10b_secboot_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 0cffafb0130f..86ae5c706aa0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -750,7 +750,7 @@ gf100_gr_zbc_init(struct gf100_gr *gr)
 	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
 			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
 	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
-	int index, c = ltc->zbc_min, d = ltc->zbc_min;
+	int index, c = ltc->zbc_min, d = ltc->zbc_min, s = ltc->zbc_min;
 
 	if (!gr->zbc_color[0].format) {
 		gf100_gr_zbc_color_get(gr, 1,  & zero[0],   &zero[4]); c++;
@@ -759,12 +759,22 @@ gf100_gr_zbc_init(struct gf100_gr *gr)
 		gf100_gr_zbc_color_get(gr, 4,  &f32_1[0],  &f32_1[4]); c++;
 		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); d++;
 		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); d++;
+		if (gr->func->zbc->stencil_get) {
+			gr->func->zbc->stencil_get(gr, 1, 0x00, 0x00); s++;
+			gr->func->zbc->stencil_get(gr, 1, 0x01, 0x01); s++;
+			gr->func->zbc->stencil_get(gr, 1, 0xff, 0xff); s++;
+		}
 	}
 
 	for (index = c; index <= ltc->zbc_max; index++)
 		gr->func->zbc->clear_color(gr, index);
 	for (index = d; index <= ltc->zbc_max; index++)
 		gr->func->zbc->clear_depth(gr, index);
+
+	if (gr->func->zbc->clear_stencil) {
+		for (index = s; index <= ltc->zbc_max; index++)
+			gr->func->zbc->clear_stencil(gr, index);
+	}
 }
 
 /**
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index d82951ab5ef1..edf6edabf6df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -72,6 +72,12 @@ struct gf100_gr_zbc_depth {
 	u32 l2;
 };
 
+struct gf100_gr_zbc_stencil {
+	u32 format;
+	u32 ds;
+	u32 l2;
+};
+
 struct gf100_gr {
 	const struct gf100_gr_func *func;
 	struct nvkm_gr base;
@@ -95,6 +101,7 @@ struct gf100_gr {
 
 	struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
 	struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
+	struct gf100_gr_zbc_stencil zbc_stencil[NVKM_LTC_MAX_ZBC_CNT];
 
 	u8 rop_nr;
 	u8 gpc_nr;
@@ -132,6 +139,9 @@ void *gf100_gr_dtor(struct nvkm_gr *);
 struct gf100_gr_func_zbc {
 	void (*clear_color)(struct gf100_gr *, int zbc);
 	void (*clear_depth)(struct gf100_gr *, int zbc);
+	int (*stencil_get)(struct gf100_gr *, int format,
+			   const u32 ds, const u32 l2);
+	void (*clear_stencil)(struct gf100_gr *, int zbc);
 };
 
 struct gf100_gr_func {
@@ -219,11 +229,11 @@ void gm200_gr_init_ds_hww_esr_2(struct gf100_gr *);
 void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
 void gp100_gr_init_fecs_exceptions(struct gf100_gr *);
 void gp100_gr_init_shader_exceptions(struct gf100_gr *, int, int);
-extern const struct gf100_gr_func_zbc gp100_gr_zbc;
 void gp100_gr_zbc_clear_color(struct gf100_gr *, int);
 void gp100_gr_zbc_clear_depth(struct gf100_gr *, int);
 
 void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
+extern const struct gf100_gr_func_zbc gp102_gr_zbc;
 
 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
 #include <core/object.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index ef16fee61327..9d0521ce309a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -62,7 +62,7 @@ gp100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
 			  gr->zbc_depth[zbc].format << ((znum % 4) * 7));
 }
 
-const struct gf100_gr_func_zbc
+static const struct gf100_gr_func_zbc
 gp100_gr_zbc = {
 	.clear_color = gp100_gr_zbc_clear_color,
 	.clear_depth = gp100_gr_zbc_clear_depth,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 8fc95a015e13..37f7d739bf80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -26,6 +26,62 @@
 
 #include <nvif/class.h>
 
+static void
+gp102_gr_zbc_clear_stencil(struct gf100_gr *gr, int zbc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const int znum =  zbc - 1;
+	const u32 zoff = znum * 4;
+
+	if (gr->zbc_stencil[zbc].format)
+		nvkm_wr32(device, 0x41815c + zoff, gr->zbc_stencil[zbc].ds);
+	nvkm_mask(device, 0x418198 + ((znum / 4) * 4),
+			  0x0000007f << ((znum % 4) * 7),
+			  gr->zbc_stencil[zbc].format << ((znum % 4) * 7));
+}
+
+static int
+gp102_gr_zbc_stencil_get(struct gf100_gr *gr, int format,
+			 const u32 ds, const u32 l2)
+{
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
+	int zbc = -ENOSPC, i;
+
+	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+		if (gr->zbc_stencil[i].format) {
+			if (gr->zbc_stencil[i].format != format)
+				continue;
+			if (gr->zbc_stencil[i].ds != ds)
+				continue;
+			if (gr->zbc_stencil[i].l2 != l2) {
+				WARN_ON(1);
+				return -EINVAL;
+			}
+			return i;
+		} else {
+			zbc = (zbc < 0) ? i : zbc;
+		}
+	}
+
+	if (zbc < 0)
+		return zbc;
+
+	gr->zbc_stencil[zbc].format = format;
+	gr->zbc_stencil[zbc].ds = ds;
+	gr->zbc_stencil[zbc].l2 = l2;
+	nvkm_ltc_zbc_stencil_get(ltc, zbc, l2);
+	gr->func->zbc->clear_stencil(gr, zbc);
+	return zbc;
+}
+
+const struct gf100_gr_func_zbc
+gp102_gr_zbc = {
+	.clear_color = gp100_gr_zbc_clear_color,
+	.clear_depth = gp100_gr_zbc_clear_depth,
+	.stencil_get = gp102_gr_zbc_stencil_get,
+	.clear_stencil = gp102_gr_zbc_clear_stencil,
+};
+
 void
 gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
 {
@@ -65,7 +121,7 @@ gp102_gr = {
 	.tpc_nr = 5,
 	.ppc_nr = 3,
 	.grctx = &gp102_grctx,
-	.zbc = &gp100_gr_zbc,
+	.zbc = &gp102_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
index 56f92c723504..4573c914c021 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
@@ -49,7 +49,7 @@ gp104_gr = {
 	.tpc_nr = 5,
 	.ppc_nr = 3,
 	.grctx = &gp104_grctx,
-	.zbc = &gp100_gr_zbc,
+	.zbc = &gp102_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
index 4bb0340e3fda..812aba91653f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -51,7 +51,7 @@ gp107_gr = {
 	.tpc_nr = 3,
 	.ppc_nr = 1,
 	.grctx = &gp107_grctx,
-	.zbc = &gp100_gr_zbc,
+	.zbc = &gp102_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
index b2a39ad2fa44..303dceddd4a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -49,7 +49,7 @@ gp10b_gr = {
 	.tpc_nr = 2,
 	.ppc_nr = 1,
 	.grctx = &gp102_grctx,
-	.zbc = &gp100_gr_zbc,
+	.zbc = &gp102_gr_zbc,
 	.sclass = {
 		{ -1, -1, FERMI_TWOD_A },
 		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 12d6f4f102cb..290ff1c425a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -4,3 +4,4 @@ nvkm-y += nvkm/subdev/ltc/gk104.o
 nvkm-y += nvkm/subdev/ltc/gm107.o
 nvkm-y += nvkm/subdev/ltc/gm200.o
 nvkm-y += nvkm/subdev/ltc/gp100.o
+nvkm-y += nvkm/subdev/ltc/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 1f185274d3e6..23242179e600 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -55,6 +55,14 @@ nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
 	return index;
 }
 
+int
+nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *ltc, int index, const u32 stencil)
+{
+	ltc->zbc_stencil[index] = stencil;
+	ltc->func->zbc_clear_stencil(ltc, index, stencil);
+	return index;
+}
+
 void
 nvkm_ltc_invalidate(struct nvkm_ltc *ltc)
 {
@@ -92,6 +100,8 @@ nvkm_ltc_init(struct nvkm_subdev *subdev)
 	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
 		ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
 		ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
+		if (ltc->func->zbc_clear_stencil)
+			ltc->func->zbc_clear_stencil(ltc, i, ltc->zbc_stencil[i]);
 	}
 
 	ltc->func->init(ltc);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
index e34d42108019..e923ed76d37a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -23,7 +23,7 @@
  */
 #include "priv.h"
 
-static void
+void
 gp100_ltc_intr(struct nvkm_ltc *ltc)
 {
 	struct nvkm_device *device = ltc->subdev.device;
@@ -38,7 +38,7 @@ gp100_ltc_intr(struct nvkm_ltc *ltc)
 	}
 }
 
-static int
+int
 gp100_ltc_oneinit(struct nvkm_ltc *ltc)
 {
 	struct nvkm_device *device = ltc->subdev.device;
@@ -48,7 +48,7 @@ gp100_ltc_oneinit(struct nvkm_ltc *ltc)
 	return 0;
 }
 
-static void
+void
 gp100_ltc_init(struct nvkm_ltc *ltc)
 {
 	/*XXX: PMU LS call to setup tagram address */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
new file mode 100644
index 000000000000..601747ada655
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+void
+gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *ltc, int i, const u32 stencil)
+{
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17e338, 0x0000000f, i);
+	nvkm_wr32(device, 0x17e204, stencil);
+}
+
+static const struct nvkm_ltc_func
+gp102_ltc = {
+	.oneinit = gp100_ltc_oneinit,
+	.init = gp100_ltc_init,
+	.intr = gp100_ltc_intr,
+	.cbc_clear = gm107_ltc_cbc_clear,
+	.cbc_wait = gm107_ltc_cbc_wait,
+	.zbc = 16,
+	.zbc_clear_color = gm107_ltc_zbc_clear_color,
+	.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+	.zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
+	.invalidate = gf100_ltc_invalidate,
+	.flush = gf100_ltc_flush,
+};
+
+int
+gp102_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gp102_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index e71cc25cc775..9dcde43c0f3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -19,6 +19,7 @@ struct nvkm_ltc_func {
 	int zbc;
 	void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]);
 	void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32);
+	void (*zbc_clear_stencil)(struct nvkm_ltc *, int, const u32);
 
 	void (*invalidate)(struct nvkm_ltc *);
 	void (*flush)(struct nvkm_ltc *);
@@ -41,4 +42,8 @@ void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
 void gm107_ltc_cbc_wait(struct nvkm_ltc *);
 void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
 void gm107_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
+
+int gp100_ltc_oneinit(struct nvkm_ltc *);
+void gp100_ltc_init(struct nvkm_ltc *);
+void gp100_ltc_intr(struct nvkm_ltc *);
 #endif

From 19ca10d82e33bcfe92412c461fc3534ec1e14747 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1101/1286] drm/nouveau/gem: look up VMAs for buffers referenced
 by pushbuf ioctl

We previously only did this for push buffers, but an upcoming patch will
need to attach fences to all VMAs to resolve another issue.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_dma.c | 10 +---------
 drivers/gpu/drm/nouveau/nouveau_dma.h |  5 ++---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 19 ++++++++++++++++---
 3 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 10e84f6ca2b7..e0664d28802b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -80,18 +80,10 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 }
 
 void
-nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
-	      int delta, int length)
+nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
 {
-	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_bo *pb = chan->push.buffer;
-	struct nouveau_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
-	u64 offset;
-
-	vma = nouveau_vma_find(bo, &cli->vmm);
-	BUG_ON(!vma);
-	offset = vma->addr + delta;
 
 	BUG_ON(chan->dma.ib_free < 1);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 74e10b14a7da..89c87111bbbd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,8 +31,7 @@
 #include "nouveau_chan.h"
 
 int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
-		   int delta, int length);
+void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
 
 /*
  * There's a hw race condition where you can't jump to your PUT offset,
@@ -151,7 +150,7 @@ FIRE_RING(struct nouveau_channel *chan)
 	chan->accel_done = true;
 
 	if (chan->dma.ib_max) {
-		nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2,
+		nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
 			      (chan->dma.cur - chan->dma.put) << 2);
 	} else {
 		WRITE_PUT(chan->dma.cur);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e72a7e37eb0a..707e02c80f18 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -432,7 +432,20 @@ retry:
 			}
 		}
 
-		b->user_priv = (uint64_t)(unsigned long)nvbo;
+		if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+			struct nouveau_vmm *vmm = &cli->vmm;
+			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
+			if (!vma) {
+				NV_PRINTK(err, cli, "vma not found!\n");
+				ret = -EINVAL;
+				break;
+			}
+
+			b->user_priv = (uint64_t)(unsigned long)vma;
+		} else {
+			b->user_priv = (uint64_t)(unsigned long)nvbo;
+		}
+
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -763,10 +776,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		}
 
 		for (i = 0; i < req->nr_push; i++) {
-			struct nouveau_bo *nvbo = (void *)(unsigned long)
+			struct nouveau_vma *vma = (void *)(unsigned long)
 				bo[push[i].bo_index].user_priv;
 
-			nv50_dma_push(chan, nvbo, push[i].offset,
+			nv50_dma_push(chan, vma->addr + push[i].offset,
 				      push[i].length);
 		}
 	} else

From 0db912af8f5ad4fa4dc08a9c8e411a10953c5403 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1102/1286] drm/nouveau/gem: attach fences to VMAs to track GPU
 usage

An upcoming patch will use these to fix issues related to the deferred
unmapping of GEM objects.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 14 +++++++++++++-
 drivers/gpu/drm/nouveau/nouveau_vmm.c |  1 +
 drivers/gpu/drm/nouveau/nouveau_vmm.h |  2 ++
 3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 707e02c80f18..2016d9eb338e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -99,6 +99,7 @@ struct nouveau_gem_object_unmap {
 static void
 nouveau_gem_object_delete(struct nouveau_vma *vma)
 {
+	nouveau_fence_unref(&vma->fence);
 	nouveau_vma_del(&vma);
 }
 
@@ -344,9 +345,20 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
 		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
 		b = &pbbo[nvbo->pbbo_index];
 
-		if (likely(fence))
+		if (likely(fence)) {
+			struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+			struct nouveau_vma *vma;
+
 			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 
+			if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+				vma = (void *)(unsigned long)b->user_priv;
+				nouveau_fence_unref(&vma->fence);
+				dma_fence_get(&fence->base);
+				vma->fence = fence;
+			}
+		}
+
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
 			nvbo->validate_mapped = false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index f5371d96b003..2032c3e4f6e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -92,6 +92,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 	vma->refs = 1;
 	vma->addr = ~0ULL;
 	vma->mem = NULL;
+	vma->fence = NULL;
 	list_add_tail(&vma->head, &nvbo->vma_list);
 
 	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
index 5c31f43678d3..7e3b118cf7c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -11,6 +11,8 @@ struct nouveau_vma {
 	u64 addr;
 
 	struct nouveau_mem *mem;
+
+	struct nouveau_fence *fence;
 };
 
 struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);

From 470db8b78186efe840b6452c6c4934178058059e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1103/1286] drm/nouveau/gem: tie deferred unmapping of buffers
 to VMA fence completion

Unlike buffers, VMAs are per-client, so this allows us to avoid referencing
foreign fences (those belonging to another client/driver) from the client's
deferred work handler, and prevents some nasty race conditions that can be
triggered when a fence stalls.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2016d9eb338e..300daee74209 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -115,25 +115,12 @@ nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
 static void
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
-	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
-	struct reservation_object *resv = nvbo->bo.resv;
-	struct reservation_object_list *fobj;
+	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
 	struct nouveau_gem_object_unmap *work;
-	struct dma_fence *fence = NULL;
-
-	fobj = reservation_object_get_list(resv);
 
 	list_del_init(&vma->head);
 
-	if (fobj && fobj->shared_count > 1)
-		ttm_bo_wait(&nvbo->bo, false, false);
-	else if (fobj && fobj->shared_count == 1)
-		fence = rcu_dereference_protected(fobj->shared[0],
-						reservation_object_held(resv));
-	else
-		fence = reservation_object_get_excl(nvbo->bo.resv);
-
-	if (!fence || !mapped) {
+	if (!fence) {
 		nouveau_gem_object_delete(vma);
 		return;
 	}

From 11e451e74050d9e9030581ce40337838acfcea5b Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1104/1286] drm/nouveau: remove fence wait code from deferred
 client work handler

Fences attached to deferred client work items now originate from channels
belonging to the client, meaning we can be certain they've been signalled
before we destroy a client.

This closes a race that could happen if the dma_fence_wait_timeout() call
didn't succeed.  When the fence was later signalled, a use-after-free was
possible.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_drm.c | 30 +++++++++++++--------------
 1 file changed, 14 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6caece4f2f5f..64b8fd0c4d68 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -113,24 +113,22 @@ nouveau_name(struct drm_device *dev)
 }
 
 static inline bool
-nouveau_cli_work_ready(struct dma_fence *fence, bool wait)
+nouveau_cli_work_ready(struct dma_fence *fence)
 {
-	if (!dma_fence_is_signaled(fence)) {
-		if (!wait)
-			return false;
-		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
-	}
+	if (!dma_fence_is_signaled(fence))
+		return false;
 	dma_fence_put(fence);
 	return true;
 }
 
 static void
-nouveau_cli_work_flush(struct nouveau_cli *cli, bool wait)
+nouveau_cli_work(struct work_struct *w)
 {
+	struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
 	struct nouveau_cli_work *work, *wtmp;
 	mutex_lock(&cli->lock);
 	list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
-		if (!work->fence || nouveau_cli_work_ready(work->fence, wait)) {
+		if (!work->fence || nouveau_cli_work_ready(work->fence)) {
 			list_del(&work->head);
 			work->func(work);
 		}
@@ -158,17 +156,17 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
 	mutex_unlock(&cli->lock);
 }
 
-static void
-nouveau_cli_work(struct work_struct *w)
-{
-	struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
-	nouveau_cli_work_flush(cli, false);
-}
-
 static void
 nouveau_cli_fini(struct nouveau_cli *cli)
 {
-	nouveau_cli_work_flush(cli, true);
+	/* All our channels are dead now, which means all the fences they
+	 * own are signalled, and all callback functions have been called.
+	 *
+	 * So, after flushing the workqueue, there should be nothing left.
+	 */
+	flush_work(&cli->work);
+	WARN_ON(!list_empty(&cli->worker));
+
 	usif_client_fini(cli);
 	nouveau_vmm_fini(&cli->vmm);
 	nvif_mmu_fini(&cli->mmu);

From 92b4eaaf9a84a7bd35db6f903c0ecbda4f9594ee Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1105/1286] drm/nouveau: no need to create ctxdma for push
 buffers on fermi and up

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_chan.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 67950a5c56ce..97900e9cfe3f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -163,12 +163,15 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			return ret;
 		}
 
+		chan->push.addr = chan->push.vma->addr;
+
+		if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
+			return 0;
+
 		args.target = NV_DMA_V0_TARGET_VM;
 		args.access = NV_DMA_V0_ACCESS_VM;
 		args.start = 0;
 		args.limit = cli->vmm.vmm.limit - 1;
-
-		chan->push.addr = chan->push.vma->addr;
 	} else
 	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
 		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {

From 512fa0b8a398539c3c2db251f6c40da4ef065d09 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1106/1286] drm/nouveau/drm/nv50-: remove allocation of sw
 class

Hasn't been required for a long time.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_dma.h |  1 -
 drivers/gpu/drm/nouveau/nouveau_drm.c | 40 +++++++++++++--------------
 2 files changed, 19 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 89c87111bbbd..fc5e3f41282d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -54,7 +54,6 @@ enum {
 
 	NvSub2D		= 3, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
 	NvSubCopy	= 4, /* DO NOT CHANGE - hardcoded for kepler gr fifo */
-	FermiSw		= 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
 };
 
 /* Object handles - for stuff that's doesn't use handle == oclass. */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 64b8fd0c4d68..8e506c5d5a73 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -388,38 +388,36 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		return;
 	}
 
-	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
-			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
-	if (ret == 0) {
-		ret = RING_SPACE(drm->channel, 2);
+	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
+		ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
+				       nouveau_abi16_swclass(drm), NULL, 0,
+				       &drm->nvsw);
 		if (ret == 0) {
-			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
+			ret = RING_SPACE(drm->channel, 2);
+			if (ret == 0) {
 				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
-				OUT_RING  (drm->channel, NVDRM_NVSW);
-			} else
-			if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
-				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
-				OUT_RING  (drm->channel, 0x001f0000);
+				OUT_RING  (drm->channel, drm->nvsw.handle);
+			}
+
+			ret = nvif_notify_init(&drm->nvsw,
+					       nouveau_flip_complete,
+					       false, NV04_NVSW_NTFY_UEVENT,
+					       NULL, 0, 0, &drm->flip);
+			if (ret == 0)
+				ret = nvif_notify_get(&drm->flip);
+			if (ret) {
+				nouveau_accel_fini(drm);
+				return;
 			}
 		}
 
-		ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
-				       false, NV04_NVSW_NTFY_UEVENT,
-				       NULL, 0, 0, &drm->flip);
-		if (ret == 0)
-			ret = nvif_notify_get(&drm->flip);
 		if (ret) {
+			NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
 			nouveau_accel_fini(drm);
 			return;
 		}
 	}
 
-	if (ret) {
-		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
-		nouveau_accel_fini(drm);
-		return;
-	}
-
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
 		ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0,
 				      false, NULL, &drm->notify);

From 0d4a2c5767dc6136079b11ed45934143d309026e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1107/1286] drm/nouveau/kms: move display class instantiation
 to library

The display class instantiation logic is useful outside of DRM code, so
move it into the NVIF library as nvif_disp_ctor()/nvif_disp_dtor().

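A minimal usage sketch, assuming only an initialised struct nvif_device
(which is what nouveau_display_create() passes in after this change):

	struct nvif_disp disp;
	int ret = nvif_disp_ctor(&drm->client.device, 0, &disp);
	if (ret == 0) {
		/* disp.object.oclass identifies the newest display class
		 * supported by the device, picked from the table in
		 * nvif/disp.c.
		 */
		nvif_disp_dtor(&disp);
	}

Passing a non-zero oclass instead of 0 makes the new nvif_sclass() helper
match that exact class (or fail with -ENOSYS) rather than choosing the
newest supported one.
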
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/disp.h   | 12 ++++
 drivers/gpu/drm/nouveau/include/nvif/object.h | 16 +++++
 drivers/gpu/drm/nouveau/nouveau_connector.c   | 14 ++---
 drivers/gpu/drm/nouveau/nouveau_display.c     | 35 +++--------
 drivers/gpu/drm/nouveau/nouveau_display.h     |  3 +-
 drivers/gpu/drm/nouveau/nv50_display.c        | 50 ++++++++--------
 drivers/gpu/drm/nouveau/nvif/Kbuild           |  1 +
 drivers/gpu/drm/nouveau/nvif/disp.c           | 59 +++++++++++++++++++
 8 files changed, 129 insertions(+), 61 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/include/nvif/disp.h
 create mode 100644 drivers/gpu/drm/nouveau/nvif/disp.c

diff --git a/drivers/gpu/drm/nouveau/include/nvif/disp.h b/drivers/gpu/drm/nouveau/include/nvif/disp.h
new file mode 100644
index 000000000000..7c0eda375c01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/disp.h
@@ -0,0 +1,12 @@
+#ifndef __NVIF_DISP_H__
+#define __NVIF_DISP_H__
+#include <nvif/object.h>
+struct nvif_device;
+
+struct nvif_disp {
+	struct nvif_object object;
+};
+
+int nvif_disp_ctor(struct nvif_device *, s32 oclass, struct nvif_disp *);
+void nvif_disp_dtor(struct nvif_disp *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index a2d5244ff2b7..20754d9e6883 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -99,6 +99,22 @@ struct nvif_mclass {
 	ret;                                                                   \
 })
 
+#define nvif_sclass(o,m,u) ({                                                  \
+	const typeof(m[0]) *_mclass = (m);                                     \
+	s32 _oclass = (u);                                                     \
+	int _cid;                                                              \
+	if (_oclass) {                                                         \
+		for (_cid = 0; _mclass[_cid].oclass; _cid++) {                 \
+			if (_mclass[_cid].oclass == _oclass)                   \
+				break;                                         \
+		}                                                              \
+		_cid = _mclass[_cid].oclass ? _cid : -ENOSYS;                  \
+	} else {                                                               \
+		_cid = nvif_mclass((o), _mclass);                              \
+	}                                                                      \
+	_cid;                                                                  \
+})
+
 /*XXX*/
 #include <core/object.h>
 #define nvxx_object(a) ({                                                      \
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 6ed9cb053dfa..18e3239f7658 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -151,7 +151,7 @@ nouveau_conn_atomic_set_property(struct drm_connector *connector,
 				/* ... except prior to G80, where the code
 				 * doesn't support such things.
 				 */
-				if (disp->disp.oclass < NV50_DISP)
+				if (disp->disp.object.oclass < NV50_DISP)
 					return -EINVAL;
 				break;
 			default:
@@ -260,7 +260,7 @@ nouveau_conn_reset(struct drm_connector *connector)
 	asyc->procamp.color_vibrance = 150;
 	asyc->procamp.vibrant_hue = 90;
 
-	if (nouveau_display(connector->dev)->disp.oclass < NV50_DISP) {
+	if (nouveau_display(connector->dev)->disp.object.oclass < NV50_DISP) {
 		switch (connector->connector_type) {
 		case DRM_MODE_CONNECTOR_LVDS:
 			/* See note in nouveau_conn_atomic_set_property(). */
@@ -314,7 +314,7 @@ nouveau_conn_attach_properties(struct drm_connector *connector)
 	case DRM_MODE_CONNECTOR_TV:
 		break;
 	case DRM_MODE_CONNECTOR_VGA:
-		if (disp->disp.oclass < NV50_DISP)
+		if (disp->disp.object.oclass < NV50_DISP)
 			break; /* Can only scale on DFPs. */
 		/* Fall-through. */
 	default:
@@ -1321,7 +1321,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	}
 
 	/* HDMI 3D support */
-	if ((disp->disp.oclass >= G82_DISP)
+	if ((disp->disp.object.oclass >= G82_DISP)
 	    && ((type == DRM_MODE_CONNECTOR_DisplayPort)
 		|| (type == DRM_MODE_CONNECTOR_eDP)
 		|| (type == DRM_MODE_CONNECTOR_HDMIA)))
@@ -1343,7 +1343,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	case DCB_CONNECTOR_LVDS_SPWG:
 	case DCB_CONNECTOR_eDP:
 		/* see note in nouveau_connector_set_property() */
-		if (disp->disp.oclass < NV50_DISP) {
+		if (disp->disp.object.oclass < NV50_DISP) {
 			nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
 			break;
 		}
@@ -1366,8 +1366,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		break;
 	}
 
-	ret = nvif_notify_init(&disp->disp, nouveau_connector_hotplug, true,
-			       NV04_DISP_NTFY_CONN,
+	ret = nvif_notify_init(&disp->disp.object, nouveau_connector_hotplug,
+			       true, NV04_DISP_NTFY_CONN,
 			       &(struct nvif_notify_conn_req_v0) {
 				.mask = NVIF_NOTIFY_CONN_V0_ANY,
 				.conn = index,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7d0bec8dd03d..774b429142bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -116,7 +116,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
 	bool ret = false;
 
 	do {
-		ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
+		ret = nvif_mthd(&disp->disp.object, 0, &args, sizeof(args));
 		if (ret != 0)
 			return false;
 
@@ -175,7 +175,7 @@ nouveau_display_vblank_init(struct drm_device *dev)
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		ret = nvif_notify_init(&disp->disp,
+		ret = nvif_notify_init(&disp->disp.object,
 				       nouveau_display_vblank_handler, false,
 				       NV04_DISP_NTFY_VBLANK,
 				       &(struct nvif_notify_head_req_v0) {
@@ -454,10 +454,10 @@ nouveau_display_create_properties(struct drm_device *dev)
 	struct nouveau_display *disp = nouveau_display(dev);
 	int gen;
 
-	if (disp->disp.oclass < NV50_DISP)
+	if (disp->disp.object.oclass < NV50_DISP)
 		gen = 0;
 	else
-	if (disp->disp.oclass < GF110_DISP)
+	if (disp->disp.object.oclass < GF110_DISP)
 		gen = 1;
 	else
 		gen = 2;
@@ -533,31 +533,10 @@ nouveau_display_create(struct drm_device *dev)
 	drm_kms_helper_poll_disable(dev);
 
 	if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
-		static const u16 oclass[] = {
-			GP102_DISP,
-			GP100_DISP,
-			GM200_DISP,
-			GM107_DISP,
-			GK110_DISP,
-			GK104_DISP,
-			GF110_DISP,
-			GT214_DISP,
-			GT206_DISP,
-			GT200_DISP,
-			G82_DISP,
-			NV50_DISP,
-			NV04_DISP,
-		};
-		int i;
-
-		for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
-			ret = nvif_object_init(&drm->client.device.object, 0,
-					       oclass[i], NULL, 0, &disp->disp);
-		}
-
+		ret = nvif_disp_ctor(&drm->client.device, 0, &disp->disp);
 		if (ret == 0) {
 			nouveau_display_create_properties(dev);
-			if (disp->disp.oclass < NV50_DISP)
+			if (disp->disp.object.oclass < NV50_DISP)
 				ret = nv04_display_create(dev);
 			else
 				ret = nv50_display_create(dev);
@@ -611,7 +590,7 @@ nouveau_display_destroy(struct drm_device *dev)
 	if (disp->dtor)
 		disp->dtor(dev);
 
-	nvif_object_fini(&disp->disp);
+	nvif_disp_dtor(&disp->disp);
 
 	nouveau_drm(dev)->display = NULL;
 	kfree(disp);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 270ba56f2756..54aa7c3fa42d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -2,6 +2,7 @@
 #ifndef __NOUVEAU_DISPLAY_H__
 #define __NOUVEAU_DISPLAY_H__
 #include "nouveau_drv.h"
+#include <nvif/disp.h>
 
 struct nouveau_framebuffer {
 	struct drm_framebuffer base;
@@ -38,7 +39,7 @@ struct nouveau_display {
 	int  (*init)(struct drm_device *);
 	void (*fini)(struct drm_device *);
 
-	struct nvif_object disp;
+	struct nvif_disp disp;
 
 	struct drm_property *dithering_mode;
 	struct drm_property *dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index e90330e4e8c5..f2156c8ca90f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -663,7 +663,7 @@ struct nv50_head {
 #define nv50_vers(c) nv50_chan(c)->user.oclass
 
 struct nv50_disp {
-	struct nvif_object *disp;
+	struct nvif_disp *disp;
 	struct nv50_mast mast;
 
 	struct nouveau_bo *sync;
@@ -1201,7 +1201,7 @@ nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
 	struct nv50_curs *curs;
 	int cid, ret;
 
-	cid = nvif_mclass(disp->disp, curses);
+	cid = nvif_mclass(&disp->disp->object, curses);
 	if (cid < 0) {
 		NV_ERROR(drm, "No supported cursor immediate class\n");
 		return cid;
@@ -1219,8 +1219,8 @@ nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
 		return ret;
 	}
 
-	ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
-			       sizeof(args), &curs->chan);
+	ret = nvif_object_init(&disp->disp->object, 0, curses[cid].oclass,
+			       &args, sizeof(args), &curs->chan);
 	if (ret) {
 		NV_ERROR(drm, "curs%04x allocation failed: %d\n",
 			 curses[cid].oclass, ret);
@@ -1517,8 +1517,8 @@ nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
 		return ret;
 	}
 
-	ret = nv50_base_create(&drm->client.device, disp->disp, base->id,
-			       disp->sync->bo.offset, &base->chan);
+	ret = nv50_base_create(&drm->client.device, &disp->disp->object,
+			       base->id, disp->sync->bo.offset, &base->chan);
 	if (ret)
 		return ret;
 
@@ -2105,7 +2105,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
 		return;
 	}
 
-	if (disp->disp->oclass < GF110_DISP) {
+	if (disp->disp->object.oclass < GF110_DISP) {
 		asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
 		asyh->set.ilut = true;
 	} else {
@@ -2404,12 +2404,12 @@ nv50_head_create(struct drm_device *dev, int index)
 	}
 
 	/* allocate overlay resources */
-	ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
+	ret = nv50_oimm_create(device, &disp->disp->object, index, &head->oimm);
 	if (ret)
 		goto out;
 
-	ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
-			       &head->ovly);
+	ret = nv50_ovly_create(device, &disp->disp->object, index,
+			       disp->sync->bo.offset, &head->ovly);
 	if (ret)
 		goto out;
 
@@ -2435,7 +2435,7 @@ nv50_outp_release(struct nouveau_encoder *nv_encoder)
 		.base.hashm  = nv_encoder->dcb->hashm,
 	};
 
-	nvif_mthd(disp->disp, 0, &args, sizeof(args));
+	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 	nv_encoder->or = -1;
 	nv_encoder->link = 0;
 }
@@ -2456,7 +2456,7 @@ nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
 	};
 	int ret;
 
-	ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
+	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 	if (ret) {
 		NV_ERROR(drm, "error acquiring output path: %d\n", ret);
 		return ret;
@@ -2618,7 +2618,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 	if (args.load.data == 0)
 		args.load.data = 340;
 
-	ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
+	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 	if (ret || !args.load.load)
 		return connector_status_disconnected;
 
@@ -2694,7 +2694,7 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
 				(0x0100 << nv_crtc->index),
 	};
 
-	nvif_mthd(disp->disp, 0, &args, sizeof(args));
+	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 }
 
 static void
@@ -2724,7 +2724,7 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
 
 	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
 
-	nvif_mthd(disp->disp, 0, &args,
+	nvif_mthd(&disp->disp->object, 0, &args,
 		  sizeof(args.base) + drm_eld_size(args.data));
 }
 
@@ -2747,7 +2747,7 @@ nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
 			       (0x0100 << nv_crtc->index),
 	};
 
-	nvif_mthd(disp->disp, 0, &args, sizeof(args));
+	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 }
 
 static void
@@ -2808,7 +2808,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
 		+ sizeof(args.pwr)
 		+ args.pwr.avi_infoframe_length
 		+ args.pwr.vendor_infoframe_length;
-	nvif_mthd(disp->disp, 0, &args, size);
+	nvif_mthd(&disp->disp->object, 0, &args, size);
 	nv50_audio_enable(encoder, mode);
 }
 
@@ -2923,7 +2923,7 @@ nv50_msto_prepare(struct nv50_msto *msto)
 		  msto->encoder.name, msto->head->base.base.name,
 		  args.vcpi.start_slot, args.vcpi.num_slots,
 		  args.vcpi.pbn, args.vcpi.aligned_pbn);
-	nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
+	nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
 }
 
 static int
@@ -3341,7 +3341,7 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
 		.mst.state = state,
 	};
 	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
-	struct nvif_object *disp = &drm->display->disp;
+	struct nvif_object *disp = &drm->display->disp.object;
 	int ret;
 
 	if (dpcd >= 0x12) {
@@ -3610,7 +3610,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
 				lvds.lvds.script |= 0x0200;
 		}
 
-		nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
+		nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
 		break;
 	case DCB_OUTPUT_DP:
 		if (nv_connector->base.display_info.bpc == 6)
@@ -3696,7 +3696,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		struct nvkm_i2c_aux *aux =
 			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
 		if (aux) {
-			if (disp->disp->oclass < GF110_DISP) {
+			if (disp->disp->object.oclass < GF110_DISP) {
 				/* HW has no support for address-only
 				 * transactions, so we're required to
 				 * use custom I2C-over-AUX code.
@@ -3709,7 +3709,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		}
 
 		/*TODO: Use DP Info Table to check for support. */
-		if (disp->disp->oclass >= GF110_DISP) {
+		if (disp->disp->object.oclass >= GF110_DISP) {
 			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
 					    nv_connector->base.base.id,
 					    &nv_encoder->dp.mstm);
@@ -4474,13 +4474,13 @@ nv50_display_create(struct drm_device *dev)
 		goto out;
 
 	/* allocate master evo channel */
-	ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
-			      &disp->mast);
+	ret = nv50_core_create(device, &disp->disp->object,
+			       disp->sync->bo.offset, &disp->mast);
 	if (ret)
 		goto out;
 
 	/* create crtc objects to represent the hw heads */
-	if (disp->disp->oclass >= GF110_DISP)
+	if (disp->disp->object.oclass >= GF110_DISP)
 		crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
 	else
 		crtcs = 0x3;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index c817b02b7acf..3db12504140f 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -1,6 +1,7 @@
 nvif-y := nvif/object.o
 nvif-y += nvif/client.o
 nvif-y += nvif/device.o
+nvif-y += nvif/disp.o
 nvif-y += nvif/driver.o
 nvif-y += nvif/fifo.o
 nvif-y += nvif/mem.o
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
new file mode 100644
index 000000000000..7006482e8e29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/disp.h>
+#include <nvif/device.h>
+
+#include <nvif/class.h>
+
+void
+nvif_disp_dtor(struct nvif_disp *disp)
+{
+	nvif_object_fini(&disp->object);
+}
+
+int
+nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
+{
+	static const struct nvif_mclass disps[] = {
+		{ GP102_DISP, -1 },
+		{ GP100_DISP, -1 },
+		{ GM200_DISP, -1 },
+		{ GM107_DISP, -1 },
+		{ GK110_DISP, -1 },
+		{ GK104_DISP, -1 },
+		{ GF110_DISP, -1 },
+		{ GT214_DISP, -1 },
+		{ GT206_DISP, -1 },
+		{ GT200_DISP, -1 },
+		{   G82_DISP, -1 },
+		{  NV50_DISP, -1 },
+		{  NV04_DISP, -1 },
+		{}
+	};
+	int cid = nvif_sclass(&device->object, disps, oclass);
+	disp->object.client = NULL;
+	if (cid < 0)
+		return cid;
+
+	return nvif_object_init(&device->object, 0, disps[cid].oclass,
+				NULL, 0, &disp->object);
+}

From 30ed49b55b6e44e004c3095671e74fea93ee84cb Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1108/1286] drm/nouveau/kms/nv50-: move code underneath
 dispnv50/

The code is about to be split up, and this matches dispnv04.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/Kbuild                            | 8 +++-----
 drivers/gpu/drm/nouveau/dispnv50/Kbuild                   | 1 +
 .../gpu/drm/nouveau/{nv50_display.c => dispnv50/disp.c}   | 0
 3 files changed, 4 insertions(+), 5 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/Kbuild
 rename drivers/gpu/drm/nouveau/{nv50_display.c => dispnv50/disp.c} (100%)

diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 9c0c650655e9..b17843dd050d 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -38,14 +38,16 @@ nouveau-y += nouveau_vmm.o
 
 # DRM - modesetting
 nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-y += nouveau_bios.o
 nouveau-y += nouveau_connector.o
 nouveau-y += nouveau_display.o
-nouveau-y += nv50_display.o
 nouveau-y += nouveau_dp.o
 nouveau-y += nouveau_fbcon.o
 nouveau-y += nv04_fbcon.o
 nouveau-y += nv50_fbcon.o
 nouveau-y += nvc0_fbcon.o
+include $(src)/dispnv04/Kbuild
+include $(src)/dispnv50/Kbuild
 
 # DRM - command submission
 nouveau-y += nouveau_abi16.o
@@ -59,8 +61,4 @@ nouveau-y += nv50_fence.o
 nouveau-y += nv84_fence.o
 nouveau-y += nvc0_fence.o
 
-# DRM - prehistoric modesetting (NV04-G7x)
-nouveau-y += nouveau_bios.o
-include $(src)/dispnv04/Kbuild
-
 obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
new file mode 100644
index 000000000000..43fc8be49391
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -0,0 +1 @@
+nouveau-y += dispnv50/disp.o
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
similarity index 100%
rename from drivers/gpu/drm/nouveau/nv50_display.c
rename to drivers/gpu/drm/nouveau/dispnv50/disp.c

From 62b290fc7b36e8fec2a370b946d7117c1899b6c1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1109/1286] drm/nouveau/kms/nv50-: fix i2c-over-aux on anx9805

We don't support address-only transactions there.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index f2156c8ca90f..9aa17500d57c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -3836,7 +3836,6 @@ nv50_pior_func = {
 static int
 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
-	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nvkm_i2c_bus *bus = NULL;
@@ -3854,7 +3853,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		break;
 	case DCB_OUTPUT_DP:
 		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
-		ddc  = aux ? &nv_connector->aux.ddc : NULL;
+		ddc  = aux ? &aux->i2c : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	default:

From 5bca1621c07c3ad37b5a4943450a892e18984df0 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1110/1286] drm/nouveau/kms/nv50-: move fb ctxdma tracking into
 windows

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 163 ++++++++++++------------
 1 file changed, 84 insertions(+), 79 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 9aa17500d57c..fc3055d5c8c9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -395,7 +395,7 @@ nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
  * DMA EVO channel
  *****************************************************************************/
 
-struct nv50_dmac_ctxdma {
+struct nv50_wndw_ctxdma {
 	struct list_head head;
 	struct nvif_object object;
 };
@@ -408,7 +408,6 @@ struct nv50_dmac {
 
 	struct nvif_object sync;
 	struct nvif_object vram;
-	struct list_head ctxdma;
 
 	/* Protects against concurrent pushbuf access to this channel, lock is
 	 * grabbed by evo_wait (if the pushbuf reservation is successful) and
@@ -416,83 +415,9 @@ struct nv50_dmac {
 	struct mutex lock;
 };
 
-static void
-nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
-{
-	nvif_object_fini(&ctxdma->object);
-	list_del(&ctxdma->head);
-	kfree(ctxdma);
-}
-
-static struct nv50_dmac_ctxdma *
-nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
-{
-	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
-	struct nv50_dmac_ctxdma *ctxdma;
-	const u8    kind = fb->nvbo->kind;
-	const u32 handle = 0xfb000000 | kind;
-	struct {
-		struct nv_dma_v0 base;
-		union {
-			struct nv50_dma_v0 nv50;
-			struct gf100_dma_v0 gf100;
-			struct gf119_dma_v0 gf119;
-		};
-	} args = {};
-	u32 argc = sizeof(args.base);
-	int ret;
-
-	list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
-		if (ctxdma->object.handle == handle)
-			return ctxdma;
-	}
-
-	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
-		return ERR_PTR(-ENOMEM);
-	list_add(&ctxdma->head, &dmac->ctxdma);
-
-	args.base.target = NV_DMA_V0_TARGET_VRAM;
-	args.base.access = NV_DMA_V0_ACCESS_RDWR;
-	args.base.start  = 0;
-	args.base.limit  = drm->client.device.info.ram_user - 1;
-
-	if (drm->client.device.info.chipset < 0x80) {
-		args.nv50.part = NV50_DMA_V0_PART_256;
-		argc += sizeof(args.nv50);
-	} else
-	if (drm->client.device.info.chipset < 0xc0) {
-		args.nv50.part = NV50_DMA_V0_PART_256;
-		args.nv50.kind = kind;
-		argc += sizeof(args.nv50);
-	} else
-	if (drm->client.device.info.chipset < 0xd0) {
-		args.gf100.kind = kind;
-		argc += sizeof(args.gf100);
-	} else {
-		args.gf119.page = GF119_DMA_V0_PAGE_LP;
-		args.gf119.kind = kind;
-		argc += sizeof(args.gf119);
-	}
-
-	ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
-			       &args, argc, &ctxdma->object);
-	if (ret) {
-		nv50_dmac_ctxdma_del(ctxdma);
-		return ERR_PTR(ret);
-	}
-
-	return ctxdma;
-}
-
 static void
 nv50_dmac_destroy(struct nv50_dmac *dmac)
 {
-	struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;
-
-	list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
-		nv50_dmac_ctxdma_del(ctxdma);
-	}
-
 	nvif_object_fini(&dmac->vram);
 	nvif_object_fini(&dmac->sync);
 
@@ -511,7 +436,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	int ret;
 
 	mutex_init(&dmac->lock);
-	INIT_LIST_HEAD(&dmac->ctxdma);
 
 	ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
 				&dmac->push);
@@ -740,6 +664,11 @@ struct nv50_wndw {
 	const struct nv50_wndw_func *func;
 	struct nv50_dmac *dmac;
 
+	struct {
+		struct nvif_object *parent;
+		struct list_head list;
+	} ctxdma;
+
 	struct drm_plane plane;
 
 	struct nvif_notify notify;
@@ -770,6 +699,74 @@ struct nv50_wndw_func {
 	u32 (*update)(struct nv50_wndw *, u32 interlock);
 };
 
+static void
+nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
+{
+	nvif_object_fini(&ctxdma->object);
+	list_del(&ctxdma->head);
+	kfree(ctxdma);
+}
+
+static struct nv50_wndw_ctxdma *
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+{
+	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+	struct nv50_wndw_ctxdma *ctxdma;
+	const u8    kind = fb->nvbo->kind;
+	const u32 handle = 0xfb000000 | kind;
+	struct {
+		struct nv_dma_v0 base;
+		union {
+			struct nv50_dma_v0 nv50;
+			struct gf100_dma_v0 gf100;
+			struct gf119_dma_v0 gf119;
+		};
+	} args = {};
+	u32 argc = sizeof(args.base);
+	int ret;
+
+	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
+		if (ctxdma->object.handle == handle)
+			return ctxdma;
+	}
+
+	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
+		return ERR_PTR(-ENOMEM);
+	list_add(&ctxdma->head, &wndw->ctxdma.list);
+
+	args.base.target = NV_DMA_V0_TARGET_VRAM;
+	args.base.access = NV_DMA_V0_ACCESS_RDWR;
+	args.base.start  = 0;
+	args.base.limit  = drm->client.device.info.ram_user - 1;
+
+	if (drm->client.device.info.chipset < 0x80) {
+		args.nv50.part = NV50_DMA_V0_PART_256;
+		argc += sizeof(args.nv50);
+	} else
+	if (drm->client.device.info.chipset < 0xc0) {
+		args.nv50.part = NV50_DMA_V0_PART_256;
+		args.nv50.kind = kind;
+		argc += sizeof(args.nv50);
+	} else
+	if (drm->client.device.info.chipset < 0xd0) {
+		args.gf100.kind = kind;
+		argc += sizeof(args.gf100);
+	} else {
+		args.gf119.page = GF119_DMA_V0_PAGE_LP;
+		args.gf119.kind = kind;
+		argc += sizeof(args.gf119);
+	}
+
+	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
+			       &args, argc, &ctxdma->object);
+	if (ret) {
+		nv50_wndw_ctxdma_del(ctxdma);
+		return ERR_PTR(ret);
+	}
+
+	return ctxdma;
+}
+
 static int
 nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
@@ -944,7 +941,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 	struct nv50_wndw *wndw = nv50_wndw(plane);
 	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
 	struct nv50_head_atom *asyh;
-	struct nv50_dmac_ctxdma *ctxdma;
+	struct nv50_wndw_ctxdma *ctxdma;
 	int ret;
 
 	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
@@ -955,7 +952,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 	if (ret)
 		return ret;
 
-	ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
+	ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
 	if (IS_ERR(ctxdma)) {
 		nouveau_bo_unpin(fb->nvbo);
 		return PTR_ERR(ctxdma);
@@ -1030,7 +1027,13 @@ static void
 nv50_wndw_destroy(struct drm_plane *plane)
 {
 	struct nv50_wndw *wndw = nv50_wndw(plane);
+	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
 	void *data;
+
+	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
+		nv50_wndw_ctxdma_del(ctxdma);
+	}
+
 	nvif_notify_fini(&wndw->notify);
 	data = wndw->func->dtor(wndw);
 	drm_plane_cleanup(&wndw->plane);
@@ -1069,6 +1072,7 @@ nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
 
 	wndw->func = func;
 	wndw->dmac = dmac;
+	wndw->ctxdma.parent = &dmac->base.user;
 
 	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
 				       format, nformat, NULL,
@@ -1077,6 +1081,7 @@ nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
 		return ret;
 
 	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
+	INIT_LIST_HEAD(&wndw->ctxdma.list);
 	return 0;
 }
 

From a97c530eb968bad8d945d4f64fb550fa37a9d362 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1111/1286] drm/nouveau/kms/nv50-: modify overlay allocation so
 the code can be split

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 277 +++++++++++++++---------
 1 file changed, 179 insertions(+), 98 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index fc3055d5c8c9..26caca270ec8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -340,57 +340,6 @@ nv50_chan_destroy(struct nv50_chan *chan)
 	nvif_object_fini(&chan->user);
 }
 
-/******************************************************************************
- * PIO EVO channel
- *****************************************************************************/
-
-struct nv50_pioc {
-	struct nv50_chan base;
-};
-
-static void
-nv50_pioc_destroy(struct nv50_pioc *pioc)
-{
-	nv50_chan_destroy(&pioc->base);
-}
-
-static int
-nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
-		 const s32 *oclass, u8 head, void *data, u32 size,
-		 struct nv50_pioc *pioc)
-{
-	return nv50_chan_create(device, disp, oclass, head, data, size,
-				&pioc->base);
-}
-
-/******************************************************************************
- * Overlay Immediate
- *****************************************************************************/
-
-struct nv50_oimm {
-	struct nv50_pioc base;
-};
-
-static int
-nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
-		 int head, struct nv50_oimm *oimm)
-{
-	struct nv50_disp_cursor_v0 args = {
-		.head = head,
-	};
-	static const s32 oclass[] = {
-		GK104_DISP_OVERLAY,
-		GF110_DISP_OVERLAY,
-		GT214_DISP_OVERLAY,
-		G82_DISP_OVERLAY,
-		NV50_DISP_OVERLAY,
-		0
-	};
-
-	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
-				&oimm->base);
-}
-
 /******************************************************************************
  * DMA EVO channel
  *****************************************************************************/
@@ -541,43 +490,12 @@ nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
 				syncbuf, &base->base);
 }
 
-/******************************************************************************
- * Overlay
- *****************************************************************************/
-
-struct nv50_ovly {
-	struct nv50_dmac base;
-};
-
-static int
-nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
-		 int head, u64 syncbuf, struct nv50_ovly *ovly)
-{
-	struct nv50_disp_overlay_channel_dma_v0 args = {
-		.head = head,
-	};
-	static const s32 oclass[] = {
-		GK104_DISP_OVERLAY_CONTROL_DMA,
-		GF110_DISP_OVERLAY_CONTROL_DMA,
-		GT214_DISP_OVERLAY_CHANNEL_DMA,
-		GT200_DISP_OVERLAY_CHANNEL_DMA,
-		G82_DISP_OVERLAY_CHANNEL_DMA,
-		NV50_DISP_OVERLAY_CHANNEL_DMA,
-		0
-	};
-
-	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
-				syncbuf, &ovly->base);
-}
-
 struct nv50_head {
 	struct nouveau_crtc base;
 	struct {
 		struct nouveau_bo *nvbo[2];
 		int next;
 	} lut;
-	struct nv50_ovly ovly;
-	struct nv50_oimm oimm;
 };
 
 #define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
@@ -662,7 +580,9 @@ evo_kick(u32 *push, void *evoc)
 
 struct nv50_wndw {
 	const struct nv50_wndw_func *func;
+	const struct nv50_wimm_func *immd;
 	struct nv50_dmac *dmac;
+	int id;
 
 	struct {
 		struct nvif_object *parent;
@@ -671,6 +591,9 @@ struct nv50_wndw {
 
 	struct drm_plane plane;
 
+	struct nv50_dmac wndw;
+	struct nv50_dmac wimm;
+
 	struct nvif_notify notify;
 	u16 ntfy;
 	u16 sema;
@@ -699,6 +622,9 @@ struct nv50_wndw_func {
 	u32 (*update)(struct nv50_wndw *, u32 interlock);
 };
 
+struct nv50_wimm_func {
+};
+
 static void
 nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
 {
@@ -1028,14 +954,17 @@ nv50_wndw_destroy(struct drm_plane *plane)
 {
 	struct nv50_wndw *wndw = nv50_wndw(plane);
 	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
-	void *data;
+	void *data = wndw;
 
 	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
 		nv50_wndw_ctxdma_del(ctxdma);
 	}
 
 	nvif_notify_fini(&wndw->notify);
-	data = wndw->func->dtor(wndw);
+	if (wndw->func->dtor)
+		data = wndw->func->dtor(wndw);
+	nv50_dmac_destroy(&wndw->wimm);
+	nv50_dmac_destroy(&wndw->wndw);
 	drm_plane_cleanup(&wndw->plane);
 	kfree(data);
 }
@@ -1085,6 +1014,170 @@ nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
 	return 0;
 }
 
+static int
+nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
+	       enum drm_plane_type type, const char *name, int index,
+	       const u32 *format, struct nv50_wndw **pwndw)
+{
+	struct nv50_wndw *wndw;
+	int nformat;
+	int ret;
+
+	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
+		return -ENOMEM;
+	wndw->id = index;
+
+	for (nformat = 0; format[nformat]; nformat++);
+
+	ret = nv50_wndw_ctor(func, dev, type, name, index,
+			     &wndw->wndw, format, nformat, wndw);
+	if (ret) {
+		kfree(*pwndw);
+		*pwndw = NULL;
+	}
+
+	return ret;
+}
+
+/******************************************************************************
+ * Overlay
+ *****************************************************************************/
+
+static const struct nv50_wimm_func
+oimm507b = {
+};
+
+static int
+oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+	       s32 oclass, struct nv50_wndw *wndw)
+{
+	struct nv50_disp_overlay_v0 args = {
+		.head = wndw->id,
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int ret;
+
+	ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
+			       sizeof(args), &wndw->wimm.base.user);
+	if (ret) {
+		NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
+	wndw->immd = func;
+	return 0;
+}
+
+static int
+oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
+{
+	return oimm507b_init_(&oimm507b, drm, oclass, wndw);
+}
+
+static int
+nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
+{
+	static const struct {
+		s32 oclass;
+		int version;
+		int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
+	} oimms[] = {
+		{ GK104_DISP_OVERLAY, 0, oimm507b_init },
+		{ GF110_DISP_OVERLAY, 0, oimm507b_init },
+		{ GT214_DISP_OVERLAY, 0, oimm507b_init },
+		{   G82_DISP_OVERLAY, 0, oimm507b_init },
+		{  NV50_DISP_OVERLAY, 0, oimm507b_init },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
+
+	cid = nvif_mclass(&disp->disp->object, oimms);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported overlay immediate class\n");
+		return cid;
+	}
+
+	return oimms[cid].init(drm, oimms[cid].oclass, wndw);
+}
+
+static const struct nv50_wndw_func
+ovly507e = {
+};
+
+static const u32
+ovly507e_format[] = {
+	0
+};
+
+static int
+ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
+	      struct nouveau_drm *drm, int head, s32 oclass,
+	      struct nv50_wndw **pwndw)
+{
+	struct nv50_disp_overlay_channel_dma_v0 args = {
+		.head = head,
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	struct nv50_wndw *wndw;
+	int ret;
+
+	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
+			     "ovly", head, format, &wndw);
+	if (*pwndw = wndw, ret)
+		return ret;
+
+	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+			       &oclass, 0, &args, sizeof(args),
+			       disp->sync->bo.offset, &wndw->wndw);
+	if (ret) {
+		NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass, pwndw);
+}
+
+static int
+nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+	static const struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+	} ovlys[] = {
+		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
+		{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
+		{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{   G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{  NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid, ret;
+
+	cid = nvif_mclass(&disp->disp->object, ovlys);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported overlay class\n");
+		return cid;
+	}
+
+	ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw);
+	if (ret)
+		return ret;
+
+	return nv50_oimm_init(drm, *pwndw);
+}
+
 /******************************************************************************
  * Cursor plane
  *****************************************************************************/
@@ -2347,9 +2440,6 @@ nv50_head_destroy(struct drm_crtc *crtc)
 	struct nv50_head *head = nv50_head(crtc);
 	int i;
 
-	nv50_dmac_destroy(&head->ovly.base);
-	nv50_pioc_destroy(&head->oimm.base);
-
 	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
 		nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
 
@@ -2372,11 +2462,10 @@ static int
 nv50_head_create(struct drm_device *dev, int index)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->client.device;
-	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_head *head;
 	struct nv50_base *base;
 	struct nv50_curs *curs;
+	struct nv50_wndw *wndw;
 	struct drm_crtc *crtc;
 	int ret, i;
 
@@ -2409,15 +2498,7 @@ nv50_head_create(struct drm_device *dev, int index)
 	}
 
 	/* allocate overlay resources */
-	ret = nv50_oimm_create(device, &disp->disp->object, index, &head->oimm);
-	if (ret)
-		goto out;
-
-	ret = nv50_ovly_create(device, &disp->disp->object, index,
-			       disp->sync->bo.offset, &head->ovly);
-	if (ret)
-		goto out;
-
+	ret = nv50_ovly_new(drm, head->base.index, &wndw);
 out:
 	if (ret)
 		nv50_head_destroy(crtc);

From b97ace4072267ea44a254ef2c3b001d2122313dc Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1112/1286] drm/nouveau/kms/nv50-: modify cursor allocation so
 the code can be split

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 136 +++++++++++++-----------
 1 file changed, 71 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 26caca270ec8..33cb358ebeeb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -617,12 +617,14 @@ struct nv50_wndw_func {
 	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*image_clr)(struct nv50_wndw *);
 	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
-	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
 
 	u32 (*update)(struct nv50_wndw *, u32 interlock);
 };
 
 struct nv50_wimm_func {
+	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+	u32 (*update)(struct nv50_wndw *, u32 interlock);
 };
 
 static void
@@ -728,9 +730,12 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
 	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
 	if (asyw->set.image) wndw->func->image_set(wndw, asyw);
 	if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
-	if (asyw->set.point) wndw->func->point    (wndw, asyw);
+	if (asyw->set.point) {
+		wndw->immd->point(wndw, asyw);
+		wndw->immd->update(wndw, interlock);
+	}
 
-	return wndw->func->update(wndw, interlock);
+	return wndw->func->update ? wndw->func->update(wndw, interlock) : 0;
 }
 
 static void
@@ -1181,28 +1186,26 @@ nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 /******************************************************************************
  * Cursor plane
  *****************************************************************************/
-#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)
-
-struct nv50_curs {
-	struct nv50_wndw wndw;
-	struct nvif_object chan;
-};
-
 static u32
 nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
 {
-	struct nv50_curs *curs = nv50_curs(wndw);
-	nvif_wr32(&curs->chan, 0x0080, 0x00000000);
+	nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
 	return 0;
 }
 
 static void
 nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-	struct nv50_curs *curs = nv50_curs(wndw);
-	nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
+	nvif_wr32(&wndw->wimm.base.user, 0x0084, (asyw->point.y << 16) |
+						  asyw->point.x);
 }
 
+static const struct nv50_wimm_func
+curs507a = {
+	.point = nv50_curs_point,
+	.update = nv50_curs_update,
+};
+
 static void
 nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
 		  struct nv50_wndw_atom *asyw)
@@ -1257,47 +1260,72 @@ nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 	return 0;
 }
 
-static void *
-nv50_curs_dtor(struct nv50_wndw *wndw)
-{
-	struct nv50_curs *curs = nv50_curs(wndw);
-	nvif_object_fini(&curs->chan);
-	return curs;
-}
-
 static const u32
 nv50_curs_format[] = {
 	DRM_FORMAT_ARGB8888,
+	0
 };
 
 static const struct nv50_wndw_func
 nv50_curs = {
-	.dtor = nv50_curs_dtor,
 	.acquire = nv50_curs_acquire,
 	.release = nv50_curs_release,
 	.prepare = nv50_curs_prepare,
-	.point = nv50_curs_point,
-	.update = nv50_curs_update,
 };
 
 static int
-nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
-	      struct nv50_curs **pcurs)
+curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+	      int head, s32 oclass, struct nv50_wndw **pwndw)
 {
-	static const struct nvif_mclass curses[] = {
-		{ GK104_DISP_CURSOR, 0 },
-		{ GF110_DISP_CURSOR, 0 },
-		{ GT214_DISP_CURSOR, 0 },
-		{   G82_DISP_CURSOR, 0 },
-		{  NV50_DISP_CURSOR, 0 },
-		{}
-	};
 	struct nv50_disp_cursor_v0 args = {
-		.head = head->base.index,
+		.head = head,
 	};
 	struct nv50_disp *disp = nv50_disp(drm->dev);
-	struct nv50_curs *curs;
-	int cid, ret;
+	struct nv50_wndw *wndw;
+	int ret;
+
+	ret = nv50_wndw_new_(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
+			     "curs", head, nv50_curs_format, &wndw);
+	if (*pwndw = wndw, ret)
+		return ret;
+
+	ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
+			       sizeof(args), &wndw->wimm.base.user);
+	if (ret) {
+		NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
+	wndw->immd = func;
+	wndw->ctxdma.parent = &disp->mast.base.base.user;
+	return 0;
+}
+
+static int
+curs507a_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return curs507a_new_(&curs507a, drm, head, oclass, pwndw);
+}
+
+static int
+nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+	struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+	} curses[] = {
+		{ GK104_DISP_CURSOR, 0, curs507a_new },
+		{ GF110_DISP_CURSOR, 0, curs507a_new },
+		{ GT214_DISP_CURSOR, 0, curs507a_new },
+		{   G82_DISP_CURSOR, 0, curs507a_new },
+		{  NV50_DISP_CURSOR, 0, curs507a_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
 
 	cid = nvif_mclass(&disp->disp->object, curses);
 	if (cid < 0) {
@@ -1305,27 +1333,7 @@ nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
 		return cid;
 	}
 
-	if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
-		return -ENOMEM;
-
-	ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
-			     "curs", head->base.index, &disp->mast.base,
-			     nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
-			     &curs->wndw);
-	if (ret) {
-		kfree(curs);
-		return ret;
-	}
-
-	ret = nvif_object_init(&disp->disp->object, 0, curses[cid].oclass,
-			       &args, sizeof(args), &curs->chan);
-	if (ret) {
-		NV_ERROR(drm, "curs%04x allocation failed: %d\n",
-			 curses[cid].oclass, ret);
-		return ret;
-	}
-
-	return 0;
+	return curses[cid].new(drm, head, curses[cid].oclass, pwndw);
 }
 
 /******************************************************************************
@@ -2464,8 +2472,7 @@ nv50_head_create(struct drm_device *dev, int index)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_head *head;
 	struct nv50_base *base;
-	struct nv50_curs *curs;
-	struct nv50_wndw *wndw;
+	struct nv50_wndw *curs, *wndw;
 	struct drm_crtc *crtc;
 	int ret, i;
 
@@ -2476,16 +2483,15 @@ nv50_head_create(struct drm_device *dev, int index)
 	head->base.index = index;
 	ret = nv50_base_new(drm, head, &base);
 	if (ret == 0)
-		ret = nv50_curs_new(drm, head, &curs);
+		ret = nv50_curs_new(drm, head->base.index, &curs);
 	if (ret) {
 		kfree(head);
 		return ret;
 	}
 
 	crtc = &head->base.base;
-	drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
-				  &curs->wndw.plane, &nv50_head_func,
-				  "head-%d", head->base.index);
+	drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane, &curs->plane,
+				  &nv50_head_func, "head-%d", head->base.index);
 	drm_crtc_helper_add(crtc, &nv50_head_help);
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 

From d7c6e97a32329032ba7af1f53cab2767832fed77 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1113/1286] drm/nouveau/kms/nv50-: modify base allocation so
 the code can be split

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 236 +++++++++++-------------
 1 file changed, 104 insertions(+), 132 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 33cb358ebeeb..f0edf9b5337a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -468,28 +468,6 @@ struct nv50_sync {
 	u32 data;
 };
 
-static int
-nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
-		 int head, u64 syncbuf, struct nv50_sync *base)
-{
-	struct nv50_disp_base_channel_dma_v0 args = {
-		.head = head,
-	};
-	static const s32 oclass[] = {
-		GK110_DISP_BASE_CHANNEL_DMA,
-		GK104_DISP_BASE_CHANNEL_DMA,
-		GF110_DISP_BASE_CHANNEL_DMA,
-		GT214_DISP_BASE_CHANNEL_DMA,
-		GT200_DISP_BASE_CHANNEL_DMA,
-		G82_DISP_BASE_CHANNEL_DMA,
-		NV50_DISP_BASE_CHANNEL_DMA,
-		0
-	};
-
-	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
-				syncbuf, &base->base);
-}
-
 struct nv50_head {
 	struct nouveau_crtc base;
 	struct {
@@ -581,7 +559,6 @@ evo_kick(u32 *push, void *evoc)
 struct nv50_wndw {
 	const struct nv50_wndw_func *func;
 	const struct nv50_wimm_func *immd;
-	struct nv50_dmac *dmac;
 	int id;
 
 	struct {
@@ -601,7 +578,6 @@ struct nv50_wndw {
 };
 
 struct nv50_wndw_func {
-	void *(*dtor)(struct nv50_wndw *);
 	int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
 		       struct nv50_head_atom *asyh);
 	void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
@@ -959,19 +935,16 @@ nv50_wndw_destroy(struct drm_plane *plane)
 {
 	struct nv50_wndw *wndw = nv50_wndw(plane);
 	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
-	void *data = wndw;
 
 	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
 		nv50_wndw_ctxdma_del(ctxdma);
 	}
 
 	nvif_notify_fini(&wndw->notify);
-	if (wndw->func->dtor)
-		data = wndw->func->dtor(wndw);
 	nv50_dmac_destroy(&wndw->wimm);
 	nv50_dmac_destroy(&wndw->wndw);
 	drm_plane_cleanup(&wndw->plane);
-	kfree(data);
+	kfree(wndw);
 }
 
 static const struct drm_plane_funcs
@@ -984,6 +957,12 @@ nv50_wndw = {
 	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
 };
 
+static int
+nv50_wndw_notify(struct nvif_notify *notify)
+{
+	return NVIF_NOTIFY_KEEP;
+}
+
 static void
 nv50_wndw_fini(struct nv50_wndw *wndw)
 {
@@ -996,29 +975,6 @@ nv50_wndw_init(struct nv50_wndw *wndw)
 	nvif_notify_get(&wndw->notify);
 }
 
-static int
-nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
-	       enum drm_plane_type type, const char *name, int index,
-	       struct nv50_dmac *dmac, const u32 *format, int nformat,
-	       struct nv50_wndw *wndw)
-{
-	int ret;
-
-	wndw->func = func;
-	wndw->dmac = dmac;
-	wndw->ctxdma.parent = &dmac->base.user;
-
-	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
-				       format, nformat, NULL,
-				       type, "%s-%d", name, index);
-	if (ret)
-		return ret;
-
-	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
-	INIT_LIST_HEAD(&wndw->ctxdma.list);
-	return 0;
-}
-
 static int
 nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 	       enum drm_plane_type type, const char *name, int index,
@@ -1030,18 +986,27 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 
 	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
 		return -ENOMEM;
+	wndw->func = func;
 	wndw->id = index;
 
+	wndw->ctxdma.parent = &wndw->wndw.base.user;
+	INIT_LIST_HEAD(&wndw->ctxdma.list);
+
 	for (nformat = 0; format[nformat]; nformat++);
 
-	ret = nv50_wndw_ctor(func, dev, type, name, index,
-			     &wndw->wndw, format, nformat, wndw);
+	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
+				       format, nformat, NULL,
+				       type, "%s-%d", name, index);
 	if (ret) {
 		kfree(*pwndw);
 		*pwndw = NULL;
+		return ret;
 	}
 
-	return ret;
+	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
+
+	wndw->notify.func = nv50_wndw_notify;
+	return 0;
 }
 
 /******************************************************************************
@@ -1339,53 +1304,36 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 /******************************************************************************
  * Primary plane
  *****************************************************************************/
-#define nv50_base(p) container_of((p), struct nv50_base, wndw)
-
-struct nv50_base {
-	struct nv50_wndw wndw;
-	struct nv50_sync chan;
-	int id;
-};
-
-static int
-nv50_base_notify(struct nvif_notify *notify)
-{
-	return NVIF_NOTIFY_KEEP;
-}
-
 static void
 nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-	struct nv50_base *base = nv50_base(wndw);
 	u32 *push;
-	if ((push = evo_wait(&base->chan, 2))) {
+	if ((push = evo_wait(&wndw->wndw, 2))) {
 		evo_mthd(push, 0x00e0, 1);
 		evo_data(push, asyw->lut.enable << 30);
-		evo_kick(push, &base->chan);
+		evo_kick(push, &wndw->wndw);
 	}
 }
 
 static void
 nv50_base_image_clr(struct nv50_wndw *wndw)
 {
-	struct nv50_base *base = nv50_base(wndw);
 	u32 *push;
-	if ((push = evo_wait(&base->chan, 4))) {
+	if ((push = evo_wait(&wndw->wndw, 4))) {
 		evo_mthd(push, 0x0084, 1);
 		evo_data(push, 0x00000000);
 		evo_mthd(push, 0x00c0, 1);
 		evo_data(push, 0x00000000);
-		evo_kick(push, &base->chan);
+		evo_kick(push, &wndw->wndw);
 	}
 }
 
 static void
 nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-	struct nv50_base *base = nv50_base(wndw);
-	const s32 oclass = base->chan.base.base.user.oclass;
+	const s32 oclass = wndw->wndw.base.user.oclass;
 	u32 *push;
-	if ((push = evo_wait(&base->chan, 10))) {
+	if ((push = evo_wait(&wndw->wndw, 10))) {
 		evo_mthd(push, 0x0084, 1);
 		evo_data(push, (asyw->image.mode << 8) |
 			       (asyw->image.interval << 4));
@@ -1421,77 +1369,72 @@ nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 					asyw->image.block);
 			evo_data(push, asyw->image.format << 8);
 		}
-		evo_kick(push, &base->chan);
+		evo_kick(push, &wndw->wndw);
 	}
 }
 
 static void
 nv50_base_ntfy_clr(struct nv50_wndw *wndw)
 {
-	struct nv50_base *base = nv50_base(wndw);
 	u32 *push;
-	if ((push = evo_wait(&base->chan, 2))) {
+	if ((push = evo_wait(&wndw->wndw, 2))) {
 		evo_mthd(push, 0x00a4, 1);
 		evo_data(push, 0x00000000);
-		evo_kick(push, &base->chan);
+		evo_kick(push, &wndw->wndw);
 	}
 }
 
 static void
 nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-	struct nv50_base *base = nv50_base(wndw);
 	u32 *push;
-	if ((push = evo_wait(&base->chan, 3))) {
+	if ((push = evo_wait(&wndw->wndw, 3))) {
 		evo_mthd(push, 0x00a0, 2);
 		evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
 		evo_data(push, asyw->ntfy.handle);
-		evo_kick(push, &base->chan);
+		evo_kick(push, &wndw->wndw);
 	}
 }
 
 static void
 nv50_base_sema_clr(struct nv50_wndw *wndw)
 {
-	struct nv50_base *base = nv50_base(wndw);
 	u32 *push;
-	if ((push = evo_wait(&base->chan, 2))) {
+	if ((push = evo_wait(&wndw->wndw, 2))) {
 		evo_mthd(push, 0x0094, 1);
 		evo_data(push, 0x00000000);
-		evo_kick(push, &base->chan);
+		evo_kick(push, &wndw->wndw);
 	}
 }
 
 static void
 nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-	struct nv50_base *base = nv50_base(wndw);
 	u32 *push;
-	if ((push = evo_wait(&base->chan, 5))) {
+	if ((push = evo_wait(&wndw->wndw, 5))) {
 		evo_mthd(push, 0x0088, 4);
 		evo_data(push, asyw->sema.offset);
 		evo_data(push, asyw->sema.acquire);
 		evo_data(push, asyw->sema.release);
 		evo_data(push, asyw->sema.handle);
-		evo_kick(push, &base->chan);
+		evo_kick(push, &wndw->wndw);
 	}
 }
 
 static u32
 nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
 {
-	struct nv50_base *base = nv50_base(wndw);
 	u32 *push;
 
-	if (!(push = evo_wait(&base->chan, 2)))
+	if (!(push = evo_wait(&wndw->wndw, 2)))
 		return 0;
 	evo_mthd(push, 0x0080, 1);
 	evo_data(push, interlock);
-	evo_kick(push, &base->chan);
+	evo_kick(push, &wndw->wndw);
 
-	if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
-		return interlock ? 2 << (base->id * 8) : 0;
-	return interlock ? 2 << (base->id * 4) : 0;
+	if (wndw->wndw.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
+		return interlock ? 2 << (wndw->id * 8) : 0;
+	return interlock ? 2 << (wndw->id * 4) : 0;
 }
 
 static int
@@ -1561,14 +1504,6 @@ nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 	return 0;
 }
 
-static void *
-nv50_base_dtor(struct nv50_wndw *wndw)
-{
-	struct nv50_base *base = nv50_base(wndw);
-	nv50_dmac_destroy(&base->chan.base);
-	return base;
-}
-
 static const u32
 nv50_base_format[] = {
 	DRM_FORMAT_C8,
@@ -1581,11 +1516,11 @@ nv50_base_format[] = {
 	DRM_FORMAT_ABGR2101010,
 	DRM_FORMAT_XBGR8888,
 	DRM_FORMAT_ABGR8888,
+	0
 };
 
 static const struct nv50_wndw_func
 nv50_base = {
-	.dtor = nv50_base_dtor,
 	.acquire = nv50_base_acquire,
 	.release = nv50_base_release,
 	.sema_set = nv50_base_sema_set,
@@ -1600,41 +1535,79 @@ nv50_base = {
 };
 
 static int
-nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
-	      struct nv50_base **pbase)
+base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
+	      struct nouveau_drm *drm, int head, s32 oclass,
+	      struct nv50_wndw **pwndw)
 {
+	struct nv50_disp_base_channel_dma_v0 args = {
+		.head = head,
+	};
 	struct nv50_disp *disp = nv50_disp(drm->dev);
-	struct nv50_base *base;
+	struct nv50_wndw *wndw;
 	int ret;
 
-	if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
-		return -ENOMEM;
-	base->id = head->base.index;
-	base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
-	base->wndw.sema = EVO_FLIP_SEM0(base->id);
-	base->wndw.data = 0x00000000;
+	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
+			     "base", head, format, &wndw);
+	if (*pwndw = wndw, ret)
+		return ret;
 
-	ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
-			     "base", base->id, &base->chan.base,
-			     nv50_base_format, ARRAY_SIZE(nv50_base_format),
-			     &base->wndw);
+	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+			       &oclass, head, &args, sizeof(args),
+			       disp->sync->bo.offset, &wndw->wndw);
 	if (ret) {
-		kfree(base);
+		NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
 		return ret;
 	}
 
-	ret = nv50_base_create(&drm->client.device, &disp->disp->object,
-			       base->id, disp->sync->bo.offset, &base->chan);
+	ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func,
+			       false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
+			       &(struct nvif_notify_uevent_req) {},
+			       sizeof(struct nvif_notify_uevent_req),
+			       sizeof(struct nvif_notify_uevent_rep),
+			       &wndw->notify);
 	if (ret)
 		return ret;
 
-	return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
-				false,
-				NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
-				&(struct nvif_notify_uevent_req) {},
-				sizeof(struct nvif_notify_uevent_req),
-				sizeof(struct nvif_notify_uevent_rep),
-				&base->wndw.notify);
+	wndw->ntfy = EVO_FLIP_NTFY0(wndw->id);
+	wndw->sema = EVO_FLIP_SEM0(wndw->id);
+	wndw->data = 0x00000000;
+	return 0;
+}
+
+static int
+base507c_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return base507c_new_(&nv50_base, nv50_base_format, drm, head, oclass, pwndw);
+}
+
+static int
+nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+	struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+	} bases[] = {
+		{ GK110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GK104_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GF110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GT214_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GT200_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{   G82_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{  NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
+
+	cid = nvif_mclass(&disp->disp->object, bases);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported base class\n");
+		return cid;
+	}
+
+	return bases[cid].new(drm, head, bases[cid].oclass, pwndw);
 }
 
 /******************************************************************************
@@ -2471,7 +2444,6 @@ nv50_head_create(struct drm_device *dev, int index)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_head *head;
-	struct nv50_base *base;
 	struct nv50_wndw *curs, *wndw;
 	struct drm_crtc *crtc;
 	int ret, i;
@@ -2481,7 +2453,7 @@ nv50_head_create(struct drm_device *dev, int index)
 		return -ENOMEM;
 
 	head->base.index = index;
-	ret = nv50_base_new(drm, head, &base);
+	ret = nv50_base_new(drm, head->base.index, &wndw);
 	if (ret == 0)
 		ret = nv50_curs_new(drm, head->base.index, &curs);
 	if (ret) {
@@ -2490,7 +2462,7 @@ nv50_head_create(struct drm_device *dev, int index)
 	}
 
 	crtc = &head->base.base;
-	drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane, &curs->plane,
+	drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane,
 				  &nv50_head_func, "head-%d", head->base.index);
 	drm_crtc_helper_add(crtc, &nv50_head_help);
 	drm_mode_crtc_set_gamma_size(crtc, 256);
@@ -4256,7 +4228,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
 		struct nv50_wndw *wndw = nv50_wndw(plane);
 
 		if (asyw->set.image) {
-			asyw->ntfy.handle = wndw->dmac->sync.handle;
+			asyw->ntfy.handle = wndw->wndw.sync.handle;
 			asyw->ntfy.offset = wndw->ntfy;
 			asyw->ntfy.awaken = false;
 			asyw->set.ntfy = true;

From 9ca6f1ebba10240ad02f7c659481899a28220fbc Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1114/1286] drm/nouveau/kms/nv50: modify core allocation so the
 code can be split

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 215 +++++++++++++++---------
 1 file changed, 134 insertions(+), 81 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index f0edf9b5337a..abdf39ed9d26 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -425,39 +425,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	return ret;
 }
 
-/******************************************************************************
- * Core
- *****************************************************************************/
-
-struct nv50_mast {
-	struct nv50_dmac base;
-};
-
-static int
-nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
-		 u64 syncbuf, struct nv50_mast *core)
-{
-	struct nv50_disp_core_channel_dma_v0 args = {};
-	static const s32 oclass[] = {
-		GP102_DISP_CORE_CHANNEL_DMA,
-		GP100_DISP_CORE_CHANNEL_DMA,
-		GM200_DISP_CORE_CHANNEL_DMA,
-		GM107_DISP_CORE_CHANNEL_DMA,
-		GK110_DISP_CORE_CHANNEL_DMA,
-		GK104_DISP_CORE_CHANNEL_DMA,
-		GF110_DISP_CORE_CHANNEL_DMA,
-		GT214_DISP_CORE_CHANNEL_DMA,
-		GT206_DISP_CORE_CHANNEL_DMA,
-		GT200_DISP_CORE_CHANNEL_DMA,
-		G82_DISP_CORE_CHANNEL_DMA,
-		NV50_DISP_CORE_CHANNEL_DMA,
-		0
-	};
-
-	return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
-				syncbuf, &core->base);
-}
-
 /******************************************************************************
  * Base
  *****************************************************************************/
@@ -477,14 +444,10 @@ struct nv50_head {
 };
 
 #define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
-#define nv50_ovly(c) (&nv50_head(c)->ovly)
-#define nv50_oimm(c) (&nv50_head(c)->oimm)
-#define nv50_chan(c) (&(c)->base.base)
-#define nv50_vers(c) nv50_chan(c)->user.oclass
 
 struct nv50_disp {
 	struct nvif_disp *disp;
-	struct nv50_mast mast;
+	struct nv50_core *core;
 
 	struct nouveau_bo *sync;
 
@@ -497,7 +460,41 @@ nv50_disp(struct drm_device *dev)
 	return nouveau_display(dev)->priv;
 }
 
-#define nv50_mast(d) (&nv50_disp(d)->mast)
+/******************************************************************************
+ * Core
+ *****************************************************************************/
+
+struct nv50_core {
+	const struct nv50_core_func *func;
+	struct nv50_dmac chan;
+};
+
+struct nv50_core_func {
+};
+
+static int
+core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
+	      s32 oclass, struct nv50_core **pcore)
+{
+	struct nv50_disp_core_channel_dma_v0 args = {};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	struct nv50_core *core;
+	int ret;
+
+	if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
+		return -ENOMEM;
+	core->func = func;
+
+	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+			       &oclass, 0, &args, sizeof(args),
+			       disp->sync->bo.offset, &core->chan);
+	if (ret) {
+		NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	return 0;
+}
 
 /******************************************************************************
  * EVO channel helpers
@@ -1175,7 +1172,7 @@ static void
 nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
 		  struct nv50_wndw_atom *asyw)
 {
-	u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
+	u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle;
 	u32 offset = asyw->image.offset;
 	if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
 		asyh->curs.handle = handle;
@@ -1263,7 +1260,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 
 	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
 	wndw->immd = func;
-	wndw->ctxdma.parent = &disp->mast.base.base.user;
+	wndw->ctxdma.parent = &disp->core->chan.base.user;
 	return 0;
 }
 
@@ -1616,7 +1613,7 @@ nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 static void
 nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 2))) {
 		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
@@ -1632,7 +1629,7 @@ nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 2))) {
 		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
@@ -1652,7 +1649,7 @@ nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 bounds = 0;
 	u32 *push;
 
@@ -1681,7 +1678,7 @@ nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 bounds = 0;
 	u32 *push;
 
@@ -1711,7 +1708,7 @@ nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_curs_clr(struct nv50_head *head)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 4))) {
 		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
@@ -1736,7 +1733,7 @@ nv50_head_curs_clr(struct nv50_head *head)
 static void
 nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 5))) {
 		if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
@@ -1767,7 +1764,7 @@ nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_core_clr(struct nv50_head *head)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 2))) {
 		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
@@ -1782,7 +1779,7 @@ nv50_head_core_clr(struct nv50_head *head)
 static void
 nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 9))) {
 		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
@@ -1836,7 +1833,7 @@ nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_lut_clr(struct nv50_head *head)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 4))) {
 		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
@@ -1909,7 +1906,7 @@ nv50_head_lut_load(struct drm_property_blob *blob, int mode,
 static void
 nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 7))) {
 		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
@@ -1939,7 +1936,7 @@ nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	struct nv50_head_mode *m = &asyh->mode;
 	u32 *push;
 	if ((push = evo_wait(core, 14))) {
@@ -1980,7 +1977,7 @@ nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 10))) {
 		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
@@ -2191,7 +2188,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
 		asyh->lut.mode = 7;
 		asyh->set.ilut = asyh->state.color_mgmt_changed;
 	}
-	asyh->lut.handle = disp->mast.base.vram.handle;
+	asyh->lut.handle = disp->core->chan.vram.handle;
 }
 
 static void
@@ -2311,7 +2308,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 			asyh->core.w = asyh->state.mode.hdisplay;
 			asyh->core.h = asyh->state.mode.vdisplay;
 		}
-		asyh->core.handle = disp->mast.base.vram.handle;
+		asyh->core.handle = disp->core->chan.vram.handle;
 		asyh->core.offset = 0;
 		asyh->core.format = 0xcf;
 		asyh->core.kind = 0;
@@ -2483,6 +2480,61 @@ out:
 	return ret;
 }
 
+static const struct nv50_core_func
+core507d = {
+};
+
+static int
+core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+	return core507d_new_(&core507d, drm, oclass, pcore);
+}
+
+static void
+nv50_core_del(struct nv50_core **pcore)
+{
+	struct nv50_core *core = *pcore;
+	if (core) {
+		nv50_dmac_destroy(&core->chan);
+		kfree(*pcore);
+		*pcore = NULL;
+	}
+}
+
+static int
+nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
+{
+	struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
+	} cores[] = {
+		{ GP102_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GP100_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GM200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GM107_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GK110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GK104_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GF110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GT214_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GT206_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GT200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{   G82_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{  NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
+
+	cid = nvif_mclass(&disp->disp->object, cores);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported core channel class\n");
+		return cid;
+	}
+
+	return cores[cid].new(drm, cores[cid].oclass, pcore);
+}
+
 /******************************************************************************
  * Output path helpers
  *****************************************************************************/
@@ -2591,21 +2643,21 @@ static void
 nv50_dac_disable(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
 	const int or = nv_encoder->or;
 	u32 *push;
 
 	if (nv_encoder->crtc) {
-		push = evo_wait(mast, 4);
+		push = evo_wait(core, 4);
 		if (push) {
-			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+			if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
 				evo_mthd(push, 0x0400 + (or * 0x080), 1);
 				evo_data(push, 0x00000000);
 			} else {
 				evo_mthd(push, 0x0180 + (or * 0x020), 1);
 				evo_data(push, 0x00000000);
 			}
-			evo_kick(push, mast);
+			evo_kick(push, core);
 		}
 	}
 
@@ -2616,7 +2668,7 @@ nv50_dac_disable(struct drm_encoder *encoder)
 static void
 nv50_dac_enable(struct drm_encoder *encoder)
 {
-	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
@@ -2624,9 +2676,9 @@ nv50_dac_enable(struct drm_encoder *encoder)
 
 	nv50_outp_acquire(nv_encoder);
 
-	push = evo_wait(mast, 8);
+	push = evo_wait(core, 8);
 	if (push) {
-		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
 			u32 syncs = 0x00000000;
 
 			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -2656,7 +2708,7 @@ nv50_dac_enable(struct drm_encoder *encoder)
 			evo_data(push, 1 << nv_crtc->index);
 		}
 
-		evo_kick(push, mast);
+		evo_kick(push, core);
 	}
 
 	nv_encoder->crtc = encoder->crtc;
@@ -3526,7 +3578,8 @@ static void
 nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
 		struct drm_display_mode *mode, u8 proto, u8 depth)
 {
-	struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
+	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
+	struct nv50_dmac *core = &disp->core->chan;
 	u32 *push;
 
 	if (!mode) {
@@ -3809,18 +3862,18 @@ static void
 nv50_pior_disable(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
 	const int or = nv_encoder->or;
 	u32 *push;
 
 	if (nv_encoder->crtc) {
-		push = evo_wait(mast, 4);
+		push = evo_wait(core, 4);
 		if (push) {
-			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+			if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
 				evo_mthd(push, 0x0700 + (or * 0x040), 1);
 				evo_data(push, 0x00000000);
 			}
-			evo_kick(push, mast);
+			evo_kick(push, core);
 		}
 	}
 
@@ -3831,7 +3884,7 @@ nv50_pior_disable(struct drm_encoder *encoder)
 static void
 nv50_pior_enable(struct drm_encoder *encoder)
 {
-	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
@@ -3860,9 +3913,9 @@ nv50_pior_enable(struct drm_encoder *encoder)
 		break;
 	}
 
-	push = evo_wait(mast, 8);
+	push = evo_wait(core, 8);
 	if (push) {
-		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
 			u32 ctrl = (depth << 16) | (proto << 8) | owner;
 			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
 				ctrl |= 0x00001000;
@@ -3872,7 +3925,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
 			evo_data(push, ctrl);
 		}
 
-		evo_kick(push, mast);
+		evo_kick(push, core);
 	}
 
 	nv_encoder->crtc = encoder->crtc;
@@ -3950,7 +4003,7 @@ static void
 nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
 {
 	struct nv50_disp *disp = nv50_disp(drm->dev);
-	struct nv50_dmac *core = &disp->mast.base;
+	struct nv50_dmac *core = &disp->core->chan;
 	struct nv50_mstm *mstm;
 	struct drm_encoder *encoder;
 	u32 *push;
@@ -4134,11 +4187,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 	/* Flush update. */
 	if (interlock_core) {
 		if (!interlock_chan && atom->state.legacy_cursor_update) {
-			u32 *push = evo_wait(&disp->mast, 2);
+			u32 *push = evo_wait(&disp->core->chan, 2);
 			if (push) {
 				evo_mthd(push, 0x0080, 1);
 				evo_data(push, 0x00000000);
-				evo_kick(push, &disp->mast);
+				evo_kick(push, &disp->core->chan);
 			}
 		} else {
 			nv50_disp_atomic_commit_core(drm, interlock_chan);
@@ -4442,17 +4495,18 @@ nv50_display_fini(struct drm_device *dev)
 int
 nv50_display_init(struct drm_device *dev)
 {
+	struct nv50_dmac *core = &nv50_disp(dev)->core->chan;
 	struct drm_encoder *encoder;
 	struct drm_plane *plane;
 	u32 *push;
 
-	push = evo_wait(nv50_mast(dev), 32);
+	push = evo_wait(core, 32);
 	if (!push)
 		return -EBUSY;
 
 	evo_mthd(push, 0x0088, 1);
-	evo_data(push, nv50_mast(dev)->base.sync.handle);
-	evo_kick(push, nv50_mast(dev));
+	evo_data(push, core->sync.handle);
+	evo_kick(push, core);
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
@@ -4477,7 +4531,7 @@ nv50_display_destroy(struct drm_device *dev)
 {
 	struct nv50_disp *disp = nv50_disp(dev);
 
-	nv50_dmac_destroy(&disp->mast.base);
+	nv50_core_del(&disp->core);
 
 	nouveau_bo_unmap(disp->sync);
 	if (disp->sync)
@@ -4537,8 +4591,7 @@ nv50_display_create(struct drm_device *dev)
 		goto out;
 
 	/* allocate master evo channel */
-	ret = nv50_core_create(device, &disp->disp->object,
-			       disp->sync->bo.offset, &disp->mast);
+	ret = nv50_core_new(drm, &disp->core);
 	if (ret)
 		goto out;
 

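The core-allocation change in the patch above replaces the fixed oclass[] array with a constructor table walked via nvif_mclass(): candidate channel classes are listed newest-first, the first one the display object supports wins, and its constructor is handed the matching oclass. The stand-alone C sketch below only illustrates that selection idiom; supports(), core_new_sketch() and the class numbers are illustrative stand-ins, not nouveau API.

    #include <stdio.h>

    struct core { int oclass; };

    /* Illustrative constructor; the real core507d_new() allocates a DMA channel. */
    static int
    core_new_sketch(int oclass, struct core **pcore)
    {
            static struct core core;
            core.oclass = oclass;
            *pcore = &core;
            printf("using core channel class 0x%04x\n", oclass);
            return 0;
    }

    /* Stand-in for the capability check nvif_mclass() performs against the
     * display object; here we pretend only classes up to 0x827d exist. */
    static int
    supports(int oclass)
    {
            return oclass <= 0x827d;
    }

    int
    main(void)
    {
            static const struct {
                    int oclass;
                    int version;
                    int (*new)(int, struct core **);
            } cores[] = {                        /* newest first, zero-terminated */
                    { 0x907d, 0, core_new_sketch },  /* newer core, illustrative number */
                    { 0x827d, 0, core_new_sketch },  /* older core, illustrative number */
                    { 0x507d, 0, core_new_sketch },  /* original NV50 core */
                    {}
            };
            struct core *core;
            int cid;

            for (cid = 0; cores[cid].oclass; cid++) {
                    if (supports(cores[cid].oclass))
                            return cores[cid].new(cores[cid].oclass, &core);
            }
            fprintf(stderr, "no supported core channel class\n");
            return 1;
    }
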
From 10ffe0fad53308ff54da0c6b1c5befca4e6915a1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1115/1286] drm/nouveau/kms/nv50-: abstract head interfaces so
 the code can be split

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 64 +++++++++++++++++++------
 1 file changed, 50 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index abdf39ed9d26..a23a33de401d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -436,6 +436,7 @@ struct nv50_sync {
 };
 
 struct nv50_head {
+	const struct nv50_head_func *func;
 	struct nouveau_crtc base;
 	struct {
 		struct nouveau_bo *nvbo[2];
@@ -443,7 +444,22 @@ struct nv50_head {
 	} lut;
 };
 
-#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+struct nv50_head_func {
+	void (*view)(struct nv50_head *, struct nv50_head_atom *);
+	void (*mode)(struct nv50_head *, struct nv50_head_atom *);
+	void (*ilut_set)(struct nv50_head *, struct nv50_head_atom *);
+	void (*ilut_clr)(struct nv50_head *);
+	void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
+	void (*core_clr)(struct nv50_head *);
+	void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
+	void (*curs_clr)(struct nv50_head *);
+	void (*base)(struct nv50_head *, struct nv50_head_atom *);
+	void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
+	void (*dither)(struct nv50_head *, struct nv50_head_atom *);
+	void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
+};
+
+#define nv50_head(c) container_of((c), struct nv50_head, base.base)
 
 struct nv50_disp {
 	struct nvif_disp *disp;
@@ -470,6 +486,7 @@ struct nv50_core {
 };
 
 struct nv50_core_func {
+	const struct nv50_head_func *head;
 };
 
 static int
@@ -2002,22 +2019,38 @@ nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
+static const struct nv50_head_func
+head507d = {
+	.view = nv50_head_view,
+	.mode = nv50_head_mode,
+	.ilut_set = nv50_head_lut_set,
+	.ilut_clr = nv50_head_lut_clr,
+	.core_set = nv50_head_core_set,
+	.core_clr = nv50_head_core_clr,
+	.curs_set = nv50_head_curs_set,
+	.curs_clr = nv50_head_curs_clr,
+	.base = nv50_head_base,
+	.ovly = nv50_head_ovly,
+	.dither = nv50_head_dither,
+	.procamp = nv50_head_procamp,
+};
+
 static void
 nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
 {
 	if (asyh->clr.ilut && (!asyh->set.ilut || y))
-		nv50_head_lut_clr(head);
+		head->func->ilut_clr(head);
 	if (asyh->clr.core && (!asyh->set.core || y))
-		nv50_head_core_clr(head);
+		head->func->core_clr(head);
 	if (asyh->clr.curs && (!asyh->set.curs || y))
-		nv50_head_curs_clr(head);
+		head->func->curs_clr(head);
 }
 
 static void
 nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-	if (asyh->set.view   ) nv50_head_view    (head, asyh);
-	if (asyh->set.mode   ) nv50_head_mode    (head, asyh);
+	if (asyh->set.view   ) head->func->view    (head, asyh);
+	if (asyh->set.mode   ) head->func->mode    (head, asyh);
 	if (asyh->set.ilut   ) {
 		struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
 		struct drm_property_blob *blob = asyh->state.gamma_lut;
@@ -2025,14 +2058,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 			nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
 		asyh->lut.offset = nvbo->bo.offset;
 		head->lut.next ^= 1;
-		nv50_head_lut_set(head, asyh);
+		head->func->ilut_set(head, asyh);
 	}
-	if (asyh->set.core   ) nv50_head_core_set(head, asyh);
-	if (asyh->set.curs   ) nv50_head_curs_set(head, asyh);
-	if (asyh->set.base   ) nv50_head_base    (head, asyh);
-	if (asyh->set.ovly   ) nv50_head_ovly    (head, asyh);
-	if (asyh->set.dither ) nv50_head_dither  (head, asyh);
-	if (asyh->set.procamp) nv50_head_procamp (head, asyh);
+	if (asyh->set.core   ) head->func->core_set(head, asyh);
+	if (asyh->set.curs   ) head->func->curs_set(head, asyh);
+	if (asyh->set.base   ) head->func->base    (head, asyh);
+	if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
+	if (asyh->set.dither ) head->func->dither  (head, asyh);
+	if (asyh->set.procamp) head->func->procamp (head, asyh);
 }
 
 static void
@@ -2422,7 +2455,7 @@ nv50_head_destroy(struct drm_crtc *crtc)
 		nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
 
 	drm_crtc_cleanup(crtc);
-	kfree(crtc);
+	kfree(head);
 }
 
 static const struct drm_crtc_funcs
@@ -2440,6 +2473,7 @@ static int
 nv50_head_create(struct drm_device *dev, int index)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_head *head;
 	struct nv50_wndw *curs, *wndw;
 	struct drm_crtc *crtc;
@@ -2449,6 +2483,7 @@ nv50_head_create(struct drm_device *dev, int index)
 	if (!head)
 		return -ENOMEM;
 
+	head->func = disp->core->func->head;
 	head->base.index = index;
 	ret = nv50_base_new(drm, head->base.index, &wndw);
 	if (ret == 0)
@@ -2482,6 +2517,7 @@ out:
 
 static const struct nv50_core_func
 core507d = {
+	.head = &head507d,
 };
 
 static int

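The head abstraction introduced above has two parts: a per-generation table of function pointers hung off each head, and the existing set/clr dirty masks in the head atom, so the flush path only calls the hooks whose state actually changed. Below is a minimal stand-alone sketch of that dispatch pattern under invented names; the real head507d table is the one added in the diff.

    #include <stdbool.h>
    #include <stdio.h>

    struct head;

    struct head_atom {
            union {
                    struct {
                            bool mode:1;
                            bool core:1;
                            bool curs:1;
                    };
                    unsigned char mask;   /* lets "flush everything" set all bits at once */
            } set;
    };

    /* Per-generation hooks; newer display classes would install different ones. */
    struct head_func {
            void (*mode)(struct head *, struct head_atom *);
            void (*core_set)(struct head *, struct head_atom *);
            void (*curs_set)(struct head *, struct head_atom *);
    };

    struct head {
            const struct head_func *func;
    };

    static void mode_sketch(struct head *h, struct head_atom *a) { puts("program mode"); }
    static void core_sketch(struct head *h, struct head_atom *a) { puts("program core"); }
    static void curs_sketch(struct head *h, struct head_atom *a) { puts("program cursor"); }

    static const struct head_func head507d_sketch = {
            .mode = mode_sketch,
            .core_set = core_sketch,
            .curs_set = curs_sketch,
    };

    /* Only dirty state reaches the hardware-specific hooks. */
    static void
    head_flush_set(struct head *head, struct head_atom *asyh)
    {
            if (asyh->set.mode) head->func->mode(head, asyh);
            if (asyh->set.core) head->func->core_set(head, asyh);
            if (asyh->set.curs) head->func->curs_set(head, asyh);
    }

    int
    main(void)
    {
            struct head head = { .func = &head507d_sketch };
            struct head_atom asyh = { .set.mask = 0 };

            asyh.set.mode = true;      /* only the mode changed this time */
            head_flush_set(&head, &asyh);
            return 0;
    }
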
From 2ca7fb5c1cc69ee7fc1a3c048c6f2b75cf842df9 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1116/1286] drm/nouveau/kms/nv50: handle
 SetControlOutputResource from head

Removes duplicated code from OR-specific functions.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c   | 115 +++++++++++-----------
 drivers/gpu/drm/nouveau/nouveau_encoder.h |   3 +-
 2 files changed, 57 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a23a33de401d..f22c6373fcc2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -194,6 +194,12 @@ struct nv50_head_atom {
 		} sat;
 	} procamp;
 
+	struct {
+		u8 nhsync:1;
+		u8 nvsync:1;
+		u8 depth:4;
+	} or;
+
 	union {
 		struct {
 			bool ilut:1;
@@ -214,6 +220,7 @@ struct nv50_head_atom {
 			bool ovly:1;
 			bool dither:1;
 			bool procamp:1;
+			bool or:1;
 		};
 		u16 mask;
 	} set;
@@ -457,6 +464,7 @@ struct nv50_head_func {
 	void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
 	void (*dither)(struct nv50_head *, struct nv50_head_atom *);
 	void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
+	void (*or)(struct nv50_head *, struct nv50_head_atom *);
 };
 
 #define nv50_head(c) container_of((c), struct nv50_head, base.base)
@@ -1627,6 +1635,23 @@ nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 /******************************************************************************
  * Head
  *****************************************************************************/
+static void
+head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if (core->base.user.oclass >= GF110_DISP_CORE_CHANNEL_DMA &&
+	    (push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
+		evo_data(push, 0x00000001 | (asyh->or.depth  << 6) |
+					    (asyh->or.nvsync << 4) |
+					    (asyh->or.nhsync << 3));
+		evo_data(push, 0x31ec6000 | (head->base.index << 25) |
+					     asyh->mode.interlace);
+		evo_kick(push, core);
+	}
+}
+
 static void
 nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
@@ -2033,6 +2058,7 @@ head507d = {
 	.ovly = nv50_head_ovly,
 	.dither = nv50_head_dither,
 	.procamp = nv50_head_procamp,
+	.or = head907d_or,
 };
 
 static void
@@ -2066,6 +2092,7 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 	if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
 	if (asyh->set.dither ) head->func->dither  (head, asyh);
 	if (asyh->set.procamp) head->func->procamp (head, asyh);
+	if (asyh->set.or     ) head->func->or      (head, asyh);
 }
 
 static void
@@ -2268,6 +2295,9 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 	m->clock = mode->crtc_clock;
 
+	asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+	asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+	asyh->set.or = head->func->or != NULL;
 	asyh->set.mode = true;
 }
 
@@ -2304,6 +2334,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 			if (asyc)
 				asyc->set.mask = ~0;
 			asyh->set.mask = ~0;
+			asyh->set.or = head->func->or != NULL;
 		}
 
 		if (asyh->state.mode_changed)
@@ -2707,7 +2738,7 @@ nv50_dac_enable(struct drm_encoder *encoder)
 	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
+	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
 	u32 *push;
 
 	nv50_outp_acquire(nv_encoder);
@@ -2715,37 +2746,17 @@ nv50_dac_enable(struct drm_encoder *encoder)
 	push = evo_wait(core, 8);
 	if (push) {
 		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			u32 syncs = 0x00000000;
-
-			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-				syncs |= 0x00000001;
-			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-				syncs |= 0x00000002;
-
 			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
 			evo_data(push, 1 << nv_crtc->index);
-			evo_data(push, syncs);
+			evo_data(push, (asyh->or.nvsync << 1) | asyh->or.nhsync);
 		} else {
-			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
-			u32 syncs = 0x00000001;
-
-			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-				syncs |= 0x00000008;
-			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-				syncs |= 0x00000010;
-
-			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-				magic |= 0x00000001;
-
-			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
-			evo_data(push, syncs);
-			evo_data(push, magic);
 			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
 			evo_data(push, 1 << nv_crtc->index);
 		}
 
 		evo_kick(push, core);
 	}
+	asyh->or.depth = 0;
 
 	nv_encoder->crtc = encoder->crtc;
 }
@@ -3144,7 +3155,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
 	}
 
 	mstm->outp->update(mstm->outp, head->base.index,
-			   &head->base.base.state->adjusted_mode, proto, depth);
+			   nv50_head_atom(head->base.base.state), proto, depth);
 
 	msto->head = head;
 	msto->mstc = mstc;
@@ -3612,46 +3623,31 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
  *****************************************************************************/
 static void
 nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
-		struct drm_display_mode *mode, u8 proto, u8 depth)
+		struct nv50_head_atom *asyh, u8 proto, u8 depth)
 {
 	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
 	struct nv50_dmac *core = &disp->core->chan;
 	u32 *push;
 
-	if (!mode) {
+	if (!asyh) {
 		nv_encoder->ctrl &= ~BIT(head);
 		if (!(nv_encoder->ctrl & 0x0000000f))
 			nv_encoder->ctrl = 0;
 	} else {
 		nv_encoder->ctrl |= proto << 8;
 		nv_encoder->ctrl |= BIT(head);
+		asyh->or.depth = depth;
 	}
 
 	if ((push = evo_wait(core, 6))) {
 		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			if (mode) {
-				if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-					nv_encoder->ctrl |= 0x00001000;
-				if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-					nv_encoder->ctrl |= 0x00002000;
-				nv_encoder->ctrl |= depth << 16;
+			if (asyh) {
+				nv_encoder->ctrl |= asyh->or.depth  << 16 |
+						    asyh->or.nvsync << 13 |
+						    asyh->or.nhsync << 12;
 			}
 			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
 		} else {
-			if (mode) {
-				u32 magic = 0x31ec6000 | (head << 25);
-				u32 syncs = 0x00000001;
-				if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-					syncs |= 0x00000008;
-				if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-					syncs |= 0x00000010;
-				if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-					magic |= 0x00000001;
-
-				evo_mthd(push, 0x0404 + (head * 0x300), 2);
-				evo_data(push, syncs | (depth << 6));
-				evo_data(push, magic);
-			}
 			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
 		}
 		evo_data(push, nv_encoder->ctrl);
@@ -3692,7 +3688,8 @@ nv50_sor_enable(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
+	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
 	struct {
 		struct nv50_disp_mthd_v1 base;
 		struct nv50_disp_sor_lvds_script_v0 lvds;
@@ -3786,7 +3783,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
 		break;
 	}
 
-	nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
+	nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
 }
 
 static const struct drm_encoder_helper_funcs
@@ -3924,19 +3921,19 @@ nv50_pior_enable(struct drm_encoder *encoder)
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
-	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
+	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
 	u8 owner = 1 << nv_crtc->index;
-	u8 proto, depth;
+	u8 proto;
 	u32 *push;
 
 	nv50_outp_acquire(nv_encoder);
 
 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
 	switch (nv_connector->base.display_info.bpc) {
-	case 10: depth = 0x6; break;
-	case  8: depth = 0x5; break;
-	case  6: depth = 0x2; break;
-	default: depth = 0x0; break;
+	case 10: asyh->or.depth = 0x6; break;
+	case  8: asyh->or.depth = 0x5; break;
+	case  6: asyh->or.depth = 0x2; break;
+	default: asyh->or.depth = 0x0; break;
 	}
 
 	switch (nv_encoder->dcb->type) {
@@ -3952,13 +3949,11 @@ nv50_pior_enable(struct drm_encoder *encoder)
 	push = evo_wait(core, 8);
 	if (push) {
 		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			u32 ctrl = (depth << 16) | (proto << 8) | owner;
-			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-				ctrl |= 0x00001000;
-			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-				ctrl |= 0x00002000;
 			evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
-			evo_data(push, ctrl);
+			evo_data(push, (asyh->or.depth  << 16) |
+				       (asyh->or.nvsync << 13) |
+				       (asyh->or.nhsync << 12) |
+				       (proto << 8) | owner);
 		}
 
 		evo_kick(push, core);
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index e28d966946a1..3517f920bf89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -32,6 +32,7 @@
 #include <drm/drm_encoder_slave.h>
 #include <drm/drm_dp_mst_helper.h>
 #include "dispnv04/disp.h"
+struct nv50_head_atom;
 
 #define NV_DPMS_CLEARED 0x80
 
@@ -68,7 +69,7 @@ struct nouveau_encoder {
 	void (*enc_save)(struct drm_encoder *encoder);
 	void (*enc_restore)(struct drm_encoder *encoder);
 	void (*update)(struct nouveau_encoder *, u8 head,
-		       struct drm_display_mode *, u8 proto, u8 depth);
+		       struct nv50_head_atom *, u8 proto, u8 depth);
 };
 
 struct nouveau_encoder *

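With SetControlOutputResource handled from the head, sync polarity and pixel depth now live in the head atom's or state and are folded into the output control word by whichever encoder programs it; on pre-GF110 core channels the patch packs depth at bit 16, negative vsync at bit 13 and negative hsync at bit 12 for SORs/PIORs (DACs emit a separate sync word). A stand-alone sketch of that packing, reusing the bit positions from the hunks above:

    #include <stdint.h>
    #include <stdio.h>

    struct or_state {
            uint8_t nhsync:1;   /* negative hsync polarity */
            uint8_t nvsync:1;   /* negative vsync polarity */
            uint8_t depth:4;    /* pixel depth code, e.g. 0x6 for 10 bpc */
    };

    /* Fold head-atom OR state into a SOR/PIOR control word as on pre-GF110
     * core channels; proto and owner come from the encoder. */
    static uint32_t
    or_ctrl(const struct or_state *or, uint8_t proto, uint8_t owner)
    {
            uint32_t ctrl = (proto << 8) | owner;

            ctrl |= (uint32_t)or->depth  << 16;
            ctrl |= (uint32_t)or->nvsync << 13;
            ctrl |= (uint32_t)or->nhsync << 12;
            return ctrl;
    }

    int
    main(void)
    {
            struct or_state or = { .nhsync = 1, .nvsync = 0, .depth = 0x6 };

            printf("ctrl = 0x%08x\n", (unsigned)or_ctrl(&or, 0x0, 1 << 0));
            return 0;
    }
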
From 0a3687716bb0a53a363b63cb5ba2bddc14c3bd2a Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1117/1286] drm/nouveau/kms/nv50: abstract OR interfaces so the
 code can be split

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 184 +++++++++++++-----------
 1 file changed, 104 insertions(+), 80 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index f22c6373fcc2..995109ee5762 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -495,6 +495,14 @@ struct nv50_core {
 
 struct nv50_core_func {
 	const struct nv50_head_func *head;
+	const struct nv50_outp_func *dac;
+	const struct nv50_outp_func *sor;
+	const struct nv50_outp_func *pior;
+};
+
+struct nv50_outp_func {
+	void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
+		     struct nv50_head_atom *);
 };
 
 static int
@@ -1641,7 +1649,7 @@ head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if (core->base.user.oclass >= GF110_DISP_CORE_CHANNEL_DMA &&
-	    (push = evo_wait(core, 2))) {
+	    (push = evo_wait(core, 3))) {
 		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
 		evo_data(push, 0x00000001 | (asyh->or.depth  << 6) |
 					    (asyh->or.nvsync << 4) |
@@ -2546,9 +2554,15 @@ out:
 	return ret;
 }
 
+static const struct nv50_outp_func dac507d;
+static const struct nv50_outp_func sor507d;
+static const struct nv50_outp_func pior507d;
 static const struct nv50_core_func
 core507d = {
 	.head = &head507d,
+	.dac = &dac507d,
+	.sor = &sor507d,
+	.pior = &pior507d,
 };
 
 static int
@@ -2706,28 +2720,40 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
 /******************************************************************************
  * DAC
  *****************************************************************************/
+static void
+dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	     struct nv50_head_atom *asyh)
+{
+	u32 *push, sync = 0;
+	if ((push = evo_wait(&core->chan, 3))) {
+		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			if (asyh) {
+				sync |= asyh->or.nvsync << 1;
+				sync |= asyh->or.nhsync;
+			}
+			evo_mthd(push, 0x0400 + (or * 0x080), 2);
+			evo_data(push, ctrl);
+			evo_data(push, sync);
+		} else {
+			evo_mthd(push, 0x0180 + (or * 0x020), 1);
+			evo_data(push, ctrl);
+		}
+		evo_kick(push, &core->chan);
+	}
+}
+
+static const struct nv50_outp_func
+dac507d = {
+	.ctrl = dac507d_ctrl,
+};
+
 static void
 nv50_dac_disable(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
-	const int or = nv_encoder->or;
-	u32 *push;
-
-	if (nv_encoder->crtc) {
-		push = evo_wait(core, 4);
-		if (push) {
-			if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-				evo_mthd(push, 0x0400 + (or * 0x080), 1);
-				evo_data(push, 0x00000000);
-			} else {
-				evo_mthd(push, 0x0180 + (or * 0x020), 1);
-				evo_data(push, 0x00000000);
-			}
-			evo_kick(push, core);
-		}
-	}
-
+	struct nv50_core *core = nv50_disp(encoder->dev)->core;
+	if (nv_encoder->crtc)
+		core->func->dac->ctrl(core, nv_encoder->or, 0x00000000, NULL);
 	nv_encoder->crtc = NULL;
 	nv50_outp_release(nv_encoder);
 }
@@ -2735,27 +2761,14 @@ nv50_dac_disable(struct drm_encoder *encoder)
 static void
 nv50_dac_enable(struct drm_encoder *encoder)
 {
-	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
-	u32 *push;
+	struct nv50_core *core = nv50_disp(encoder->dev)->core;
 
 	nv50_outp_acquire(nv_encoder);
 
-	push = evo_wait(core, 8);
-	if (push) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
-			evo_data(push, 1 << nv_crtc->index);
-			evo_data(push, (asyh->or.nvsync << 1) | asyh->or.nhsync);
-		} else {
-			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
-			evo_data(push, 1 << nv_crtc->index);
-		}
-
-		evo_kick(push, core);
-	}
+	core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
 	asyh->or.depth = 0;
 
 	nv_encoder->crtc = encoder->crtc;
@@ -3621,13 +3634,38 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
 /******************************************************************************
  * SOR
  *****************************************************************************/
+static void
+sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	     struct nv50_head_atom *asyh)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 6))) {
+		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			if (asyh) {
+				ctrl |= asyh->or.depth  << 16;
+				ctrl |= asyh->or.nvsync << 13;
+				ctrl |= asyh->or.nhsync << 12;
+			}
+			evo_mthd(push, 0x0600 + (or * 0x40), 1);
+		} else {
+			evo_mthd(push, 0x0200 + (or * 0x20), 1);
+		}
+		evo_data(push, ctrl);
+		evo_kick(push, &core->chan);
+	}
+}
+
+static const struct nv50_outp_func
+sor507d = {
+	.ctrl = sor507d_ctrl,
+};
+
 static void
 nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
 		struct nv50_head_atom *asyh, u8 proto, u8 depth)
 {
 	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
-	struct nv50_dmac *core = &disp->core->chan;
-	u32 *push;
+	struct nv50_core *core = disp->core;
 
 	if (!asyh) {
 		nv_encoder->ctrl &= ~BIT(head);
@@ -3639,20 +3677,7 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
 		asyh->or.depth = depth;
 	}
 
-	if ((push = evo_wait(core, 6))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			if (asyh) {
-				nv_encoder->ctrl |= asyh->or.depth  << 16 |
-						    asyh->or.nvsync << 13 |
-						    asyh->or.nhsync << 12;
-			}
-			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
-		} else {
-			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
-		}
-		evo_data(push, nv_encoder->ctrl);
-		evo_kick(push, core);
-	}
+	core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh);
 }
 
 static void
@@ -3879,6 +3904,30 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 /******************************************************************************
  * PIOR
  *****************************************************************************/
+static void
+pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	      struct nv50_head_atom *asyh)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 8))) {
+		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			if (asyh) {
+				ctrl |= asyh->or.depth  << 16;
+				ctrl |= asyh->or.nvsync << 13;
+				ctrl |= asyh->or.nhsync << 12;
+			}
+			evo_mthd(push, 0x0700 + (or * 0x040), 1);
+			evo_data(push, ctrl);
+		}
+		evo_kick(push, &core->chan);
+	}
+}
+
+static const struct nv50_outp_func
+pior507d = {
+	.ctrl = pior507d_ctrl,
+};
+
 static int
 nv50_pior_atomic_check(struct drm_encoder *encoder,
 		       struct drm_crtc_state *crtc_state,
@@ -3895,21 +3944,9 @@ static void
 nv50_pior_disable(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
-	const int or = nv_encoder->or;
-	u32 *push;
-
-	if (nv_encoder->crtc) {
-		push = evo_wait(core, 4);
-		if (push) {
-			if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-				evo_mthd(push, 0x0700 + (or * 0x040), 1);
-				evo_data(push, 0x00000000);
-			}
-			evo_kick(push, core);
-		}
-	}
-
+	struct nv50_core *core = nv50_disp(encoder->dev)->core;
+	if (nv_encoder->crtc)
+		core->func->pior->ctrl(core, nv_encoder->or, 0x00000000, NULL);
 	nv_encoder->crtc = NULL;
 	nv50_outp_release(nv_encoder);
 }
@@ -3917,14 +3954,13 @@ nv50_pior_disable(struct drm_encoder *encoder)
 static void
 nv50_pior_enable(struct drm_encoder *encoder)
 {
-	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
 	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
+	struct nv50_core *core = nv50_disp(encoder->dev)->core;
 	u8 owner = 1 << nv_crtc->index;
 	u8 proto;
-	u32 *push;
 
 	nv50_outp_acquire(nv_encoder);
 
@@ -3946,19 +3982,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
 		break;
 	}
 
-	push = evo_wait(core, 8);
-	if (push) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
-			evo_data(push, (asyh->or.depth  << 16) |
-				       (asyh->or.nvsync << 13) |
-				       (asyh->or.nhsync << 12) |
-				       (proto << 8) | owner);
-		}
-
-		evo_kick(push, core);
-	}
-
+	core->func->pior->ctrl(core, nv_encoder->or, (proto << 8) | owner, asyh);
 	nv_encoder->crtc = encoder->crtc;
 }
 

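The OR abstraction above boils each output type down to a single ctrl hook on the core: enable invokes it with the owner mask plus the head atom, disable with a zero control word and a NULL atom, and the hook chooses the methods for its display generation. The following stand-alone sketch mimics that one-callback enable/disable shape with invented names; it is not the nouveau implementation.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct head_atom { uint8_t nhsync:1, nvsync:1, depth:4; };

    struct core;

    /* One hook per output type (dac/sor/pior in the patch above). */
    struct outp_func {
            void (*ctrl)(struct core *core, int or, uint32_t ctrl,
                         struct head_atom *asyh);
    };

    struct core_func { const struct outp_func *dac; };
    struct core { const struct core_func *func; };

    static void
    dac_ctrl_sketch(struct core *core, int or, uint32_t ctrl, struct head_atom *asyh)
    {
            /* asyh == NULL means "disable": no sync state to merge in.  The real
             * dac507d emits the sync polarity as a second data word instead. */
            if (asyh)
                    ctrl |= (asyh->nvsync << 1) | asyh->nhsync;
            printf("DAC %d ctrl <- 0x%08x\n", or, (unsigned)ctrl);
    }

    static const struct outp_func dac_sketch = { .ctrl = dac_ctrl_sketch };
    static const struct core_func core_sketch = { .dac = &dac_sketch };

    int
    main(void)
    {
            struct core core = { .func = &core_sketch };
            struct head_atom asyh = { .nhsync = 1 };

            core.func->dac->ctrl(&core, 0, 1 << 0, &asyh);      /* enable on head 0 */
            core.func->dac->ctrl(&core, 0, 0x00000000, NULL);   /* disable */
            return 0;
    }
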
From 1590700d94ac53772491ed3103a4e8b8de01640a Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1118/1286] drm/nouveau/kms/nv50-: split each resource type
 into its own source files

There should be no code changes here, just shuffling stuff around.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/Kbuild     |   26 +
 drivers/gpu/drm/nouveau/dispnv50/atom.h     |  207 ++
 drivers/gpu/drm/nouveau/dispnv50/base.c     |   53 +
 drivers/gpu/drm/nouveau/dispnv50/base.h     |    8 +
 drivers/gpu/drm/nouveau/dispnv50/base507c.c |  307 +++
 drivers/gpu/drm/nouveau/dispnv50/core.c     |   69 +
 drivers/gpu/drm/nouveau/dispnv50/core.h     |   26 +
 drivers/gpu/drm/nouveau/dispnv50/core507d.c |   65 +
 drivers/gpu/drm/nouveau/dispnv50/curs.c     |   51 +
 drivers/gpu/drm/nouveau/dispnv50/curs.h     |    8 +
 drivers/gpu/drm/nouveau/dispnv50/curs507a.c |  151 ++
 drivers/gpu/drm/nouveau/dispnv50/dac507d.c  |   51 +
 drivers/gpu/drm/nouveau/dispnv50/disp.c     | 2493 +------------------
 drivers/gpu/drm/nouveau/dispnv50/disp.h     |   71 +
 drivers/gpu/drm/nouveau/dispnv50/head.c     |  566 +++++
 drivers/gpu/drm/nouveau/dispnv50/head.h     |   39 +
 drivers/gpu/drm/nouveau/dispnv50/head507d.c |  403 +++
 drivers/gpu/drm/nouveau/dispnv50/oimm.c     |   51 +
 drivers/gpu/drm/nouveau/dispnv50/oimm.h     |    8 +
 drivers/gpu/drm/nouveau/dispnv50/oimm507b.c |   56 +
 drivers/gpu/drm/nouveau/dispnv50/ovly.c     |   57 +
 drivers/gpu/drm/nouveau/dispnv50/ovly.h     |    8 +
 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c |   70 +
 drivers/gpu/drm/nouveau/dispnv50/pior507d.c |   48 +
 drivers/gpu/drm/nouveau/dispnv50/sor507d.c  |   50 +
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     |  434 ++++
 drivers/gpu/drm/nouveau/dispnv50/wndw.h     |   73 +
 drivers/gpu/drm/nouveau/nv50_display.h      |    1 -
 28 files changed, 2967 insertions(+), 2483 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/atom.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/base.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/base.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/base507c.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/core.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/core.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/core507d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/curs.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/curs.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/curs507a.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/dac507d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/disp.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/head.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/head.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/head507d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/oimm.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/oimm.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/ovly.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/ovly.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/pior507d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/sor507d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/wndw.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/wndw.h

diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 43fc8be49391..f3877d2d8840 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -1 +1,27 @@
 nouveau-y += dispnv50/disp.o
+
+nouveau-y += dispnv50/core.o
+nouveau-y += dispnv50/core507d.o
+
+nouveau-y += dispnv50/dac507d.o
+
+nouveau-y += dispnv50/pior507d.o
+
+nouveau-y += dispnv50/sor507d.o
+
+nouveau-y += dispnv50/head.o
+nouveau-y += dispnv50/head507d.o
+
+nouveau-y += dispnv50/wndw.o
+
+nouveau-y += dispnv50/base.o
+nouveau-y += dispnv50/base507c.o
+
+nouveau-y += dispnv50/curs.o
+nouveau-y += dispnv50/curs507a.o
+
+nouveau-y += dispnv50/oimm.o
+nouveau-y += dispnv50/oimm507b.o
+
+nouveau-y += dispnv50/ovly.o
+nouveau-y += dispnv50/ovly507e.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
new file mode 100644
index 000000000000..8c97e25c881f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -0,0 +1,207 @@
+#ifndef __NV50_KMS_ATOM_H__
+#define __NV50_KMS_ATOM_H__
+#define nv50_atom(p) container_of((p), struct nv50_atom, state)
+#include <drm/drm_atomic.h>
+
+struct nv50_atom {
+	struct drm_atomic_state state;
+
+	struct list_head outp;
+	bool lock_core;
+	bool flush_disable;
+};
+
+#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
+
+struct nv50_head_atom {
+	struct drm_crtc_state state;
+
+	struct {
+		u16 iW;
+		u16 iH;
+		u16 oW;
+		u16 oH;
+	} view;
+
+	struct nv50_head_mode {
+		bool interlace;
+		u32 clock;
+		struct {
+			u16 active;
+			u16 synce;
+			u16 blanke;
+			u16 blanks;
+		} h;
+		struct {
+			u32 active;
+			u16 synce;
+			u16 blanke;
+			u16 blanks;
+			u16 blank2s;
+			u16 blank2e;
+			u16 blankus;
+		} v;
+	} mode;
+
+	struct {
+		bool visible;
+		u32 handle;
+		u64 offset:40;
+		u8  mode:4;
+	} ilut;
+
+	struct {
+		bool visible;
+		u32 handle;
+		u64 offset:40;
+		u8  format;
+		u8  kind:7;
+		u8  layout:1;
+		u8  block:4;
+		u32 pitch:20;
+		u16 x;
+		u16 y;
+		u16 w;
+		u16 h;
+	} core;
+
+	struct {
+		bool visible;
+		u32 handle;
+		u64 offset:40;
+		u8  layout:1;
+		u8  format:1;
+	} curs;
+
+	struct {
+		u8  depth;
+		u8  cpp;
+		u16 x;
+		u16 y;
+		u16 w;
+		u16 h;
+	} base;
+
+	struct {
+		u8 cpp;
+	} ovly;
+
+	struct {
+		bool enable:1;
+		u8 bits:2;
+		u8 mode:4;
+	} dither;
+
+	struct {
+		struct {
+			u16 cos:12;
+			u16 sin:12;
+		} sat;
+	} procamp;
+
+	struct {
+		u8 nhsync:1;
+		u8 nvsync:1;
+		u8 depth:4;
+	} or;
+
+	union {
+		struct {
+			bool ilut:1;
+			bool core:1;
+			bool curs:1;
+		};
+		u8 mask;
+	} clr;
+
+	union {
+		struct {
+			bool ilut:1;
+			bool core:1;
+			bool curs:1;
+			bool view:1;
+			bool mode:1;
+			bool base:1;
+			bool ovly:1;
+			bool dither:1;
+			bool procamp:1;
+			bool or:1;
+		};
+		u16 mask;
+	} set;
+};
+
+static inline struct nv50_head_atom *
+nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+	struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(statec))
+		return (void *)statec;
+	return nv50_head_atom(statec);
+}
+
+#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
+
+struct nv50_wndw_atom {
+	struct drm_plane_state state;
+	u8 interval;
+
+	struct {
+		u32  handle;
+		u16  offset:12;
+		bool awaken:1;
+	} ntfy;
+
+	struct {
+		u32 handle;
+		u16 offset:12;
+		u32 acquire;
+		u32 release;
+	} sema;
+
+	struct {
+		u8 enable:2;
+	} lut;
+
+	struct {
+		u8  mode:2;
+		u8  interval:4;
+
+		u8  format;
+		u8  kind:7;
+		u8  layout:1;
+		u8  block:4;
+		u32 pitch:20;
+		u16 w;
+		u16 h;
+
+		u32 handle;
+		u64 offset;
+	} image;
+
+	struct {
+		u16 x;
+		u16 y;
+	} point;
+
+	union {
+		struct {
+			bool ntfy:1;
+			bool sema:1;
+			bool image:1;
+		};
+		u8 mask;
+	} clr;
+
+	union {
+		struct {
+			bool ntfy:1;
+			bool sema:1;
+			bool image:1;
+			bool lut:1;
+			bool point:1;
+		};
+		u8 mask;
+	} set;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.c b/drivers/gpu/drm/nouveau/dispnv50/base.c
new file mode 100644
index 000000000000..12ca5d70509c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+
+#include <nvif/class.h>
+
+int
+nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+	struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+	} bases[] = {
+		{ GK110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GK104_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GF110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GT214_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GT200_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{   G82_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{  NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
+
+	cid = nvif_mclass(&disp->disp->object, bases);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported base class\n");
+		return cid;
+	}
+
+	return bases[cid].new(drm, head, bases[cid].oclass, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
new file mode 100644
index 000000000000..1daba7319ba9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -0,0 +1,8 @@
+#ifndef __NV50_KMS_BASE_H__
+#define __NV50_KMS_BASE_H__
+#include "wndw.h"
+
+int base507c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+
+int nv50_base_new(struct nouveau_drm *, int head, struct nv50_wndw **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
new file mode 100644
index 000000000000..b73e7b4d86a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+
+#include <nvif/class.h>
+#include <nvif/cl507c.h>
+#include <nvif/event.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "nouveau_bo.h"
+
+static u32
+base507c_update(struct nv50_wndw *wndw, u32 interlock)
+{
+	u32 *push;
+
+	if (!(push = evo_wait(&wndw->wndw, 2)))
+		return 0;
+	evo_mthd(push, 0x0080, 1);
+	evo_data(push, interlock);
+	evo_kick(push, &wndw->wndw);
+
+	if (wndw->wndw.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
+		return interlock ? 2 << (wndw->id * 8) : 0;
+	return interlock ? 2 << (wndw->id * 4) : 0;
+}
+
+static void
+base507c_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x00e0, 1);
+		evo_data(push, asyw->lut.enable << 30);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+base507c_image_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 4))) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	const s32 oclass = wndw->wndw.base.user.oclass;
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 10))) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, asyw->image.mode << 8 |
+			       asyw->image.interval << 4);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, asyw->image.handle);
+		if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0800, 5);
+			evo_data(push, asyw->image.offset >> 8);
+			evo_data(push, 0x00000000);
+			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+			evo_data(push, (asyw->image.layout << 20) |
+					asyw->image.pitch |
+					asyw->image.block);
+			evo_data(push, (asyw->image.kind << 16) |
+				       (asyw->image.format << 8));
+		} else
+		if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0800, 5);
+			evo_data(push, asyw->image.offset >> 8);
+			evo_data(push, 0x00000000);
+			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+			evo_data(push, (asyw->image.layout << 20) |
+					asyw->image.pitch |
+					asyw->image.block);
+			evo_data(push, asyw->image.format << 8);
+		} else {
+			evo_mthd(push, 0x0400, 5);
+			evo_data(push, asyw->image.offset >> 8);
+			evo_data(push, 0x00000000);
+			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
+			evo_data(push, (asyw->image.layout << 24) |
+					asyw->image.pitch |
+					asyw->image.block);
+			evo_data(push, asyw->image.format << 8);
+		}
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static int
+base507c_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+	if (nvif_msec(&drm->client.device, 2000ULL,
+		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
+		if ((data & 0xc0000000) == 0x40000000)
+			break;
+		usleep_range(1, 2);
+	) < 0)
+		return -ETIMEDOUT;
+	return 0;
+}
+
+static void
+base507c_ntfy_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x00a4, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 3))) {
+		evo_mthd(push, 0x00a0, 2);
+		evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset);
+		evo_data(push, asyw->ntfy.handle);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+base507c_sema_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x0094, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 5))) {
+		evo_mthd(push, 0x0088, 4);
+		evo_data(push, asyw->sema.offset);
+		evo_data(push, asyw->sema.acquire);
+		evo_data(push, asyw->sema.release);
+		evo_data(push, asyw->sema.handle);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+base507c_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+		 struct nv50_head_atom *asyh)
+{
+	asyh->base.cpp = 0;
+}
+
+static int
+base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+		 struct nv50_head_atom *asyh)
+{
+	const struct drm_framebuffer *fb = asyw->state.fb;
+	int ret;
+
+	if (!fb->format->depth)
+		return -EINVAL;
+
+	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, true);
+	if (ret)
+		return ret;
+
+	asyh->base.depth = fb->format->depth;
+	asyh->base.cpp = fb->format->cpp[0];
+	asyh->base.x = asyw->state.src.x1 >> 16;
+	asyh->base.y = asyw->state.src.y1 >> 16;
+	asyh->base.w = asyw->state.fb->width;
+	asyh->base.h = asyw->state.fb->height;
+
+	switch (fb->format->format) {
+	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
+	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
+	case DRM_FORMAT_XRGB1555   :
+	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
+	case DRM_FORMAT_XRGB8888   :
+	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
+	case DRM_FORMAT_XBGR8888   :
+	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	asyw->lut.enable = 1;
+	asyw->set.image = true;
+	return 0;
+}
+
+static const u32
+base507c_format[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
+	0
+};
+
+static const struct nv50_wndw_func
+base507c = {
+	.acquire = base507c_acquire,
+	.release = base507c_release,
+	.sema_set = base507c_sema_set,
+	.sema_clr = base507c_sema_clr,
+	.ntfy_set = base507c_ntfy_set,
+	.ntfy_clr = base507c_ntfy_clr,
+	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.image_set = base507c_image_set,
+	.image_clr = base507c_image_clr,
+	.lut = base507c_lut,
+	.update = base507c_update,
+};
+
+static int
+base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
+	      struct nouveau_drm *drm, int head, s32 oclass,
+	      struct nv50_wndw **pwndw)
+{
+	struct nv50_disp_base_channel_dma_v0 args = {
+		.head = head,
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	struct nv50_wndw *wndw;
+	int ret;
+
+	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
+			     "base", head, format, &wndw);
+	if (*pwndw = wndw, ret)
+		return ret;
+
+	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+			       &oclass, head, &args, sizeof(args),
+			       disp->sync->bo.offset, &wndw->wndw);
+	if (ret) {
+		NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func,
+			       false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
+			       &(struct nvif_notify_uevent_req) {},
+			       sizeof(struct nvif_notify_uevent_req),
+			       sizeof(struct nvif_notify_uevent_rep),
+			       &wndw->notify);
+	if (ret)
+		return ret;
+
+	wndw->ntfy = NV50_DISP_BASE_NTFY(wndw->id);
+	wndw->sema = NV50_DISP_BASE_SEM0(wndw->id);
+	wndw->data = 0x00000000;
+	return 0;
+}
+
+int
+base507c_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return base507c_new_(&base507c, base507c_format, drm, head, oclass, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
new file mode 100644
index 000000000000..b12899fe052a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/class.h>
+
+void
+nv50_core_del(struct nv50_core **pcore)
+{
+	struct nv50_core *core = *pcore;
+	if (core) {
+		nv50_dmac_destroy(&core->chan);
+		kfree(*pcore);
+		*pcore = NULL;
+	}
+}
+
+int
+nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
+{
+	struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
+	} cores[] = {
+		{ GP102_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GP100_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GM200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GM107_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GK110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GK104_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GF110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GT214_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GT206_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GT200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{   G82_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{  NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
+
+	cid = nvif_mclass(&disp->disp->object, cores);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported core channel class\n");
+		return cid;
+	}
+
+	return cores[cid].new(drm, cores[cid].oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
new file mode 100644
index 000000000000..3cd54469311a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -0,0 +1,26 @@
+#ifndef __NV50_KMS_CORE_H__
+#define __NV50_KMS_CORE_H__
+#include "disp.h"
+#include "atom.h"
+
+struct nv50_core {
+	const struct nv50_core_func *func;
+	struct nv50_dmac chan;
+};
+
+int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
+void nv50_core_del(struct nv50_core **);
+
+struct nv50_core_func {
+	const struct nv50_head_func *head;
+	const struct nv50_outp_func {
+		void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
+			     struct nv50_head_atom *);
+	} *dac, *pior, *sor;
+};
+
+int core507d_new(struct nouveau_drm *, s32, struct nv50_core **);
+extern const struct nv50_outp_func dac507d;
+extern const struct nv50_outp_func sor507d;
+extern const struct nv50_outp_func pior507d;
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
new file mode 100644
index 000000000000..b0325f69a26f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/cl507d.h>
+
+#include "nouveau_bo.h"
+
+static const struct nv50_core_func
+core507d = {
+	.head = &head507d,
+	.dac = &dac507d,
+	.sor = &sor507d,
+	.pior = &pior507d,
+};
+
+static int
+core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
+	      s32 oclass, struct nv50_core **pcore)
+{
+	struct nv50_disp_core_channel_dma_v0 args = {};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	struct nv50_core *core;
+	int ret;
+
+	if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
+		return -ENOMEM;
+	core->func = func;
+
+	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+			       &oclass, 0, &args, sizeof(args),
+			       disp->sync->bo.offset, &core->chan);
+	if (ret) {
+		NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+	return core507d_new_(&core507d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
new file mode 100644
index 000000000000..6d60e978db69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "curs.h"
+
+#include <nvif/class.h>
+
+int
+nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+	struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+	} curses[] = {
+		{ GK104_DISP_CURSOR, 0, curs507a_new },
+		{ GF110_DISP_CURSOR, 0, curs507a_new },
+		{ GT214_DISP_CURSOR, 0, curs507a_new },
+		{   G82_DISP_CURSOR, 0, curs507a_new },
+		{  NV50_DISP_CURSOR, 0, curs507a_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
+
+	cid = nvif_mclass(&disp->disp->object, curses);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported cursor immediate class\n");
+		return cid;
+	}
+
+	return curses[cid].new(drm, head, curses[cid].oclass, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.h b/drivers/gpu/drm/nouveau/dispnv50/curs.h
new file mode 100644
index 000000000000..b85ca9fa419c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.h
@@ -0,0 +1,8 @@
+#ifndef __NV50_KMS_CURS_H__
+#define __NV50_KMS_CURS_H__
+#include "wndw.h"
+
+int curs507a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+
+int nv50_curs_new(struct nouveau_drm *, int head, struct nv50_wndw **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
new file mode 100644
index 000000000000..1a3e199b5b45
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "curs.h"
+#include "core.h"
+
+#include <nvif/cl507a.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+static u32
+curs507a_update(struct nv50_wndw *wndw, u32 interlock)
+{
+	nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
+	return 0;
+}
+
+static void
+curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
+						 asyw->point.x);
+}
+
+static const struct nv50_wimm_func
+curs507a = {
+	.point = curs507a_point,
+	.update = curs507a_update,
+};
+
+static void
+curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
+		 struct nv50_wndw_atom *asyw)
+{
+	u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle;
+	u32 offset = asyw->image.offset;
+	if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
+		asyh->curs.handle = handle;
+		asyh->curs.offset = offset;
+		asyh->set.curs = asyh->curs.visible;
+	}
+}
+
+static void
+curs507a_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+		 struct nv50_head_atom *asyh)
+{
+	asyh->curs.visible = false;
+}
+
+static int
+curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+		 struct nv50_head_atom *asyh)
+{
+	int ret;
+
+	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  true, true);
+	asyh->curs.visible = asyw->state.visible;
+	if (ret || !asyh->curs.visible)
+		return ret;
+
+	switch (asyw->state.fb->width) {
+	case 32: asyh->curs.layout = 0; break;
+	case 64: asyh->curs.layout = 1; break;
+	default:
+		return -EINVAL;
+	}
+
+	if (asyw->state.fb->width != asyw->state.fb->height)
+		return -EINVAL;
+
+	switch (asyw->state.fb->format->format) {
+	case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const u32
+curs507a_format[] = {
+	DRM_FORMAT_ARGB8888,
+	0
+};
+
+static const struct nv50_wndw_func
+curs507a_wndw = {
+	.acquire = curs507a_acquire,
+	.release = curs507a_release,
+	.prepare = curs507a_prepare,
+};
+
+static int
+curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+	      int head, s32 oclass, struct nv50_wndw **pwndw)
+{
+	struct nv50_disp_cursor_v0 args = {
+		.head = head,
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	struct nv50_wndw *wndw;
+	int ret;
+
+	ret = nv50_wndw_new_(&curs507a_wndw, drm->dev, DRM_PLANE_TYPE_CURSOR,
+			     "curs", head, curs507a_format, &wndw);
+	if (*pwndw = wndw, ret)
+		return ret;
+
+	ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
+			       sizeof(args), &wndw->wimm.base.user);
+	if (ret) {
+		NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
+	wndw->immd = func;
+	wndw->ctxdma.parent = &disp->core->chan.base.user;
+	return 0;
+}
+
+int
+curs507a_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return curs507a_new_(&curs507a, drm, head, oclass, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
new file mode 100644
index 000000000000..28b6025a80f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/class.h>
+
+static void
+dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	     struct nv50_head_atom *asyh)
+{
+	u32 *push, sync = 0;
+	if ((push = evo_wait(&core->chan, 3))) {
+		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			if (asyh) {
+				sync |= asyh->or.nvsync << 1;
+				sync |= asyh->or.nhsync;
+			}
+			evo_mthd(push, 0x0400 + (or * 0x080), 2);
+			evo_data(push, ctrl);
+			evo_data(push, sync);
+		} else {
+			evo_mthd(push, 0x0180 + (or * 0x020), 1);
+			evo_data(push, ctrl);
+		}
+		evo_kick(push, &core->chan);
+	}
+}
+
+const struct nv50_outp_func
+dac507d = {
+	.ctrl = dac507d_ctrl,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 995109ee5762..a8367c5d4691 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -21,12 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
+#include "disp.h"
+#include "atom.h"
+#include "core.h"
+#include "head.h"
+#include "wndw.h"
 
 #include <linux/dma-mapping.h>
 #include <linux/hdmi.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_dp_helper.h>
@@ -34,16 +38,10 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_edid.h>
 
-#include <nvif/mem.h>
-
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 #include <nvif/cl5070.h>
-#include <nvif/cl507a.h>
-#include <nvif/cl507b.h>
-#include <nvif/cl507c.h>
 #include <nvif/cl507d.h>
-#include <nvif/cl507e.h>
 #include <nvif/event.h>
 
 #include "nouveau_drv.h"
@@ -51,39 +49,12 @@
 #include "nouveau_gem.h"
 #include "nouveau_connector.h"
 #include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
 #include "nouveau_fence.h"
 #include "nouveau_fbcon.h"
-#include "nv50_display.h"
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER  (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
-#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
-#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
-#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
-#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
 
 /******************************************************************************
  * Atomic state
  *****************************************************************************/
-#define nv50_atom(p) container_of((p), struct nv50_atom, state)
-
-struct nv50_atom {
-	struct drm_atomic_state state;
-
-	struct list_head outp;
-	bool lock_core;
-	bool flush_disable;
-};
 
 struct nv50_outp_atom {
 	struct list_head head;
@@ -106,209 +77,10 @@ struct nv50_outp_atom {
 	} set;
 };
 
-#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
-
-struct nv50_head_atom {
-	struct drm_crtc_state state;
-
-	struct {
-		u16 iW;
-		u16 iH;
-		u16 oW;
-		u16 oH;
-	} view;
-
-	struct nv50_head_mode {
-		bool interlace;
-		u32 clock;
-		struct {
-			u16 active;
-			u16 synce;
-			u16 blanke;
-			u16 blanks;
-		} h;
-		struct {
-			u32 active;
-			u16 synce;
-			u16 blanke;
-			u16 blanks;
-			u16 blank2s;
-			u16 blank2e;
-			u16 blankus;
-		} v;
-	} mode;
-
-	struct {
-		bool visible;
-		u32 handle;
-		u64 offset:40;
-		u8  mode:4;
-	} lut;
-
-	struct {
-		bool visible;
-		u32 handle;
-		u64 offset:40;
-		u8  format;
-		u8  kind:7;
-		u8  layout:1;
-		u8  block:4;
-		u32 pitch:20;
-		u16 x;
-		u16 y;
-		u16 w;
-		u16 h;
-	} core;
-
-	struct {
-		bool visible;
-		u32 handle;
-		u64 offset:40;
-		u8  layout:1;
-		u8  format:1;
-	} curs;
-
-	struct {
-		u8  depth;
-		u8  cpp;
-		u16 x;
-		u16 y;
-		u16 w;
-		u16 h;
-	} base;
-
-	struct {
-		u8 cpp;
-	} ovly;
-
-	struct {
-		bool enable:1;
-		u8 bits:2;
-		u8 mode:4;
-	} dither;
-
-	struct {
-		struct {
-			u16 cos:12;
-			u16 sin:12;
-		} sat;
-	} procamp;
-
-	struct {
-		u8 nhsync:1;
-		u8 nvsync:1;
-		u8 depth:4;
-	} or;
-
-	union {
-		struct {
-			bool ilut:1;
-			bool core:1;
-			bool curs:1;
-		};
-		u8 mask;
-	} clr;
-
-	union {
-		struct {
-			bool ilut:1;
-			bool core:1;
-			bool curs:1;
-			bool view:1;
-			bool mode:1;
-			bool base:1;
-			bool ovly:1;
-			bool dither:1;
-			bool procamp:1;
-			bool or:1;
-		};
-		u16 mask;
-	} set;
-};
-
-static inline struct nv50_head_atom *
-nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
-{
-	struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(statec))
-		return (void *)statec;
-	return nv50_head_atom(statec);
-}
-
-#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
-
-struct nv50_wndw_atom {
-	struct drm_plane_state state;
-	u8 interval;
-
-	struct {
-		u32  handle;
-		u16  offset:12;
-		bool awaken:1;
-	} ntfy;
-
-	struct {
-		u32 handle;
-		u16 offset:12;
-		u32 acquire;
-		u32 release;
-	} sema;
-
-	struct {
-		u8 enable:2;
-	} lut;
-
-	struct {
-		u8  mode:2;
-		u8  interval:4;
-
-		u8  format;
-		u8  kind:7;
-		u8  layout:1;
-		u8  block:4;
-		u32 pitch:20;
-		u16 w;
-		u16 h;
-
-		u32 handle;
-		u64 offset;
-	} image;
-
-	struct {
-		u16 x;
-		u16 y;
-	} point;
-
-	union {
-		struct {
-			bool ntfy:1;
-			bool sema:1;
-			bool image:1;
-		};
-		u8 mask;
-	} clr;
-
-	union {
-		struct {
-			bool ntfy:1;
-			bool sema:1;
-			bool image:1;
-			bool lut:1;
-			bool point:1;
-		};
-		u8 mask;
-	} set;
-};
-
 /******************************************************************************
  * EVO channel
  *****************************************************************************/
 
-struct nv50_chan {
-	struct nvif_object user;
-	struct nvif_device *device;
-};
-
 static int
 nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
 		 const s32 *oclass, u8 head, void *data, u32 size,
@@ -351,27 +123,7 @@ nv50_chan_destroy(struct nv50_chan *chan)
  * DMA EVO channel
  *****************************************************************************/
 
-struct nv50_wndw_ctxdma {
-	struct list_head head;
-	struct nvif_object object;
-};
-
-struct nv50_dmac {
-	struct nv50_chan base;
-
-	struct nvif_mem push;
-	u32 *ptr;
-
-	struct nvif_object sync;
-	struct nvif_object vram;
-
-	/* Protects against concurrent pushbuf access to this channel, lock is
-	 * grabbed by evo_wait (if the pushbuf reservation is successful) and
-	 * dropped again by evo_kick. */
-	struct mutex lock;
-};
-
-static void
+void
 nv50_dmac_destroy(struct nv50_dmac *dmac)
 {
 	nvif_object_fini(&dmac->vram);
@@ -382,7 +134,7 @@ nv50_dmac_destroy(struct nv50_dmac *dmac)
 	nvif_mem_fini(&dmac->push);
 }
 
-static int
+int
 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
 		 struct nv50_dmac *dmac)
@@ -432,108 +184,11 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	return ret;
 }
 
-/******************************************************************************
- * Base
- *****************************************************************************/
-
-struct nv50_sync {
-	struct nv50_dmac base;
-	u32 addr;
-	u32 data;
-};
-
-struct nv50_head {
-	const struct nv50_head_func *func;
-	struct nouveau_crtc base;
-	struct {
-		struct nouveau_bo *nvbo[2];
-		int next;
-	} lut;
-};
-
-struct nv50_head_func {
-	void (*view)(struct nv50_head *, struct nv50_head_atom *);
-	void (*mode)(struct nv50_head *, struct nv50_head_atom *);
-	void (*ilut_set)(struct nv50_head *, struct nv50_head_atom *);
-	void (*ilut_clr)(struct nv50_head *);
-	void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
-	void (*core_clr)(struct nv50_head *);
-	void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
-	void (*curs_clr)(struct nv50_head *);
-	void (*base)(struct nv50_head *, struct nv50_head_atom *);
-	void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
-	void (*dither)(struct nv50_head *, struct nv50_head_atom *);
-	void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
-	void (*or)(struct nv50_head *, struct nv50_head_atom *);
-};
-
-#define nv50_head(c) container_of((c), struct nv50_head, base.base)
-
-struct nv50_disp {
-	struct nvif_disp *disp;
-	struct nv50_core *core;
-
-	struct nouveau_bo *sync;
-
-	struct mutex mutex;
-};
-
-static struct nv50_disp *
-nv50_disp(struct drm_device *dev)
-{
-	return nouveau_display(dev)->priv;
-}
-
-/******************************************************************************
- * Core
- *****************************************************************************/
-
-struct nv50_core {
-	const struct nv50_core_func *func;
-	struct nv50_dmac chan;
-};
-
-struct nv50_core_func {
-	const struct nv50_head_func *head;
-	const struct nv50_outp_func *dac;
-	const struct nv50_outp_func *sor;
-	const struct nv50_outp_func *pior;
-};
-
-struct nv50_outp_func {
-	void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
-		     struct nv50_head_atom *);
-};
-
-static int
-core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
-	      s32 oclass, struct nv50_core **pcore)
-{
-	struct nv50_disp_core_channel_dma_v0 args = {};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	struct nv50_core *core;
-	int ret;
-
-	if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
-		return -ENOMEM;
-	core->func = func;
-
-	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
-			       &oclass, 0, &args, sizeof(args),
-			       disp->sync->bo.offset, &core->chan);
-	if (ret) {
-		NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
 /******************************************************************************
  * EVO channel helpers
  *****************************************************************************/
-static u32 *
-evo_wait(void *evoc, int nr)
+u32 *
+evo_wait(struct nv50_dmac *evoc, int nr)
 {
 	struct nv50_dmac *dmac = evoc;
 	struct nvif_device *device = dmac->base.device;
@@ -559,2063 +214,14 @@ evo_wait(void *evoc, int nr)
 	return dmac->ptr + put;
 }
 
-static void
-evo_kick(u32 *push, void *evoc)
+void
+evo_kick(u32 *push, struct nv50_dmac *evoc)
 {
 	struct nv50_dmac *dmac = evoc;
 	nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
 	mutex_unlock(&dmac->lock);
 }
 
-#define evo_mthd(p, m, s) do {						\
-	const u32 _m = (m), _s = (s);					\
-	if (drm_debug & DRM_UT_KMS)					\
-		pr_err("%04x %d %s\n", _m, _s, __func__);		\
-	*((p)++) = ((_s << 18) | _m);					\
-} while(0)
-
-#define evo_data(p, d) do {						\
-	const u32 _d = (d);						\
-	if (drm_debug & DRM_UT_KMS)					\
-		pr_err("\t%08x\n", _d);					\
-	*((p)++) = _d;							\
-} while(0)
-
-/******************************************************************************
- * Plane
- *****************************************************************************/
-#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
-
-struct nv50_wndw {
-	const struct nv50_wndw_func *func;
-	const struct nv50_wimm_func *immd;
-	int id;
-
-	struct {
-		struct nvif_object *parent;
-		struct list_head list;
-	} ctxdma;
-
-	struct drm_plane plane;
-
-	struct nv50_dmac wndw;
-	struct nv50_dmac wimm;
-
-	struct nvif_notify notify;
-	u16 ntfy;
-	u16 sema;
-	u32 data;
-};
-
-struct nv50_wndw_func {
-	int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
-		       struct nv50_head_atom *asyh);
-	void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
-			struct nv50_head_atom *asyh);
-	void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
-			struct nv50_wndw_atom *asyw);
-
-	void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-	void (*sema_clr)(struct nv50_wndw *);
-	void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-	void (*ntfy_clr)(struct nv50_wndw *);
-	int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
-	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
-	void (*image_clr)(struct nv50_wndw *);
-	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
-
-	u32 (*update)(struct nv50_wndw *, u32 interlock);
-};
-
-struct nv50_wimm_func {
-	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
-
-	u32 (*update)(struct nv50_wndw *, u32 interlock);
-};
-
-static void
-nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
-{
-	nvif_object_fini(&ctxdma->object);
-	list_del(&ctxdma->head);
-	kfree(ctxdma);
-}
-
-static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
-{
-	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
-	struct nv50_wndw_ctxdma *ctxdma;
-	const u8    kind = fb->nvbo->kind;
-	const u32 handle = 0xfb000000 | kind;
-	struct {
-		struct nv_dma_v0 base;
-		union {
-			struct nv50_dma_v0 nv50;
-			struct gf100_dma_v0 gf100;
-			struct gf119_dma_v0 gf119;
-		};
-	} args = {};
-	u32 argc = sizeof(args.base);
-	int ret;
-
-	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
-		if (ctxdma->object.handle == handle)
-			return ctxdma;
-	}
-
-	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
-		return ERR_PTR(-ENOMEM);
-	list_add(&ctxdma->head, &wndw->ctxdma.list);
-
-	args.base.target = NV_DMA_V0_TARGET_VRAM;
-	args.base.access = NV_DMA_V0_ACCESS_RDWR;
-	args.base.start  = 0;
-	args.base.limit  = drm->client.device.info.ram_user - 1;
-
-	if (drm->client.device.info.chipset < 0x80) {
-		args.nv50.part = NV50_DMA_V0_PART_256;
-		argc += sizeof(args.nv50);
-	} else
-	if (drm->client.device.info.chipset < 0xc0) {
-		args.nv50.part = NV50_DMA_V0_PART_256;
-		args.nv50.kind = kind;
-		argc += sizeof(args.nv50);
-	} else
-	if (drm->client.device.info.chipset < 0xd0) {
-		args.gf100.kind = kind;
-		argc += sizeof(args.gf100);
-	} else {
-		args.gf119.page = GF119_DMA_V0_PAGE_LP;
-		args.gf119.kind = kind;
-		argc += sizeof(args.gf119);
-	}
-
-	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
-			       &args, argc, &ctxdma->object);
-	if (ret) {
-		nv50_wndw_ctxdma_del(ctxdma);
-		return ERR_PTR(ret);
-	}
-
-	return ctxdma;
-}
-
-static int
-nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-	if (asyw->set.ntfy)
-		return wndw->func->ntfy_wait_begun(wndw, asyw);
-	return 0;
-}
-
-static u32
-nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
-		    struct nv50_wndw_atom *asyw)
-{
-	if (asyw->clr.sema && (!asyw->set.sema || flush))
-		wndw->func->sema_clr(wndw);
-	if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
-		wndw->func->ntfy_clr(wndw);
-	if (asyw->clr.image && (!asyw->set.image || flush))
-		wndw->func->image_clr(wndw);
-
-	return flush ? wndw->func->update(wndw, interlock) : 0;
-}
-
-static u32
-nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
-		    struct nv50_wndw_atom *asyw)
-{
-	if (interlock) {
-		asyw->image.mode = 0;
-		asyw->image.interval = 1;
-	}
-
-	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
-	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
-	if (asyw->set.image) wndw->func->image_set(wndw, asyw);
-	if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
-	if (asyw->set.point) {
-		wndw->immd->point(wndw, asyw);
-		wndw->immd->update(wndw, interlock);
-	}
-
-	return wndw->func->update ? wndw->func->update(wndw, interlock) : 0;
-}
-
-static void
-nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
-			       struct nv50_wndw_atom *asyw,
-			       struct nv50_head_atom *asyh)
-{
-	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
-	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
-	wndw->func->release(wndw, asyw, asyh);
-	asyw->ntfy.handle = 0;
-	asyw->sema.handle = 0;
-}
-
-static int
-nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
-			       struct nv50_wndw_atom *asyw,
-			       struct nv50_head_atom *asyh)
-{
-	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
-	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
-	int ret;
-
-	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
-
-	asyw->image.w = fb->base.width;
-	asyw->image.h = fb->base.height;
-	asyw->image.kind = fb->nvbo->kind;
-
-	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
-		asyw->interval = 0;
-	else
-		asyw->interval = 1;
-
-	if (asyw->image.kind) {
-		asyw->image.layout = 0;
-		if (drm->client.device.info.chipset >= 0xc0)
-			asyw->image.block = fb->nvbo->mode >> 4;
-		else
-			asyw->image.block = fb->nvbo->mode;
-		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
-	} else {
-		asyw->image.layout = 1;
-		asyw->image.block  = 0;
-		asyw->image.pitch  = fb->base.pitches[0];
-	}
-
-	ret = wndw->func->acquire(wndw, asyw, asyh);
-	if (ret)
-		return ret;
-
-	if (asyw->set.image) {
-		if (!(asyw->image.mode = asyw->interval ? 0 : 1))
-			asyw->image.interval = asyw->interval;
-		else
-			asyw->image.interval = 0;
-	}
-
-	return 0;
-}
-
-static int
-nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
-{
-	struct nouveau_drm *drm = nouveau_drm(plane->dev);
-	struct nv50_wndw *wndw = nv50_wndw(plane);
-	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
-	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
-	struct nv50_head_atom *harm = NULL, *asyh = NULL;
-	bool varm = false, asyv = false, asym = false;
-	int ret;
-
-	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
-	if (asyw->state.crtc) {
-		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
-		if (IS_ERR(asyh))
-			return PTR_ERR(asyh);
-		asym = drm_atomic_crtc_needs_modeset(&asyh->state);
-		asyv = asyh->state.active;
-	}
-
-	if (armw->state.crtc) {
-		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
-		if (IS_ERR(harm))
-			return PTR_ERR(harm);
-		varm = harm->state.crtc->state->active;
-	}
-
-	if (asyv) {
-		asyw->point.x = asyw->state.crtc_x;
-		asyw->point.y = asyw->state.crtc_y;
-		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
-			asyw->set.point = true;
-
-		ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
-		if (ret)
-			return ret;
-	} else
-	if (varm) {
-		nv50_wndw_atomic_check_release(wndw, asyw, harm);
-	} else {
-		return 0;
-	}
-
-	if (!asyv || asym) {
-		asyw->clr.ntfy = armw->ntfy.handle != 0;
-		asyw->clr.sema = armw->sema.handle != 0;
-		if (wndw->func->image_clr)
-			asyw->clr.image = armw->image.handle != 0;
-		asyw->set.lut = wndw->func->lut && asyv;
-	}
-
-	return 0;
-}
-
-static void
-nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
-{
-	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
-	struct nouveau_drm *drm = nouveau_drm(plane->dev);
-
-	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
-	if (!old_state->fb)
-		return;
-
-	nouveau_bo_unpin(fb->nvbo);
-}
-
-static int
-nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
-{
-	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
-	struct nouveau_drm *drm = nouveau_drm(plane->dev);
-	struct nv50_wndw *wndw = nv50_wndw(plane);
-	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
-	struct nv50_head_atom *asyh;
-	struct nv50_wndw_ctxdma *ctxdma;
-	int ret;
-
-	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
-	if (!asyw->state.fb)
-		return 0;
-
-	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
-	if (ret)
-		return ret;
-
-	ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
-	if (IS_ERR(ctxdma)) {
-		nouveau_bo_unpin(fb->nvbo);
-		return PTR_ERR(ctxdma);
-	}
-
-	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
-	asyw->image.handle = ctxdma->object.handle;
-	asyw->image.offset = fb->nvbo->bo.offset;
-
-	if (wndw->func->prepare) {
-		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
-		if (IS_ERR(asyh))
-			return PTR_ERR(asyh);
-
-		wndw->func->prepare(wndw, asyh, asyw);
-	}
-
-	return 0;
-}
-
-static const struct drm_plane_helper_funcs
-nv50_wndw_helper = {
-	.prepare_fb = nv50_wndw_prepare_fb,
-	.cleanup_fb = nv50_wndw_cleanup_fb,
-	.atomic_check = nv50_wndw_atomic_check,
-};
-
-static void
-nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
-			       struct drm_plane_state *state)
-{
-	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
-	__drm_atomic_helper_plane_destroy_state(&asyw->state);
-	kfree(asyw);
-}
-
-static struct drm_plane_state *
-nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
-{
-	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
-	struct nv50_wndw_atom *asyw;
-	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
-		return NULL;
-	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
-	asyw->interval = 1;
-	asyw->sema = armw->sema;
-	asyw->ntfy = armw->ntfy;
-	asyw->image = armw->image;
-	asyw->point = armw->point;
-	asyw->lut = armw->lut;
-	asyw->clr.mask = 0;
-	asyw->set.mask = 0;
-	return &asyw->state;
-}
-
-static void
-nv50_wndw_reset(struct drm_plane *plane)
-{
-	struct nv50_wndw_atom *asyw;
-
-	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
-		return;
-
-	if (plane->state)
-		plane->funcs->atomic_destroy_state(plane, plane->state);
-	plane->state = &asyw->state;
-	plane->state->plane = plane;
-	plane->state->rotation = DRM_MODE_ROTATE_0;
-}
-
-static void
-nv50_wndw_destroy(struct drm_plane *plane)
-{
-	struct nv50_wndw *wndw = nv50_wndw(plane);
-	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
-
-	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
-		nv50_wndw_ctxdma_del(ctxdma);
-	}
-
-	nvif_notify_fini(&wndw->notify);
-	nv50_dmac_destroy(&wndw->wimm);
-	nv50_dmac_destroy(&wndw->wndw);
-	drm_plane_cleanup(&wndw->plane);
-	kfree(wndw);
-}
-
-static const struct drm_plane_funcs
-nv50_wndw = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = nv50_wndw_destroy,
-	.reset = nv50_wndw_reset,
-	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
-	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
-};
-
-static int
-nv50_wndw_notify(struct nvif_notify *notify)
-{
-	return NVIF_NOTIFY_KEEP;
-}
-
-static void
-nv50_wndw_fini(struct nv50_wndw *wndw)
-{
-	nvif_notify_put(&wndw->notify);
-}
-
-static void
-nv50_wndw_init(struct nv50_wndw *wndw)
-{
-	nvif_notify_get(&wndw->notify);
-}
-
-static int
-nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
-	       enum drm_plane_type type, const char *name, int index,
-	       const u32 *format, struct nv50_wndw **pwndw)
-{
-	struct nv50_wndw *wndw;
-	int nformat;
-	int ret;
-
-	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
-		return -ENOMEM;
-	wndw->func = func;
-	wndw->id = index;
-
-	wndw->ctxdma.parent = &wndw->wndw.base.user;
-	INIT_LIST_HEAD(&wndw->ctxdma.list);
-
-	for (nformat = 0; format[nformat]; nformat++);
-
-	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
-				       format, nformat, NULL,
-				       type, "%s-%d", name, index);
-	if (ret) {
-		kfree(*pwndw);
-		*pwndw = NULL;
-		return ret;
-	}
-
-	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
-
-	wndw->notify.func = nv50_wndw_notify;
-	return 0;
-}
-
-/******************************************************************************
- * Overlay
- *****************************************************************************/
-
-static const struct nv50_wimm_func
-oimm507b = {
-};
-
-static int
-oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
-	       s32 oclass, struct nv50_wndw *wndw)
-{
-	struct nv50_disp_overlay_v0 args = {
-		.head = wndw->id,
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	int ret;
-
-	ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
-			       sizeof(args), &wndw->wimm.base.user);
-	if (ret) {
-		NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
-		return ret;
-	}
-
-	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
-	wndw->immd = func;
-	return 0;
-}
-
-static int
-oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
-{
-	return oimm507b_init_(&oimm507b, drm, oclass, wndw);
-}
-
-static int
-nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
-{
-	static const struct {
-		s32 oclass;
-		int version;
-		int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
-	} oimms[] = {
-		{ GK104_DISP_OVERLAY, 0, oimm507b_init },
-		{ GF110_DISP_OVERLAY, 0, oimm507b_init },
-		{ GT214_DISP_OVERLAY, 0, oimm507b_init },
-		{   G82_DISP_OVERLAY, 0, oimm507b_init },
-		{  NV50_DISP_OVERLAY, 0, oimm507b_init },
-		{}
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	int cid;
-
-	cid = nvif_mclass(&disp->disp->object, oimms);
-	if (cid < 0) {
-		NV_ERROR(drm, "No supported overlay immediate class\n");
-		return cid;
-	}
-
-	return oimms[cid].init(drm, oimms[cid].oclass, wndw);
-}
-
-static const struct nv50_wndw_func
-ovly507e = {
-};
-
-static const u32
-ovly507e_format[] = {
-	0
-};
-
-static int
-ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
-	      struct nouveau_drm *drm, int head, s32 oclass,
-	      struct nv50_wndw **pwndw)
-{
-	struct nv50_disp_overlay_channel_dma_v0 args = {
-		.head = head,
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	struct nv50_wndw *wndw;
-	int ret;
-
-	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
-			     "ovly", head, format, &wndw);
-	if (*pwndw = wndw, ret)
-		return ret;
-
-	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
-			       &oclass, 0, &args, sizeof(args),
-			       disp->sync->bo.offset, &wndw->wndw);
-	if (ret) {
-		NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int
-ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
-	     struct nv50_wndw **pwndw)
-{
-	return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass, pwndw);
-}
-
-static int
-nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
-{
-	static const struct {
-		s32 oclass;
-		int version;
-		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
-	} ovlys[] = {
-		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
-		{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
-		{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
-		{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
-		{   G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
-		{  NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
-		{}
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	int cid, ret;
-
-	cid = nvif_mclass(&disp->disp->object, ovlys);
-	if (cid < 0) {
-		NV_ERROR(drm, "No supported overlay class\n");
-		return cid;
-	}
-
-	ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw);
-	if (ret)
-		return ret;
-
-	return nv50_oimm_init(drm, *pwndw);
-}
-
-/******************************************************************************
- * Cursor plane
- *****************************************************************************/
-static u32
-nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
-{
-	nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
-	return 0;
-}
-
-static void
-nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-	nvif_wr32(&wndw->wimm.base.user, 0x0084, (asyw->point.y << 16) |
-						  asyw->point.x);
-}
-
-static const struct nv50_wimm_func
-curs507a = {
-	.point = nv50_curs_point,
-	.update = nv50_curs_update,
-};
-
-static void
-nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
-		  struct nv50_wndw_atom *asyw)
-{
-	u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle;
-	u32 offset = asyw->image.offset;
-	if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
-		asyh->curs.handle = handle;
-		asyh->curs.offset = offset;
-		asyh->set.curs = asyh->curs.visible;
-	}
-}
-
-static void
-nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
-		  struct nv50_head_atom *asyh)
-{
-	asyh->curs.visible = false;
-}
-
-static int
-nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
-		  struct nv50_head_atom *asyh)
-{
-	int ret;
-
-	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
-						  DRM_PLANE_HELPER_NO_SCALING,
-						  DRM_PLANE_HELPER_NO_SCALING,
-						  true, true);
-	asyh->curs.visible = asyw->state.visible;
-	if (ret || !asyh->curs.visible)
-		return ret;
-
-	switch (asyw->state.fb->width) {
-	case 32: asyh->curs.layout = 0; break;
-	case 64: asyh->curs.layout = 1; break;
-	default:
-		return -EINVAL;
-	}
-
-	if (asyw->state.fb->width != asyw->state.fb->height)
-		return -EINVAL;
-
-	switch (asyw->state.fb->format->format) {
-	case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
-	default:
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static const u32
-nv50_curs_format[] = {
-	DRM_FORMAT_ARGB8888,
-	0
-};
-
-static const struct nv50_wndw_func
-nv50_curs = {
-	.acquire = nv50_curs_acquire,
-	.release = nv50_curs_release,
-	.prepare = nv50_curs_prepare,
-};
-
-static int
-curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
-	      int head, s32 oclass, struct nv50_wndw **pwndw)
-{
-	struct nv50_disp_cursor_v0 args = {
-		.head = head,
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	struct nv50_wndw *wndw;
-	int ret;
-
-	ret = nv50_wndw_new_(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
-			     "curs", head, nv50_curs_format, &wndw);
-	if (*pwndw = wndw, ret)
-		return ret;
-
-	ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
-			       sizeof(args), &wndw->wimm.base.user);
-	if (ret) {
-		NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
-		return ret;
-	}
-
-	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
-	wndw->immd = func;
-	wndw->ctxdma.parent = &disp->core->chan.base.user;
-	return 0;
-}
-
-static int
-curs507a_new(struct nouveau_drm *drm, int head, s32 oclass,
-	     struct nv50_wndw **pwndw)
-{
-	return curs507a_new_(&curs507a, drm, head, oclass, pwndw);
-}
-
-static int
-nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
-{
-	struct {
-		s32 oclass;
-		int version;
-		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
-	} curses[] = {
-		{ GK104_DISP_CURSOR, 0, curs507a_new },
-		{ GF110_DISP_CURSOR, 0, curs507a_new },
-		{ GT214_DISP_CURSOR, 0, curs507a_new },
-		{   G82_DISP_CURSOR, 0, curs507a_new },
-		{  NV50_DISP_CURSOR, 0, curs507a_new },
-		{}
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	int cid;
-
-	cid = nvif_mclass(&disp->disp->object, curses);
-	if (cid < 0) {
-		NV_ERROR(drm, "No supported cursor immediate class\n");
-		return cid;
-	}
-
-	return curses[cid].new(drm, head, curses[cid].oclass, pwndw);
-}
-
-/******************************************************************************
- * Primary plane
- *****************************************************************************/
-static void
-nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 2))) {
-		evo_mthd(push, 0x00e0, 1);
-		evo_data(push, asyw->lut.enable << 30);
-		evo_kick(push, &wndw->wndw);
-	}
-}
-
-static void
-nv50_base_image_clr(struct nv50_wndw *wndw)
-{
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 4))) {
-		evo_mthd(push, 0x0084, 1);
-		evo_data(push, 0x00000000);
-		evo_mthd(push, 0x00c0, 1);
-		evo_data(push, 0x00000000);
-		evo_kick(push, &wndw->wndw);
-	}
-}
-
-static void
-nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-	const s32 oclass = wndw->wndw.base.user.oclass;
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 10))) {
-		evo_mthd(push, 0x0084, 1);
-		evo_data(push, (asyw->image.mode << 8) |
-			       (asyw->image.interval << 4));
-		evo_mthd(push, 0x00c0, 1);
-		evo_data(push, asyw->image.handle);
-		if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0800, 5);
-			evo_data(push, asyw->image.offset >> 8);
-			evo_data(push, 0x00000000);
-			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
-			evo_data(push, (asyw->image.layout << 20) |
-					asyw->image.pitch |
-					asyw->image.block);
-			evo_data(push, (asyw->image.kind << 16) |
-				       (asyw->image.format << 8));
-		} else
-		if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0800, 5);
-			evo_data(push, asyw->image.offset >> 8);
-			evo_data(push, 0x00000000);
-			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
-			evo_data(push, (asyw->image.layout << 20) |
-					asyw->image.pitch |
-					asyw->image.block);
-			evo_data(push, asyw->image.format << 8);
-		} else {
-			evo_mthd(push, 0x0400, 5);
-			evo_data(push, asyw->image.offset >> 8);
-			evo_data(push, 0x00000000);
-			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
-			evo_data(push, (asyw->image.layout << 24) |
-					asyw->image.pitch |
-					asyw->image.block);
-			evo_data(push, asyw->image.format << 8);
-		}
-		evo_kick(push, &wndw->wndw);
-	}
-}
-
-static void
-nv50_base_ntfy_clr(struct nv50_wndw *wndw)
-{
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 2))) {
-		evo_mthd(push, 0x00a4, 1);
-		evo_data(push, 0x00000000);
-		evo_kick(push, &wndw->wndw);
-	}
-}
-
-static void
-nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 3))) {
-		evo_mthd(push, 0x00a0, 2);
-		evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
-		evo_data(push, asyw->ntfy.handle);
-		evo_kick(push, &wndw->wndw);
-	}
-}
-
-static void
-nv50_base_sema_clr(struct nv50_wndw *wndw)
-{
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 2))) {
-		evo_mthd(push, 0x0094, 1);
-		evo_data(push, 0x00000000);
-		evo_kick(push, &wndw->wndw);
-	}
-}
-
-static void
-nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 5))) {
-		evo_mthd(push, 0x0088, 4);
-		evo_data(push, asyw->sema.offset);
-		evo_data(push, asyw->sema.acquire);
-		evo_data(push, asyw->sema.release);
-		evo_data(push, asyw->sema.handle);
-		evo_kick(push, &wndw->wndw);
-	}
-}
-
-static u32
-nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
-{
-	u32 *push;
-
-	if (!(push = evo_wait(&wndw->wndw, 2)))
-		return 0;
-	evo_mthd(push, 0x0080, 1);
-	evo_data(push, interlock);
-	evo_kick(push, &wndw->wndw);
-
-	if (wndw->wndw.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
-		return interlock ? 2 << (wndw->id * 8) : 0;
-	return interlock ? 2 << (wndw->id * 4) : 0;
-}
-
-static int
-nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
-	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
-	if (nvif_msec(&drm->client.device, 2000ULL,
-		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
-		if ((data & 0xc0000000) == 0x40000000)
-			break;
-		usleep_range(1, 2);
-	) < 0)
-		return -ETIMEDOUT;
-	return 0;
-}
-
-static void
-nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
-		  struct nv50_head_atom *asyh)
-{
-	asyh->base.cpp = 0;
-}
-
-static int
-nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
-		  struct nv50_head_atom *asyh)
-{
-	const struct drm_framebuffer *fb = asyw->state.fb;
-	int ret;
-
-	if (!fb->format->depth)
-		return -EINVAL;
-
-	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
-						  DRM_PLANE_HELPER_NO_SCALING,
-						  DRM_PLANE_HELPER_NO_SCALING,
-						  false, true);
-	if (ret)
-		return ret;
-
-	asyh->base.depth = fb->format->depth;
-	asyh->base.cpp = fb->format->cpp[0];
-	asyh->base.x = asyw->state.src.x1 >> 16;
-	asyh->base.y = asyw->state.src.y1 >> 16;
-	asyh->base.w = asyw->state.fb->width;
-	asyh->base.h = asyw->state.fb->height;
-
-	switch (fb->format->format) {
-	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
-	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
-	case DRM_FORMAT_XRGB1555   :
-	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
-	case DRM_FORMAT_XRGB8888   :
-	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
-	case DRM_FORMAT_XBGR8888   :
-	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
-	default:
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	asyw->lut.enable = 1;
-	asyw->set.image = true;
-	return 0;
-}
-
-static const u32
-nv50_base_format[] = {
-	DRM_FORMAT_C8,
-	DRM_FORMAT_RGB565,
-	DRM_FORMAT_XRGB1555,
-	DRM_FORMAT_ARGB1555,
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_XBGR2101010,
-	DRM_FORMAT_ABGR2101010,
-	DRM_FORMAT_XBGR8888,
-	DRM_FORMAT_ABGR8888,
-	0
-};
-
-static const struct nv50_wndw_func
-nv50_base = {
-	.acquire = nv50_base_acquire,
-	.release = nv50_base_release,
-	.sema_set = nv50_base_sema_set,
-	.sema_clr = nv50_base_sema_clr,
-	.ntfy_set = nv50_base_ntfy_set,
-	.ntfy_clr = nv50_base_ntfy_clr,
-	.ntfy_wait_begun = nv50_base_ntfy_wait_begun,
-	.image_set = nv50_base_image_set,
-	.image_clr = nv50_base_image_clr,
-	.lut = nv50_base_lut,
-	.update = nv50_base_update,
-};
-
-static int
-base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
-	      struct nouveau_drm *drm, int head, s32 oclass,
-	      struct nv50_wndw **pwndw)
-{
-	struct nv50_disp_base_channel_dma_v0 args = {
-		.head = head,
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	struct nv50_wndw *wndw;
-	int ret;
-
-	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
-			     "base", head, format, &wndw);
-	if (*pwndw = wndw, ret)
-		return ret;
-
-	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
-			       &oclass, head, &args, sizeof(args),
-			       disp->sync->bo.offset, &wndw->wndw);
-	if (ret) {
-		NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
-		return ret;
-	}
-
-	ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func,
-			       false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
-			       &(struct nvif_notify_uevent_req) {},
-			       sizeof(struct nvif_notify_uevent_req),
-			       sizeof(struct nvif_notify_uevent_rep),
-			       &wndw->notify);
-	if (ret)
-		return ret;
-
-	wndw->ntfy = EVO_FLIP_NTFY0(wndw->id);
-	wndw->sema = EVO_FLIP_SEM0(wndw->id);
-	wndw->data = 0x00000000;
-	return 0;
-}
-
-static int
-base507c_new(struct nouveau_drm *drm, int head, s32 oclass,
-	     struct nv50_wndw **pwndw)
-{
-	return base507c_new_(&nv50_base, nv50_base_format, drm, head, oclass, pwndw);
-}
-
-static int
-nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
-{
-	struct {
-		s32 oclass;
-		int version;
-		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
-	} bases[] = {
-		{ GK110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{ GK104_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{ GF110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{ GT214_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{ GT200_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{   G82_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{  NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{}
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	int cid;
-
-	cid = nvif_mclass(&disp->disp->object, bases);
-	if (cid < 0) {
-		NV_ERROR(drm, "No supported base class\n");
-		return cid;
-	}
-
-	return bases[cid].new(drm, head, bases[cid].oclass, pwndw);
-}
-
-/******************************************************************************
- * Head
- *****************************************************************************/
-static void
-head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if (core->base.user.oclass >= GF110_DISP_CORE_CHANNEL_DMA &&
-	    (push = evo_wait(core, 3))) {
-		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
-		evo_data(push, 0x00000001 | (asyh->or.depth  << 6) |
-					    (asyh->or.nvsync << 4) |
-					    (asyh->or.nhsync << 3));
-		evo_data(push, 0x31ec6000 | (head->base.index << 25) |
-					     asyh->mode.interlace);
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
-		else
-			evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
-		evo_data(push, (asyh->procamp.sat.sin << 20) |
-			       (asyh->procamp.sat.cos << 8));
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
-		else
-		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
-		else
-			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
-		evo_data(push, (asyh->dither.mode << 3) |
-			       (asyh->dither.bits << 1) |
-			        asyh->dither.enable);
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 bounds = 0;
-	u32 *push;
-
-	if (asyh->base.cpp) {
-		switch (asyh->base.cpp) {
-		case 8: bounds |= 0x00000500; break;
-		case 4: bounds |= 0x00000300; break;
-		case 2: bounds |= 0x00000100; break;
-		default:
-			WARN_ON(1);
-			break;
-		}
-		bounds |= 0x00000001;
-	}
-
-	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
-		else
-			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
-		evo_data(push, bounds);
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 bounds = 0;
-	u32 *push;
-
-	if (asyh->base.cpp) {
-		switch (asyh->base.cpp) {
-		case 8: bounds |= 0x00000500; break;
-		case 4: bounds |= 0x00000300; break;
-		case 2: bounds |= 0x00000100; break;
-		case 1: bounds |= 0x00000000; break;
-		default:
-			WARN_ON(1);
-			break;
-		}
-		bounds |= 0x00000001;
-	}
-
-	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
-		else
-			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
-		evo_data(push, bounds);
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_curs_clr(struct nv50_head *head)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 4))) {
-		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
-			evo_data(push, 0x05000000);
-		} else
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
-			evo_data(push, 0x05000000);
-			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
-			evo_data(push, 0x00000000);
-		} else {
-			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
-			evo_data(push, 0x05000000);
-			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
-			evo_data(push, 0x00000000);
-		}
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 5))) {
-		if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
-			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
-						    (asyh->curs.format << 24));
-			evo_data(push, asyh->curs.offset >> 8);
-		} else
-		if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
-			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
-						    (asyh->curs.format << 24));
-			evo_data(push, asyh->curs.offset >> 8);
-			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
-			evo_data(push, asyh->curs.handle);
-		} else {
-			evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
-			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
-						    (asyh->curs.format << 24));
-			evo_data(push, asyh->curs.offset >> 8);
-			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
-			evo_data(push, asyh->curs.handle);
-		}
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_core_clr(struct nv50_head *head)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
-		else
-			evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
-		evo_data(push, 0x00000000);
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 9))) {
-		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
-			evo_data(push, asyh->core.offset >> 8);
-			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
-			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
-			evo_data(push, asyh->core.layout << 20 |
-				       (asyh->core.pitch >> 8) << 8 |
-				       asyh->core.block);
-			evo_data(push, asyh->core.kind << 16 |
-				       asyh->core.format << 8);
-			evo_data(push, asyh->core.handle);
-			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
-			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
-			/* EVO will complain with INVALID_STATE if we have an
-			 * active cursor and (re)specify HeadSetContextDmaIso
-			 * without also updating HeadSetOffsetCursor.
-			 */
-			asyh->set.curs = asyh->curs.visible;
-		} else
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
-			evo_data(push, asyh->core.offset >> 8);
-			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
-			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
-			evo_data(push, asyh->core.layout << 20 |
-				       (asyh->core.pitch >> 8) << 8 |
-				       asyh->core.block);
-			evo_data(push, asyh->core.format << 8);
-			evo_data(push, asyh->core.handle);
-			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
-			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
-		} else {
-			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
-			evo_data(push, asyh->core.offset >> 8);
-			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
-			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
-			evo_data(push, asyh->core.layout << 24 |
-				       (asyh->core.pitch >> 8) << 8 |
-				       asyh->core.block);
-			evo_data(push, asyh->core.format << 8);
-			evo_data(push, asyh->core.handle);
-			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
-			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
-		}
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_lut_clr(struct nv50_head *head)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 4))) {
-		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
-			evo_data(push, 0x40000000);
-		} else
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
-			evo_data(push, 0x40000000);
-			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
-			evo_data(push, 0x00000000);
-		} else {
-			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
-			evo_data(push, 0x03000000);
-			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
-			evo_data(push, 0x00000000);
-		}
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_lut_load(struct drm_property_blob *blob, int mode,
-		   struct nouveau_bo *nvbo)
-{
-	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
-	void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
-	const int size = blob->length / sizeof(*in);
-	int bits, shift, i;
-	u16 zero, r, g, b;
-
-	/* This can't happen.. But it shuts the compiler up. */
-	if (WARN_ON(size != 256))
-		return;
-
-	switch (mode) {
-	case 0: /* LORES. */
-	case 1: /* HIRES. */
-		bits = 11;
-		shift = 3;
-		zero = 0x0000;
-		break;
-	case 7: /* INTERPOLATE_257_UNITY_RANGE. */
-		bits = 14;
-		shift = 0;
-		zero = 0x6000;
-		break;
-	default:
-		WARN_ON(1);
-		return;
-	}
-
-	for (i = 0; i < size; i++) {
-		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
-		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
-		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
-		writew(r, lut + (i * 0x08) + 0);
-		writew(g, lut + (i * 0x08) + 2);
-		writew(b, lut + (i * 0x08) + 4);
-	}
-
-	/* INTERPOLATE modes require a "next" entry to interpolate with,
-	 * so we replicate the last entry to deal with this for now.
-	 */
-	writew(r, lut + (i * 0x08) + 0);
-	writew(g, lut + (i * 0x08) + 2);
-	writew(b, lut + (i * 0x08) + 4);
-}
-
-static void
-nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 7))) {
-		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
-			evo_data(push, asyh->lut.offset >> 8);
-		} else
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
-			evo_data(push, asyh->lut.offset >> 8);
-			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
-			evo_data(push, asyh->lut.handle);
-		} else {
-			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
-			evo_data(push, 0x80000000 | asyh->lut.mode << 24);
-			evo_data(push, asyh->lut.offset >> 8);
-			evo_data(push, 0x00000000);
-			evo_data(push, 0x00000000);
-			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
-			evo_data(push, asyh->lut.handle);
-		}
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	struct nv50_head_mode *m = &asyh->mode;
-	u32 *push;
-	if ((push = evo_wait(core, 14))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
-			evo_data(push, 0x00800000 | m->clock);
-			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
-			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
-			evo_data(push, 0x00000000);
-			evo_data(push, (m->v.active  << 16) | m->h.active );
-			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
-			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
-			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
-			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-			evo_data(push, asyh->mode.v.blankus);
-			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
-			evo_data(push, 0x00000000);
-		} else {
-			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
-			evo_data(push, 0x00000000);
-			evo_data(push, (m->v.active  << 16) | m->h.active );
-			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
-			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
-			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
-			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
-			evo_data(push, 0x00000000); /* ??? */
-			evo_data(push, 0xffffff00);
-			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
-			evo_data(push, m->clock * 1000);
-			evo_data(push, 0x00200000); /* ??? */
-			evo_data(push, m->clock * 1000);
-		}
-		evo_kick(push, core);
-	}
-}
-
-static void
-nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if ((push = evo_wait(core, 10))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
-			evo_data(push, 0x00000000);
-			evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
-			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
-			evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-		} else {
-			evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
-			evo_data(push, 0x00000000);
-			evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
-			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
-			evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-		}
-		evo_kick(push, core);
-	}
-}
-
-static const struct nv50_head_func
-head507d = {
-	.view = nv50_head_view,
-	.mode = nv50_head_mode,
-	.ilut_set = nv50_head_lut_set,
-	.ilut_clr = nv50_head_lut_clr,
-	.core_set = nv50_head_core_set,
-	.core_clr = nv50_head_core_clr,
-	.curs_set = nv50_head_curs_set,
-	.curs_clr = nv50_head_curs_clr,
-	.base = nv50_head_base,
-	.ovly = nv50_head_ovly,
-	.dither = nv50_head_dither,
-	.procamp = nv50_head_procamp,
-	.or = head907d_or,
-};
-
-static void
-nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
-{
-	if (asyh->clr.ilut && (!asyh->set.ilut || y))
-		head->func->ilut_clr(head);
-	if (asyh->clr.core && (!asyh->set.core || y))
-		head->func->core_clr(head);
-	if (asyh->clr.curs && (!asyh->set.curs || y))
-		head->func->curs_clr(head);
-}
-
-static void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	if (asyh->set.view   ) head->func->view    (head, asyh);
-	if (asyh->set.mode   ) head->func->mode    (head, asyh);
-	if (asyh->set.ilut   ) {
-		struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
-		struct drm_property_blob *blob = asyh->state.gamma_lut;
-		if (blob)
-			nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
-		asyh->lut.offset = nvbo->bo.offset;
-		head->lut.next ^= 1;
-		head->func->ilut_set(head, asyh);
-	}
-	if (asyh->set.core   ) head->func->core_set(head, asyh);
-	if (asyh->set.curs   ) head->func->curs_set(head, asyh);
-	if (asyh->set.base   ) head->func->base    (head, asyh);
-	if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
-	if (asyh->set.dither ) head->func->dither  (head, asyh);
-	if (asyh->set.procamp) head->func->procamp (head, asyh);
-	if (asyh->set.or     ) head->func->or      (head, asyh);
-}
-
-static void
-nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
-			       struct nv50_head_atom *asyh,
-			       struct nouveau_conn_atom *asyc)
-{
-	const int vib = asyc->procamp.color_vibrance - 100;
-	const int hue = asyc->procamp.vibrant_hue - 90;
-	const int adj = (vib > 0) ? 50 : 0;
-	asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
-	asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
-	asyh->set.procamp = true;
-}
-
-static void
-nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
-			      struct nv50_head_atom *asyh,
-			      struct nouveau_conn_atom *asyc)
-{
-	struct drm_connector *connector = asyc->state.connector;
-	u32 mode = 0x00;
-
-	if (asyc->dither.mode == DITHERING_MODE_AUTO) {
-		if (asyh->base.depth > connector->display_info.bpc * 3)
-			mode = DITHERING_MODE_DYNAMIC2X2;
-	} else {
-		mode = asyc->dither.mode;
-	}
-
-	if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
-		if (connector->display_info.bpc >= 8)
-			mode |= DITHERING_DEPTH_8BPC;
-	} else {
-		mode |= asyc->dither.depth;
-	}
-
-	asyh->dither.enable = mode;
-	asyh->dither.bits = mode >> 1;
-	asyh->dither.mode = mode >> 3;
-	asyh->set.dither = true;
-}
-
-static void
-nv50_head_atomic_check_view(struct nv50_head_atom *armh,
-			    struct nv50_head_atom *asyh,
-			    struct nouveau_conn_atom *asyc)
-{
-	struct drm_connector *connector = asyc->state.connector;
-	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
-	struct drm_display_mode *umode = &asyh->state.mode;
-	int mode = asyc->scaler.mode;
-	struct edid *edid;
-	int umode_vdisplay, omode_hdisplay, omode_vdisplay;
-
-	if (connector->edid_blob_ptr)
-		edid = (struct edid *)connector->edid_blob_ptr->data;
-	else
-		edid = NULL;
-
-	if (!asyc->scaler.full) {
-		if (mode == DRM_MODE_SCALE_NONE)
-			omode = umode;
-	} else {
-		/* Non-EDID LVDS/eDP mode. */
-		mode = DRM_MODE_SCALE_FULLSCREEN;
-	}
-
-	/* For the user-specified mode, we must ignore doublescan and
-	 * the like, but honor frame packing.
-	 */
-	umode_vdisplay = umode->vdisplay;
-	if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
-		umode_vdisplay += umode->vtotal;
-	asyh->view.iW = umode->hdisplay;
-	asyh->view.iH = umode_vdisplay;
-	/* For the output mode, we can just use the stock helper. */
-	drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
-	asyh->view.oW = omode_hdisplay;
-	asyh->view.oH = omode_vdisplay;
-
-	/* Add overscan compensation if necessary, will keep the aspect
-	 * ratio the same as the backend mode unless overridden by the
-	 * user setting both hborder and vborder properties.
-	 */
-	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
-	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
-	     drm_detect_hdmi_monitor(edid)))) {
-		u32 bX = asyc->scaler.underscan.hborder;
-		u32 bY = asyc->scaler.underscan.vborder;
-		u32 r = (asyh->view.oH << 19) / asyh->view.oW;
-
-		if (bX) {
-			asyh->view.oW -= (bX * 2);
-			if (bY) asyh->view.oH -= (bY * 2);
-			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
-		} else {
-			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
-			if (bY) asyh->view.oH -= (bY * 2);
-			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
-		}
-	}
-
-	/* Handle CENTER/ASPECT scaling, taking into account the areas
-	 * removed already for overscan compensation.
-	 */
-	switch (mode) {
-	case DRM_MODE_SCALE_CENTER:
-		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
-		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
-		/* fall-through */
-	case DRM_MODE_SCALE_ASPECT:
-		if (asyh->view.oH < asyh->view.oW) {
-			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
-			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
-		} else {
-			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
-			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
-		}
-		break;
-	default:
-		break;
-	}
-
-	asyh->set.view = true;
-}
-
-static void
-nv50_head_atomic_check_lut(struct nv50_head *head,
-			   struct nv50_head_atom *armh,
-			   struct nv50_head_atom *asyh)
-{
-	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
-
-	/* An I8 surface without an input LUT makes no sense, and
-	 * EVO will throw an error if you try.
-	 *
-	 * Legacy clients actually cause this due to the order in
-	 * which they call ioctls, so we will enable the LUT with
-	 * whatever contents the buffer already contains to avoid
-	 * triggering the error check.
-	 */
-	if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
-		asyh->lut.handle = 0;
-		asyh->clr.ilut = armh->lut.visible;
-		return;
-	}
-
-	if (disp->disp->object.oclass < GF110_DISP) {
-		asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
-		asyh->set.ilut = true;
-	} else {
-		asyh->lut.mode = 7;
-		asyh->set.ilut = asyh->state.color_mgmt_changed;
-	}
-	asyh->lut.handle = disp->core->chan.vram.handle;
-}
-
-static void
-nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
-	struct nv50_head_mode *m = &asyh->mode;
-	u32 blankus;
-
-	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);
-
-	/*
-	 * DRM modes are defined in terms of a repeating interval
-	 * starting with the active display area.  The hardware modes
-	 * are defined in terms of a repeating interval starting one
-	 * unit (pixel or line) into the sync pulse.  So, add bias.
-	 */
-
-	m->h.active = mode->crtc_htotal;
-	m->h.synce  = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
-	m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
-	m->h.blanks = m->h.blanke + mode->crtc_hdisplay;
-
-	m->v.active = mode->crtc_vtotal;
-	m->v.synce  = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
-	m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
-	m->v.blanks = m->v.blanke + mode->crtc_vdisplay;
-
-	/*XXX: Safe underestimate, even "0" works */
-	blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
-	blankus *= 1000;
-	blankus /= mode->crtc_clock;
-	m->v.blankus = blankus;
-
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		m->v.blank2e =  m->v.active + m->v.blanke;
-		m->v.blank2s =  m->v.blank2e + mode->crtc_vdisplay;
-		m->v.active  = (m->v.active * 2) + 1;
-		m->interlace = true;
-	} else {
-		m->v.blank2e = 0;
-		m->v.blank2s = 1;
-		m->interlace = false;
-	}
-	m->clock = mode->crtc_clock;
-
-	asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
-	asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
-	asyh->set.or = head->func->or != NULL;
-	asyh->set.mode = true;
-}
-
-static int
-nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
-{
-	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-	struct nv50_disp *disp = nv50_disp(crtc->dev);
-	struct nv50_head *head = nv50_head(crtc);
-	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
-	struct nv50_head_atom *asyh = nv50_head_atom(state);
-	struct nouveau_conn_atom *asyc = NULL;
-	struct drm_connector_state *conns;
-	struct drm_connector *conn;
-	int i;
-
-	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
-	if (asyh->state.active) {
-		for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
-			if (conns->crtc == crtc) {
-				asyc = nouveau_conn_atom(conns);
-				break;
-			}
-		}
-
-		if (armh->state.active) {
-			if (asyc) {
-				if (asyh->state.mode_changed)
-					asyc->set.scaler = true;
-				if (armh->base.depth != asyh->base.depth)
-					asyc->set.dither = true;
-			}
-		} else {
-			if (asyc)
-				asyc->set.mask = ~0;
-			asyh->set.mask = ~0;
-			asyh->set.or = head->func->or != NULL;
-		}
-
-		if (asyh->state.mode_changed)
-			nv50_head_atomic_check_mode(head, asyh);
-
-		if (asyh->state.color_mgmt_changed ||
-		    asyh->base.cpp != armh->base.cpp)
-			nv50_head_atomic_check_lut(head, armh, asyh);
-		asyh->lut.visible = asyh->lut.handle != 0;
-
-		if (asyc) {
-			if (asyc->set.scaler)
-				nv50_head_atomic_check_view(armh, asyh, asyc);
-			if (asyc->set.dither)
-				nv50_head_atomic_check_dither(armh, asyh, asyc);
-			if (asyc->set.procamp)
-				nv50_head_atomic_check_procamp(armh, asyh, asyc);
-		}
-
-		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
-			asyh->core.x = asyh->base.x;
-			asyh->core.y = asyh->base.y;
-			asyh->core.w = asyh->base.w;
-			asyh->core.h = asyh->base.h;
-		} else
-		if ((asyh->core.visible = asyh->curs.visible) ||
-		    (asyh->core.visible = asyh->lut.visible)) {
-			/*XXX: We need to either find some way of having the
-			 *     primary base layer appear black, while still
-			 *     being able to display the other layers, or we
-			 *     need to allocate a dummy black surface here.
-			 */
-			asyh->core.x = 0;
-			asyh->core.y = 0;
-			asyh->core.w = asyh->state.mode.hdisplay;
-			asyh->core.h = asyh->state.mode.vdisplay;
-		}
-		asyh->core.handle = disp->core->chan.vram.handle;
-		asyh->core.offset = 0;
-		asyh->core.format = 0xcf;
-		asyh->core.kind = 0;
-		asyh->core.layout = 1;
-		asyh->core.block = 0;
-		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
-		asyh->set.base = armh->base.cpp != asyh->base.cpp;
-		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
-	} else {
-		asyh->lut.visible = false;
-		asyh->core.visible = false;
-		asyh->curs.visible = false;
-		asyh->base.cpp = 0;
-		asyh->ovly.cpp = 0;
-	}
-
-	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
-		if (asyh->core.visible) {
-			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
-				asyh->set.core = true;
-		} else
-		if (armh->core.visible) {
-			asyh->clr.core = true;
-		}
-
-		if (asyh->curs.visible) {
-			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
-				asyh->set.curs = true;
-		} else
-		if (armh->curs.visible) {
-			asyh->clr.curs = true;
-		}
-	} else {
-		asyh->clr.ilut = armh->lut.visible;
-		asyh->clr.core = armh->core.visible;
-		asyh->clr.curs = armh->curs.visible;
-		asyh->set.ilut = asyh->lut.visible;
-		asyh->set.core = asyh->core.visible;
-		asyh->set.curs = asyh->curs.visible;
-	}
-
-	if (asyh->clr.mask || asyh->set.mask)
-		nv50_atom(asyh->state.state)->lock_core = true;
-	return 0;
-}
-
-static const struct drm_crtc_helper_funcs
-nv50_head_help = {
-	.atomic_check = nv50_head_atomic_check,
-};
-
-static void
-nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
-			       struct drm_crtc_state *state)
-{
-	struct nv50_head_atom *asyh = nv50_head_atom(state);
-	__drm_atomic_helper_crtc_destroy_state(&asyh->state);
-	kfree(asyh);
-}
-
-static struct drm_crtc_state *
-nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
-{
-	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
-	struct nv50_head_atom *asyh;
-	if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
-		return NULL;
-	__drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
-	asyh->view = armh->view;
-	asyh->mode = armh->mode;
-	asyh->lut  = armh->lut;
-	asyh->core = armh->core;
-	asyh->curs = armh->curs;
-	asyh->base = armh->base;
-	asyh->ovly = armh->ovly;
-	asyh->dither = armh->dither;
-	asyh->procamp = armh->procamp;
-	asyh->clr.mask = 0;
-	asyh->set.mask = 0;
-	return &asyh->state;
-}
-
-static void
-__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
-			       struct drm_crtc_state *state)
-{
-	if (crtc->state)
-		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
-	crtc->state = state;
-	crtc->state->crtc = crtc;
-}
-
-static void
-nv50_head_reset(struct drm_crtc *crtc)
-{
-	struct nv50_head_atom *asyh;
-
-	if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
-		return;
-
-	__drm_atomic_helper_crtc_reset(crtc, &asyh->state);
-}
-
-static void
-nv50_head_destroy(struct drm_crtc *crtc)
-{
-	struct nv50_head *head = nv50_head(crtc);
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
-		nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
-
-	drm_crtc_cleanup(crtc);
-	kfree(head);
-}
-
-static const struct drm_crtc_funcs
-nv50_head_func = {
-	.reset = nv50_head_reset,
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
-	.destroy = nv50_head_destroy,
-	.set_config = drm_atomic_helper_set_config,
-	.page_flip = drm_atomic_helper_page_flip,
-	.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
-	.atomic_destroy_state = nv50_head_atomic_destroy_state,
-};
-
-static int
-nv50_head_create(struct drm_device *dev, int index)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_disp *disp = nv50_disp(dev);
-	struct nv50_head *head;
-	struct nv50_wndw *curs, *wndw;
-	struct drm_crtc *crtc;
-	int ret, i;
-
-	head = kzalloc(sizeof(*head), GFP_KERNEL);
-	if (!head)
-		return -ENOMEM;
-
-	head->func = disp->core->func->head;
-	head->base.index = index;
-	ret = nv50_base_new(drm, head->base.index, &wndw);
-	if (ret == 0)
-		ret = nv50_curs_new(drm, head->base.index, &curs);
-	if (ret) {
-		kfree(head);
-		return ret;
-	}
-
-	crtc = &head->base.base;
-	drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane,
-				  &nv50_head_func, "head-%d", head->base.index);
-	drm_crtc_helper_add(crtc, &nv50_head_help);
-	drm_mode_crtc_set_gamma_size(crtc, 256);
-
-	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
-		ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
-					     TTM_PL_FLAG_VRAM,
-					     &head->lut.nvbo[i]);
-		if (ret)
-			goto out;
-	}
-
-	/* allocate overlay resources */
-	ret = nv50_ovly_new(drm, head->base.index, &wndw);
-out:
-	if (ret)
-		nv50_head_destroy(crtc);
-	return ret;
-}
-
-static const struct nv50_outp_func dac507d;
-static const struct nv50_outp_func sor507d;
-static const struct nv50_outp_func pior507d;
-static const struct nv50_core_func
-core507d = {
-	.head = &head507d,
-	.dac = &dac507d,
-	.sor = &sor507d,
-	.pior = &pior507d,
-};
-
-static int
-core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
-{
-	return core507d_new_(&core507d, drm, oclass, pcore);
-}
-
-static void
-nv50_core_del(struct nv50_core **pcore)
-{
-	struct nv50_core *core = *pcore;
-	if (core) {
-		nv50_dmac_destroy(&core->chan);
-		kfree(*pcore);
-		*pcore = NULL;
-	}
-}
-
-static int
-nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
-{
-	struct {
-		s32 oclass;
-		int version;
-		int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
-	} cores[] = {
-		{ GP102_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GP100_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GM200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GM107_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GK110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GK104_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GF110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GT214_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GT206_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GT200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{   G82_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{  NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{}
-	};
-	struct nv50_disp *disp = nv50_disp(drm->dev);
-	int cid;
-
-	cid = nvif_mclass(&disp->disp->object, cores);
-	if (cid < 0) {
-		NV_ERROR(drm, "No supported core channel class\n");
-		return cid;
-	}
-
-	return cores[cid].new(drm, cores[cid].oclass, pcore);
-}
-
 /******************************************************************************
  * Output path helpers
  *****************************************************************************/
@@ -2720,33 +326,6 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
 /******************************************************************************
  * DAC
  *****************************************************************************/
-static void
-dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
-	     struct nv50_head_atom *asyh)
-{
-	u32 *push, sync = 0;
-	if ((push = evo_wait(&core->chan, 3))) {
-		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			if (asyh) {
-				sync |= asyh->or.nvsync << 1;
-				sync |= asyh->or.nhsync;
-			}
-			evo_mthd(push, 0x0400 + (or * 0x080), 2);
-			evo_data(push, ctrl);
-			evo_data(push, sync);
-		} else {
-			evo_mthd(push, 0x0180 + (or * 0x020), 1);
-			evo_data(push, ctrl);
-		}
-		evo_kick(push, &core->chan);
-	}
-}
-
-static const struct nv50_outp_func
-dac507d = {
-	.ctrl = dac507d_ctrl,
-};
-
 static void
 nv50_dac_disable(struct drm_encoder *encoder)
 {
@@ -3634,32 +1213,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
 /******************************************************************************
  * SOR
  *****************************************************************************/
-static void
-sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
-	     struct nv50_head_atom *asyh)
-{
-	u32 *push;
-	if ((push = evo_wait(&core->chan, 6))) {
-		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			if (asyh) {
-				ctrl |= asyh->or.depth  << 16;
-				ctrl |= asyh->or.nvsync << 13;
-				ctrl |= asyh->or.nhsync << 12;
-			}
-			evo_mthd(push, 0x0600 + (or * 0x40), 1);
-		} else {
-			evo_mthd(push, 0x0200 + (or * 0x20), 1);
-		}
-		evo_data(push, ctrl);
-		evo_kick(push, &core->chan);
-	}
-}
-
-static const struct nv50_outp_func
-sor507d = {
-	.ctrl = sor507d_ctrl,
-};
-
 static void
 nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
 		struct nv50_head_atom *asyh, u8 proto, u8 depth)
@@ -3904,30 +1457,6 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 /******************************************************************************
  * PIOR
  *****************************************************************************/
-static void
-pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
-	      struct nv50_head_atom *asyh)
-{
-	u32 *push;
-	if ((push = evo_wait(&core->chan, 8))) {
-		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			if (asyh) {
-				ctrl |= asyh->or.depth  << 16;
-				ctrl |= asyh->or.nvsync << 13;
-				ctrl |= asyh->or.nhsync << 12;
-			}
-			evo_mthd(push, 0x0700 + (or * 0x040), 1);
-			evo_data(push, ctrl);
-		}
-		evo_kick(push, &core->chan);
-	}
-}
-
-static const struct nv50_outp_func
-pior507d = {
-	.ctrl = pior507d_ctrl,
-};
-
 static int
 nv50_pior_atomic_check(struct drm_encoder *encoder,
 		       struct drm_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
new file mode 100644
index 000000000000..7cbd66849743
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -0,0 +1,71 @@
+#ifndef __NV50_KMS_H__
+#define __NV50_KMS_H__
+#include <nvif/mem.h>
+
+#include "nouveau_display.h"
+
+struct nv50_disp {
+	struct nvif_disp *disp;
+	struct nv50_core *core;
+
+#define NV50_DISP_SYNC(c, o)                                ((c) * 0x040 + (o))
+#define NV50_DISP_CORE_NTFY                       NV50_DISP_SYNC(0      , 0x00)
+#define NV50_DISP_WNDW_SEM0(c)                    NV50_DISP_SYNC(1 + (c), 0x00)
+#define NV50_DISP_WNDW_SEM1(c)                    NV50_DISP_SYNC(1 + (c), 0x10)
+#define NV50_DISP_WNDW_NTFY(c)                    NV50_DISP_SYNC(1 + (c), 0x20)
+#define NV50_DISP_BASE_SEM0(c)                    NV50_DISP_WNDW_SEM0(0 + (c))
+#define NV50_DISP_BASE_SEM1(c)                    NV50_DISP_WNDW_SEM1(0 + (c))
+#define NV50_DISP_BASE_NTFY(c)                    NV50_DISP_WNDW_NTFY(0 + (c))
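+	/* The NV50_DISP_SYNC() macros above carve the "sync" buffer below
+	 * into 0x40-byte slots: slot 0 holds the core channel notifier,
+	 * and each window/base channel index c uses slot 1 + c, with
+	 * semaphores at +0x00/+0x10 and a notifier at +0x20 (e.g. index 0:
+	 * 0x40, 0x50, 0x60).
+	 */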
+	struct nouveau_bo *sync;
+
+	struct mutex mutex;
+};
+
+static inline struct nv50_disp *
+nv50_disp(struct drm_device *dev)
+{
+	return nouveau_display(dev)->priv;
+}
+
+struct nv50_chan {
+	struct nvif_object user;
+	struct nvif_device *device;
+};
+
+struct nv50_dmac {
+	struct nv50_chan base;
+
+	struct nvif_mem push;
+	u32 *ptr;
+
+	struct nvif_object sync;
+	struct nvif_object vram;
+
+	/* Protects against concurrent pushbuf access to this channel; the
+	 * lock is grabbed by evo_wait (if the pushbuf reservation is
+	 * successful) and dropped again by evo_kick. */
+	struct mutex lock;
+};
+
+int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+		     const s32 *oclass, u8 head, void *data, u32 size,
+		     u64 syncbuf, struct nv50_dmac *dmac);
+void nv50_dmac_destroy(struct nv50_dmac *);
+
+u32 *evo_wait(struct nv50_dmac *, int nr);
+void evo_kick(u32 *, struct nv50_dmac *);
+
+#define evo_mthd(p, m, s) do {						\
+	const u32 _m = (m), _s = (s);					\
+	if (drm_debug & DRM_UT_KMS)					\
+		pr_err("%04x %d %s\n", _m, _s, __func__);		\
+	*((p)++) = ((_s << 18) | _m);					\
+} while (0)
+
+#define evo_data(p, d) do {						\
+	const u32 _d = (d);						\
+	if (drm_debug & DRM_UT_KMS)					\
+		pr_err("\t%08x\n", _d);					\
+	*((p)++) = _d;							\
+} while (0)
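+
+/* Illustrative push-buffer usage of the helpers above (a sketch only;
+ * "chan" stands for any struct nv50_dmac, and the method/data values are
+ * placeholders; the pattern mirrors the callers throughout this series):
+ *
+ *	u32 *push = evo_wait(chan, 2);      reserves 2 dwords, takes chan->lock
+ *	if (push) {
+ *		evo_mthd(push, 0x0080, 1);  method header
+ *		evo_data(push, 0x00000000); method argument
+ *		evo_kick(push, chan);       submits and drops chan->lock
+ *	}
+ */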
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
new file mode 100644
index 000000000000..6a809ff24e14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "base.h"
+#include "core.h"
+#include "curs.h"
+#include "ovly.h"
+
+#include <nvif/class.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include "nouveau_connector.h"
+#include "nouveau_bo.h"
+
+static void
+nv50_head_lut_load(struct drm_property_blob *blob, int mode,
+		   struct nouveau_bo *nvbo)
+{
+	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+	void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
+	const int size = blob->length / sizeof(*in);
+	int bits, shift, i;
+	u16 zero, r, g, b;
+
+	/* This can't happen, but it shuts the compiler up. */
+	if (WARN_ON(size != 256))
+		return;
+
+	switch (mode) {
+	case 0: /* LORES. */
+	case 1: /* HIRES. */
+		bits = 11;
+		shift = 3;
+		zero = 0x0000;
+		break;
+	case 7: /* INTERPOLATE_257_UNITY_RANGE. */
+		bits = 14;
+		shift = 0;
+		zero = 0x6000;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	for (i = 0; i < size; i++) {
+		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
+		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
+		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
+		writew(r, lut + (i * 0x08) + 0);
+		writew(g, lut + (i * 0x08) + 2);
+		writew(b, lut + (i * 0x08) + 4);
+	}
+
+	/* INTERPOLATE modes require a "next" entry to interpolate with,
+	 * so we replicate the last entry to deal with this for now.
+	 */
+	writew(r, lut + (i * 0x08) + 0);
+	writew(g, lut + (i * 0x08) + 2);
+	writew(b, lut + (i * 0x08) + 4);
+}
+
+void
+nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
+{
+	if (asyh->clr.ilut && (!asyh->set.ilut || y))
+		head->func->ilut_clr(head);
+	if (asyh->clr.core && (!asyh->set.core || y))
+		head->func->core_clr(head);
+	if (asyh->clr.curs && (!asyh->set.curs || y))
+		head->func->curs_clr(head);
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	if (asyh->set.view   ) head->func->view    (head, asyh);
+	if (asyh->set.mode   ) head->func->mode    (head, asyh);
+	if (asyh->set.ilut   ) {
+		struct nouveau_bo *nvbo = head->ilut.nvbo[head->ilut.next];
+		struct drm_property_blob *blob = asyh->state.gamma_lut;
+		if (blob)
+			nv50_head_lut_load(blob, asyh->ilut.mode, nvbo);
+		asyh->ilut.offset = nvbo->bo.offset;
+		head->ilut.next ^= 1;
+		head->func->ilut_set(head, asyh);
+	}
+	if (asyh->set.core   ) head->func->core_set(head, asyh);
+	if (asyh->set.curs   ) head->func->curs_set(head, asyh);
+	if (asyh->set.base   ) head->func->base    (head, asyh);
+	if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
+	if (asyh->set.dither ) head->func->dither  (head, asyh);
+	if (asyh->set.procamp) head->func->procamp (head, asyh);
+	if (asyh->set.or     ) head->func->or      (head, asyh);
+}
+
+static void
+nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
+			       struct nv50_head_atom *asyh,
+			       struct nouveau_conn_atom *asyc)
+{
+	const int vib = asyc->procamp.color_vibrance - 100;
+	const int hue = asyc->procamp.vibrant_hue - 90;
+	const int adj = (vib > 0) ? 50 : 0;
+	asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
+	asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
+	asyh->set.procamp = true;
+}
+
+static void
+nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
+			      struct nv50_head_atom *asyh,
+			      struct nouveau_conn_atom *asyc)
+{
+	struct drm_connector *connector = asyc->state.connector;
+	u32 mode = 0x00;
+
+	if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+		if (asyh->base.depth > connector->display_info.bpc * 3)
+			mode = DITHERING_MODE_DYNAMIC2X2;
+	} else {
+		mode = asyc->dither.mode;
+	}
+
+	if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+		if (connector->display_info.bpc >= 8)
+			mode |= DITHERING_DEPTH_8BPC;
+	} else {
+		mode |= asyc->dither.depth;
+	}
+
+	asyh->dither.enable = mode;
+	asyh->dither.bits = mode >> 1;
+	asyh->dither.mode = mode >> 3;
+	asyh->set.dither = true;
+}
+
+static void
+nv50_head_atomic_check_view(struct nv50_head_atom *armh,
+			    struct nv50_head_atom *asyh,
+			    struct nouveau_conn_atom *asyc)
+{
+	struct drm_connector *connector = asyc->state.connector;
+	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
+	struct drm_display_mode *umode = &asyh->state.mode;
+	int mode = asyc->scaler.mode;
+	struct edid *edid;
+	int umode_vdisplay, omode_hdisplay, omode_vdisplay;
+
+	if (connector->edid_blob_ptr)
+		edid = (struct edid *)connector->edid_blob_ptr->data;
+	else
+		edid = NULL;
+
+	if (!asyc->scaler.full) {
+		if (mode == DRM_MODE_SCALE_NONE)
+			omode = umode;
+	} else {
+		/* Non-EDID LVDS/eDP mode. */
+		mode = DRM_MODE_SCALE_FULLSCREEN;
+	}
+
+	/* For the user-specified mode, we must ignore doublescan and
+	 * the like, but honor frame packing.
+	 */
+	umode_vdisplay = umode->vdisplay;
+	if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+		umode_vdisplay += umode->vtotal;
+	asyh->view.iW = umode->hdisplay;
+	asyh->view.iH = umode_vdisplay;
+	/* For the output mode, we can just use the stock helper. */
+	drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
+	asyh->view.oW = omode_hdisplay;
+	asyh->view.oH = omode_vdisplay;
+
+	/* Add overscan compensation if necessary; this will keep the aspect
+	 * ratio the same as the backend mode unless overridden by the
+	 * user setting both hborder and vborder properties.
+	 */
+	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
+	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
+	     drm_detect_hdmi_monitor(edid)))) {
+		u32 bX = asyc->scaler.underscan.hborder;
+		u32 bY = asyc->scaler.underscan.vborder;
+		u32 r = (asyh->view.oH << 19) / asyh->view.oW;
+
+		if (bX) {
+			asyh->view.oW -= (bX * 2);
+			if (bY) asyh->view.oH -= (bY * 2);
+			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
+		} else {
+			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
+			if (bY) asyh->view.oH -= (bY * 2);
+			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
+		}
+	}
+
+	/* Handle CENTER/ASPECT scaling, taking into account the areas
+	 * removed already for overscan compensation.
+	 */
+	switch (mode) {
+	case DRM_MODE_SCALE_CENTER:
+		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
+		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
+		/* fall-through */
+	case DRM_MODE_SCALE_ASPECT:
+		if (asyh->view.oH < asyh->view.oW) {
+			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
+			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
+		} else {
+			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
+			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+		}
+		break;
+	default:
+		break;
+	}
+
+	asyh->set.view = true;
+}
+
+static void
+nv50_head_atomic_check_lut(struct nv50_head *head,
+			   struct nv50_head_atom *armh,
+			   struct nv50_head_atom *asyh)
+{
+	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
+
+	/* An I8 surface without an input LUT makes no sense, and
+	 * EVO will throw an error if you try.
+	 *
+	 * Legacy clients actually cause this due to the order in
+	 * which they call ioctls, so we will enable the LUT with
+	 * whatever contents the buffer already contains to avoid
+	 * triggering the error check.
+	 */
+	if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
+		asyh->ilut.handle = 0;
+		asyh->clr.ilut = armh->ilut.visible;
+		return;
+	}
+
+	if (disp->disp->object.oclass < GF110_DISP) {
+		asyh->ilut.mode = (asyh->base.cpp == 1) ? 0 : 1;
+		asyh->set.ilut = true;
+	} else {
+		asyh->ilut.mode = 7;
+		asyh->set.ilut = asyh->state.color_mgmt_changed;
+	}
+	asyh->ilut.handle = disp->core->chan.vram.handle;
+}
+
+static void
+nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+	struct nv50_head_mode *m = &asyh->mode;
+	u32 blankus;
+
+	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);
+
+	/*
+	 * DRM modes are defined in terms of a repeating interval
+	 * starting with the active display area.  The hardware modes
+	 * are defined in terms of a repeating interval starting one
+	 * unit (pixel or line) into the sync pulse.  So, add bias.
+	 */
+
+	m->h.active = mode->crtc_htotal;
+	m->h.synce  = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
+	m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
+	m->h.blanks = m->h.blanke + mode->crtc_hdisplay;
+
+	m->v.active = mode->crtc_vtotal;
+	m->v.synce  = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
+	m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
+	m->v.blanks = m->v.blanke + mode->crtc_vdisplay;
+
+	/*XXX: Safe underestimate, even "0" works */
+	blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
+	blankus *= 1000;
+	blankus /= mode->crtc_clock;
+	m->v.blankus = blankus;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		m->v.blank2e =  m->v.active + m->v.blanke;
+		m->v.blank2s =  m->v.blank2e + mode->crtc_vdisplay;
+		m->v.active  = (m->v.active * 2) + 1;
+		m->interlace = true;
+	} else {
+		m->v.blank2e = 0;
+		m->v.blank2s = 1;
+		m->interlace = false;
+	}
+	m->clock = mode->crtc_clock;
+
+	asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+	asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+	asyh->set.or = head->func->or != NULL;
+	asyh->set.mode = true;
+}
+
+static int
+nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
+	struct nv50_head *head = nv50_head(crtc);
+	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+	struct nv50_head_atom *asyh = nv50_head_atom(state);
+	struct nouveau_conn_atom *asyc = NULL;
+	struct drm_connector_state *conns;
+	struct drm_connector *conn;
+	int i;
+
+	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
+	if (asyh->state.active) {
+		for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
+			if (conns->crtc == crtc) {
+				asyc = nouveau_conn_atom(conns);
+				break;
+			}
+		}
+
+		if (armh->state.active) {
+			if (asyc) {
+				if (asyh->state.mode_changed)
+					asyc->set.scaler = true;
+				if (armh->base.depth != asyh->base.depth)
+					asyc->set.dither = true;
+			}
+		} else {
+			if (asyc)
+				asyc->set.mask = ~0;
+			asyh->set.mask = ~0;
+			asyh->set.or = head->func->or != NULL;
+		}
+
+		if (asyh->state.mode_changed)
+			nv50_head_atomic_check_mode(head, asyh);
+
+		if (asyh->state.color_mgmt_changed ||
+		    asyh->base.cpp != armh->base.cpp)
+			nv50_head_atomic_check_lut(head, armh, asyh);
+		asyh->ilut.visible = asyh->ilut.handle != 0;
+
+		if (asyc) {
+			if (asyc->set.scaler)
+				nv50_head_atomic_check_view(armh, asyh, asyc);
+			if (asyc->set.dither)
+				nv50_head_atomic_check_dither(armh, asyh, asyc);
+			if (asyc->set.procamp)
+				nv50_head_atomic_check_procamp(armh, asyh, asyc);
+		}
+
+		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
+			asyh->core.x = asyh->base.x;
+			asyh->core.y = asyh->base.y;
+			asyh->core.w = asyh->base.w;
+			asyh->core.h = asyh->base.h;
+		} else
+		if ((asyh->core.visible = asyh->curs.visible) ||
+		    (asyh->core.visible = asyh->ilut.visible)) {
+			/*XXX: We need to either find some way of having the
+			 *     primary base layer appear black, while still
+			 *     being able to display the other layers, or we
+			 *     need to allocate a dummy black surface here.
+			 */
+			asyh->core.x = 0;
+			asyh->core.y = 0;
+			asyh->core.w = asyh->state.mode.hdisplay;
+			asyh->core.h = asyh->state.mode.vdisplay;
+		}
+		asyh->core.handle = disp->core->chan.vram.handle;
+		asyh->core.offset = 0;
+		asyh->core.format = 0xcf;
+		asyh->core.kind = 0;
+		asyh->core.layout = 1;
+		asyh->core.block = 0;
+		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
+		asyh->set.base = armh->base.cpp != asyh->base.cpp;
+		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
+	} else {
+		asyh->ilut.visible = false;
+		asyh->core.visible = false;
+		asyh->curs.visible = false;
+		asyh->base.cpp = 0;
+		asyh->ovly.cpp = 0;
+	}
+
+	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
+		if (asyh->core.visible) {
+			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
+				asyh->set.core = true;
+		} else
+		if (armh->core.visible) {
+			asyh->clr.core = true;
+		}
+
+		if (asyh->curs.visible) {
+			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
+				asyh->set.curs = true;
+		} else
+		if (armh->curs.visible) {
+			asyh->clr.curs = true;
+		}
+	} else {
+		asyh->clr.ilut = armh->ilut.visible;
+		asyh->clr.core = armh->core.visible;
+		asyh->clr.curs = armh->curs.visible;
+		asyh->set.ilut = asyh->ilut.visible;
+		asyh->set.core = asyh->core.visible;
+		asyh->set.curs = asyh->curs.visible;
+	}
+
+	if (asyh->clr.mask || asyh->set.mask)
+		nv50_atom(asyh->state.state)->lock_core = true;
+	return 0;
+}
+
+static const struct drm_crtc_helper_funcs
+nv50_head_help = {
+	.atomic_check = nv50_head_atomic_check,
+};
+
+static void
+nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
+			       struct drm_crtc_state *state)
+{
+	struct nv50_head_atom *asyh = nv50_head_atom(state);
+	__drm_atomic_helper_crtc_destroy_state(&asyh->state);
+	kfree(asyh);
+}
+
+static struct drm_crtc_state *
+nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
+	struct nv50_head_atom *asyh;
+	if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
+		return NULL;
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
+	asyh->view = armh->view;
+	asyh->mode = armh->mode;
+	asyh->ilut = armh->ilut;
+	asyh->core = armh->core;
+	asyh->curs = armh->curs;
+	asyh->base = armh->base;
+	asyh->ovly = armh->ovly;
+	asyh->dither = armh->dither;
+	asyh->procamp = armh->procamp;
+	asyh->clr.mask = 0;
+	asyh->set.mask = 0;
+	return &asyh->state;
+}
+
+static void
+__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
+			       struct drm_crtc_state *state)
+{
+	if (crtc->state)
+		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+	crtc->state = state;
+	crtc->state->crtc = crtc;
+}
+
+static void
+nv50_head_reset(struct drm_crtc *crtc)
+{
+	struct nv50_head_atom *asyh;
+
+	if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
+		return;
+
+	__drm_atomic_helper_crtc_reset(crtc, &asyh->state);
+}
+
+static void
+nv50_head_destroy(struct drm_crtc *crtc)
+{
+	struct nv50_head *head = nv50_head(crtc);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(head->ilut.nvbo); i++)
+		nouveau_bo_unmap_unpin_unref(&head->ilut.nvbo[i]);
+
+	drm_crtc_cleanup(crtc);
+	kfree(head);
+}
+
+static const struct drm_crtc_funcs
+nv50_head_func = {
+	.reset = nv50_head_reset,
+	.gamma_set = drm_atomic_helper_legacy_gamma_set,
+	.destroy = nv50_head_destroy,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
+	.atomic_destroy_state = nv50_head_atomic_destroy_state,
+};
+
+int
+nv50_head_create(struct drm_device *dev, int index)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct nv50_head *head;
+	struct nv50_wndw *curs, *wndw;
+	struct drm_crtc *crtc;
+	int ret, i;
+
+	head = kzalloc(sizeof(*head), GFP_KERNEL);
+	if (!head)
+		return -ENOMEM;
+
+	head->func = disp->core->func->head;
+	head->base.index = index;
+	ret = nv50_base_new(drm, head->base.index, &wndw);
+	if (ret == 0)
+		ret = nv50_curs_new(drm, head->base.index, &curs);
+	if (ret) {
+		kfree(head);
+		return ret;
+	}
+
+	crtc = &head->base.base;
+	drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane,
+				  &nv50_head_func, "head-%d", head->base.index);
+	drm_crtc_helper_add(crtc, &nv50_head_help);
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	for (i = 0; i < ARRAY_SIZE(head->ilut.nvbo); i++) {
+		ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
+					     TTM_PL_FLAG_VRAM,
+					     &head->ilut.nvbo[i]);
+		if (ret)
+			goto out;
+	}
+
+	/* allocate overlay resources */
+	ret = nv50_ovly_new(drm, head->base.index, &wndw);
+out:
+	if (ret)
+		nv50_head_destroy(crtc);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
new file mode 100644
index 000000000000..23099a82883b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -0,0 +1,39 @@
+#ifndef __NV50_KMS_HEAD_H__
+#define __NV50_KMS_HEAD_H__
+#define nv50_head(c) container_of((c), struct nv50_head, base.base)
+#include "disp.h"
+#include "atom.h"
+
+#include "nouveau_crtc.h"
+
+struct nv50_head {
+	const struct nv50_head_func *func;
+	struct nouveau_crtc base;
+	struct {
+		struct nouveau_bo *nvbo[2];
+		int next;
+	} ilut;
+};
+
+int nv50_head_create(struct drm_device *, int index);
+void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *);
+void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y);
+
+struct nv50_head_func {
+	void (*view)(struct nv50_head *, struct nv50_head_atom *);
+	void (*mode)(struct nv50_head *, struct nv50_head_atom *);
+	void (*ilut_set)(struct nv50_head *, struct nv50_head_atom *);
+	void (*ilut_clr)(struct nv50_head *);
+	void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
+	void (*core_clr)(struct nv50_head *);
+	void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
+	void (*curs_clr)(struct nv50_head *);
+	void (*base)(struct nv50_head *, struct nv50_head_atom *);
+	void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
+	void (*dither)(struct nv50_head *, struct nv50_head_atom *);
+	void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
+	void (*or)(struct nv50_head *, struct nv50_head_atom *);
+};
+
+extern const struct nv50_head_func head507d;
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
new file mode 100644
index 000000000000..92fa249ba72f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "core.h"
+
+#include <nvif/class.h>
+
+static void
+head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if (core->base.user.oclass >= GF110_DISP_CORE_CHANNEL_DMA &&
+	    (push = evo_wait(core, 3))) {
+		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
+		evo_data(push, 0x00000001 | (asyh->or.depth  << 6) |
+					    (asyh->or.nvsync << 4) |
+					    (asyh->or.nhsync << 3));
+		evo_data(push, 0x31ec6000 | (head->base.index << 25) |
+					     asyh->mode.interlace);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+			evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
+		else
+			evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
+		evo_data(push, (asyh->procamp.sat.sin << 20) |
+			       (asyh->procamp.sat.cos << 8));
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
+		else
+		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
+			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
+		else
+			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
+		evo_data(push, (asyh->dither.mode << 3) |
+			       (asyh->dither.bits << 1) |
+			        asyh->dither.enable);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 bounds = 0;
+	u32 *push;
+
+	if (asyh->ovly.cpp) {
+		switch (asyh->ovly.cpp) {
+		case 8: bounds |= 0x00000500; break;
+		case 4: bounds |= 0x00000300; break;
+		case 2: bounds |= 0x00000100; break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		bounds |= 0x00000001;
+	}
+
+	if ((push = evo_wait(core, 2))) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
+		else
+			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
+		evo_data(push, bounds);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 bounds = 0;
+	u32 *push;
+
+	if (asyh->base.cpp) {
+		switch (asyh->base.cpp) {
+		case 8: bounds |= 0x00000500; break;
+		case 4: bounds |= 0x00000300; break;
+		case 2: bounds |= 0x00000100; break;
+		case 1: bounds |= 0x00000000; break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		bounds |= 0x00000001;
+	}
+
+	if ((push = evo_wait(core, 2))) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
+		else
+			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
+		evo_data(push, bounds);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_curs_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
+			evo_data(push, 0x05000000);
+		} else
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
+			evo_data(push, 0x05000000);
+			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
+			evo_data(push, 0x05000000);
+			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 5))) {
+		if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+						    (asyh->curs.format << 24));
+			evo_data(push, asyh->curs.offset >> 8);
+		} else
+		if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+						    (asyh->curs.format << 24));
+			evo_data(push, asyh->curs.offset >> 8);
+			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
+			evo_data(push, asyh->curs.handle);
+		} else {
+			evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
+			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
+						    (asyh->curs.format << 24));
+			evo_data(push, asyh->curs.offset >> 8);
+			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
+			evo_data(push, asyh->curs.handle);
+		}
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_core_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
+			evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
+		else
+			evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 9))) {
+		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+			evo_data(push, asyh->core.offset >> 8);
+			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+			evo_data(push, asyh->core.layout << 20 |
+				       (asyh->core.pitch >> 8) << 8 |
+				       asyh->core.block);
+			evo_data(push, asyh->core.kind << 16 |
+				       asyh->core.format << 8);
+			evo_data(push, asyh->core.handle);
+			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+			/* EVO will complain with INVALID_STATE if we have an
+			 * active cursor and (re)specify HeadSetContextDmaIso
+			 * without also updating HeadSetOffsetCursor.
+			 */
+			asyh->set.curs = asyh->curs.visible;
+		} else
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+			evo_data(push, asyh->core.offset >> 8);
+			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+			evo_data(push, asyh->core.layout << 20 |
+				       (asyh->core.pitch >> 8) << 8 |
+				       asyh->core.block);
+			evo_data(push, asyh->core.format << 8);
+			evo_data(push, asyh->core.handle);
+			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+		} else {
+			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
+			evo_data(push, asyh->core.offset >> 8);
+			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
+			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
+			evo_data(push, asyh->core.layout << 24 |
+				       (asyh->core.pitch >> 8) << 8 |
+				       asyh->core.block);
+			evo_data(push, asyh->core.format << 8);
+			evo_data(push, asyh->core.handle);
+			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
+			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
+		}
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_ilut_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
+			evo_data(push, 0x40000000);
+		} else
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
+			evo_data(push, 0x40000000);
+			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
+			evo_data(push, 0x03000000);
+			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_ilut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 7))) {
+		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
+			evo_data(push, 0x80000000 | asyh->ilut.mode << 30);
+			evo_data(push, asyh->ilut.offset >> 8);
+		} else
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
+			evo_data(push, 0x80000000 | asyh->ilut.mode << 30);
+			evo_data(push, asyh->ilut.offset >> 8);
+			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
+			evo_data(push, asyh->ilut.handle);
+		} else {
+			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
+			evo_data(push, 0x80000000 | asyh->ilut.mode << 24);
+			evo_data(push, asyh->ilut.offset >> 8);
+			evo_data(push, 0x00000000);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
+			evo_data(push, asyh->ilut.handle);
+		}
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	struct nv50_head_mode *m = &asyh->mode;
+	u32 *push;
+	if ((push = evo_wait(core, 14))) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
+			evo_data(push, 0x00800000 | m->clock);
+			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
+			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
+			evo_data(push, 0x00000000);
+			evo_data(push, (m->v.active  << 16) | m->h.active );
+			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
+			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
+			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
+			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+			evo_data(push, asyh->mode.v.blankus);
+			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
+			evo_data(push, 0x00000000);
+			evo_data(push, (m->v.active  << 16) | m->h.active );
+			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
+			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
+			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
+			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
+			evo_data(push, 0x00000000); /* ??? */
+			evo_data(push, 0xffffff00);
+			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
+			evo_data(push, m->clock * 1000);
+			evo_data(push, 0x00200000); /* ??? */
+			evo_data(push, m->clock * 1000);
+		}
+		evo_kick(push, core);
+	}
+}
+
+static void
+head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 10))) {
+		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
+			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+			evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
+			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+		} else {
+			evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
+			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+			evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
+			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+		}
+		evo_kick(push, core);
+	}
+}
+
+const struct nv50_head_func
+head507d = {
+	.view = head507d_view,
+	.mode = head507d_mode,
+	.ilut_set = head507d_ilut_set,
+	.ilut_clr = head507d_ilut_clr,
+	.core_set = head507d_core_set,
+	.core_clr = head507d_core_clr,
+	.curs_set = head507d_curs_set,
+	.curs_clr = head507d_curs_clr,
+	.base = head507d_base,
+	.ovly = head507d_ovly,
+	.dither = head507d_dither,
+	.procamp = head507d_procamp,
+	.or = head907d_or,
+};
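
All of the head507d methods above share the same EVO push-buffer idiom:
reserve space in the core channel with evo_wait(), emit a method header
and its data words with evo_mthd()/evo_data(), then submit with
evo_kick().  A minimal sketch of that idiom, using a hypothetical method
offset rather than one of the real 507d methods, looks like this:

	static void
	example_head_method(struct nv50_head *head, u32 value)
	{
		struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
		u32 *push;

		/* Reserve two dwords; evo_wait() returns NULL on failure. */
		if ((push = evo_wait(core, 2))) {
			/* One method header followed by one data dword.
			 * 0x0be0 is an illustrative offset only.
			 */
			evo_mthd(push, 0x0be0 + (head->base.index * 0x400), 1);
			evo_data(push, value);
			/* Hand the reserved dwords to the channel. */
			evo_kick(push, core);
		}
	}

The count passed to evo_wait() must cover every evo_mthd() and evo_data()
call that follows, which is why the per-method sizes above vary (2 for a
single method, up to 14 for the full mode programming).
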
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm.c b/drivers/gpu/drm/nouveau/dispnv50/oimm.c
new file mode 100644
index 000000000000..2a2841d344c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "oimm.h"
+
+#include <nvif/class.h>
+
+int
+nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
+{
+	static const struct {
+		s32 oclass;
+		int version;
+		int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
+	} oimms[] = {
+		{ GK104_DISP_OVERLAY, 0, oimm507b_init },
+		{ GF110_DISP_OVERLAY, 0, oimm507b_init },
+		{ GT214_DISP_OVERLAY, 0, oimm507b_init },
+		{   G82_DISP_OVERLAY, 0, oimm507b_init },
+		{  NV50_DISP_OVERLAY, 0, oimm507b_init },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
+
+	cid = nvif_mclass(&disp->disp->object, oimms);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported overlay immediate class\n");
+		return cid;
+	}
+
+	return oimms[cid].init(drm, oimms[cid].oclass, wndw);
+}
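
The class-selection idiom in nv50_oimm_init() (and nv50_ovly_new() below)
relies on nvif_mclass(): the table is ordered newest class first, and
nvif_mclass() returns the index of the first entry the parent disp object
actually supports, or a negative errno if none match.  A hedged sketch of
the same idiom with a trimmed, illustrative table:

	static int
	example_pick_overlay_class(struct nv50_disp *disp)
	{
		static const struct {
			s32 oclass;
			int version;
		} classes[] = {
			{ GF110_DISP_OVERLAY, 0 },	/* preferred, newest */
			{   G82_DISP_OVERLAY, 0 },	/* fallback */
			{}				/* terminator */
		};
		int cid = nvif_mclass(&disp->disp->object, classes);

		/* cid indexes the chosen entry; the caller passes
		 * classes[cid].oclass on to the object constructor,
		 * as nv50_oimm_init() does with its init hook.
		 */
		return cid < 0 ? cid : classes[cid].oclass;
	}
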
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm.h b/drivers/gpu/drm/nouveau/dispnv50/oimm.h
new file mode 100644
index 000000000000..6fa51f101e94
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm.h
@@ -0,0 +1,8 @@
+#ifndef __NV50_KMS_OIMM_H__
+#define __NV50_KMS_OIMM_H__
+#include "wndw.h"
+
+int oimm507b_init(struct nouveau_drm *, s32, struct nv50_wndw *);
+
+int nv50_oimm_init(struct nouveau_drm *, struct nv50_wndw *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
new file mode 100644
index 000000000000..c4baca82de14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "oimm.h"
+
+#include <nvif/cl507b.h>
+
+static const struct nv50_wimm_func
+oimm507b = {
+};
+
+static int
+oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+	       s32 oclass, struct nv50_wndw *wndw)
+{
+	struct nv50_disp_overlay_v0 args = {
+		.head = wndw->id,
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int ret;
+
+	ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
+			       sizeof(args), &wndw->wimm.base.user);
+	if (ret) {
+		NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
+	wndw->immd = func;
+	return 0;
+}
+
+int
+oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
+{
+	return oimm507b_init_(&oimm507b, drm, oclass, wndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.c b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
new file mode 100644
index 000000000000..ac2d3b64f186
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+#include "oimm.h"
+
+#include <nvif/class.h>
+
+int
+nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
+{
+	static const struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+	} ovlys[] = {
+		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
+		{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
+		{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{   G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{  NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid, ret;
+
+	cid = nvif_mclass(&disp->disp->object, ovlys);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported overlay class\n");
+		return cid;
+	}
+
+	ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw);
+	if (ret)
+		return ret;
+
+	return nv50_oimm_init(drm, *pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.h b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
new file mode 100644
index 000000000000..90af1f2f0aa0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
@@ -0,0 +1,8 @@
+#ifndef __NV50_KMS_OVLY_H__
+#define __NV50_KMS_OVLY_H__
+#include "wndw.h"
+
+int ovly507e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+
+int nv50_ovly_new(struct nouveau_drm *, int head, struct nv50_wndw **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
new file mode 100644
index 000000000000..ceec5127a17d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+
+#include <nvif/cl507e.h>
+
+#include "nouveau_bo.h"
+
+static const struct nv50_wndw_func
+ovly507e = {
+};
+
+static const u32
+ovly507e_format[] = {
+	0
+};
+
+static int
+ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
+	      struct nouveau_drm *drm, int head, s32 oclass,
+	      struct nv50_wndw **pwndw)
+{
+	struct nv50_disp_overlay_channel_dma_v0 args = {
+		.head = head,
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	struct nv50_wndw *wndw;
+	int ret;
+
+	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
+			     "ovly", head, format, &wndw);
+	if (*pwndw = wndw, ret)
+		return ret;
+
+	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+			       &oclass, 0, &args, sizeof(args),
+			       disp->sync->bo.offset, &wndw->wndw);
+	if (ret) {
+		NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
new file mode 100644
index 000000000000..a99ba6a7216f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/class.h>
+
+static void
+pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	      struct nv50_head_atom *asyh)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 8))) {
+		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			if (asyh) {
+				ctrl |= asyh->or.depth  << 16;
+				ctrl |= asyh->or.nvsync << 13;
+				ctrl |= asyh->or.nhsync << 12;
+			}
+			evo_mthd(push, 0x0700 + (or * 0x040), 1);
+			evo_data(push, ctrl);
+		}
+		evo_kick(push, &core->chan);
+	}
+}
+
+const struct nv50_outp_func
+pior507d = {
+	.ctrl = pior507d_ctrl,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
new file mode 100644
index 000000000000..2d540de27f59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/class.h>
+
+static void
+sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	     struct nv50_head_atom *asyh)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 6))) {
+		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
+			if (asyh) {
+				ctrl |= asyh->or.depth  << 16;
+				ctrl |= asyh->or.nvsync << 13;
+				ctrl |= asyh->or.nhsync << 12;
+			}
+			evo_mthd(push, 0x0600 + (or * 0x40), 1);
+		} else {
+			evo_mthd(push, 0x0200 + (or * 0x20), 1);
+		}
+		evo_data(push, ctrl);
+		evo_kick(push, &core->chan);
+	}
+}
+
+const struct nv50_outp_func
+sor507d = {
+	.ctrl = sor507d_ctrl,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
new file mode 100644
index 000000000000..71a4c50af8ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+
+#include <nvif/class.h>
+#include <nvif/cl0002.h>
+
+#include <drm/drm_atomic_helper.h>
+#include "nouveau_bo.h"
+
+static void
+nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
+{
+	nvif_object_fini(&ctxdma->object);
+	list_del(&ctxdma->head);
+	kfree(ctxdma);
+}
+
+static struct nv50_wndw_ctxdma *
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+{
+	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+	struct nv50_wndw_ctxdma *ctxdma;
+	const u8    kind = fb->nvbo->kind;
+	const u32 handle = 0xfb000000 | kind;
+	struct {
+		struct nv_dma_v0 base;
+		union {
+			struct nv50_dma_v0 nv50;
+			struct gf100_dma_v0 gf100;
+			struct gf119_dma_v0 gf119;
+		};
+	} args = {};
+	u32 argc = sizeof(args.base);
+	int ret;
+
+	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
+		if (ctxdma->object.handle == handle)
+			return ctxdma;
+	}
+
+	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
+		return ERR_PTR(-ENOMEM);
+	list_add(&ctxdma->head, &wndw->ctxdma.list);
+
+	args.base.target = NV_DMA_V0_TARGET_VRAM;
+	args.base.access = NV_DMA_V0_ACCESS_RDWR;
+	args.base.start  = 0;
+	args.base.limit  = drm->client.device.info.ram_user - 1;
+
+	if (drm->client.device.info.chipset < 0x80) {
+		args.nv50.part = NV50_DMA_V0_PART_256;
+		argc += sizeof(args.nv50);
+	} else
+	if (drm->client.device.info.chipset < 0xc0) {
+		args.nv50.part = NV50_DMA_V0_PART_256;
+		args.nv50.kind = kind;
+		argc += sizeof(args.nv50);
+	} else
+	if (drm->client.device.info.chipset < 0xd0) {
+		args.gf100.kind = kind;
+		argc += sizeof(args.gf100);
+	} else {
+		args.gf119.page = GF119_DMA_V0_PAGE_LP;
+		args.gf119.kind = kind;
+		argc += sizeof(args.gf119);
+	}
+
+	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
+			       &args, argc, &ctxdma->object);
+	if (ret) {
+		nv50_wndw_ctxdma_del(ctxdma);
+		return ERR_PTR(ret);
+	}
+
+	return ctxdma;
+}
+
+int
+nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	if (asyw->set.ntfy)
+		return wndw->func->ntfy_wait_begun(wndw, asyw);
+	return 0;
+}
+
+u32
+nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
+		    struct nv50_wndw_atom *asyw)
+{
+	if (asyw->clr.sema && (!asyw->set.sema || flush))
+		wndw->func->sema_clr(wndw);
+	if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
+		wndw->func->ntfy_clr(wndw);
+	if (asyw->clr.image && (!asyw->set.image || flush))
+		wndw->func->image_clr(wndw);
+
+	return flush ? wndw->func->update(wndw, interlock) : 0;
+}
+
+u32
+nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
+		    struct nv50_wndw_atom *asyw)
+{
+	if (interlock) {
+		asyw->image.mode = 0;
+		asyw->image.interval = 1;
+	}
+
+	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
+	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
+	if (asyw->set.image) wndw->func->image_set(wndw, asyw);
+	if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
+	if (asyw->set.point) {
+		wndw->immd->point(wndw, asyw);
+		wndw->immd->update(wndw, interlock);
+	}
+
+	return wndw->func->update ? wndw->func->update(wndw, interlock) : 0;
+}
+
+static void
+nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
+			       struct nv50_wndw_atom *asyw,
+			       struct nv50_head_atom *asyh)
+{
+	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
+	wndw->func->release(wndw, asyw, asyh);
+	asyw->ntfy.handle = 0;
+	asyw->sema.handle = 0;
+}
+
+static int
+nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
+			       struct nv50_wndw_atom *asyw,
+			       struct nv50_head_atom *asyh)
+{
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+	int ret;
+
+	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
+
+	asyw->image.w = fb->base.width;
+	asyw->image.h = fb->base.height;
+	asyw->image.kind = fb->nvbo->kind;
+
+	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+		asyw->interval = 0;
+	else
+		asyw->interval = 1;
+
+	if (asyw->image.kind) {
+		asyw->image.layout = 0;
+		if (drm->client.device.info.chipset >= 0xc0)
+			asyw->image.block = fb->nvbo->mode >> 4;
+		else
+			asyw->image.block = fb->nvbo->mode;
+		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
+	} else {
+		asyw->image.layout = 1;
+		asyw->image.block  = 0;
+		asyw->image.pitch  = fb->base.pitches[0];
+	}
+
+	ret = wndw->func->acquire(wndw, asyw, asyh);
+	if (ret)
+		return ret;
+
+	if (asyw->set.image) {
+		if (!(asyw->image.mode = asyw->interval ? 0 : 1))
+			asyw->image.interval = asyw->interval;
+		else
+			asyw->image.interval = 0;
+	}
+
+	return 0;
+}
+
+int
+nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
+{
+	struct nouveau_drm *drm = nouveau_drm(plane->dev);
+	struct nv50_wndw *wndw = nv50_wndw(plane);
+	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
+	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+	struct nv50_head_atom *harm = NULL, *asyh = NULL;
+	bool varm = false, asyv = false, asym = false;
+	int ret;
+
+	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
+	if (asyw->state.crtc) {
+		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+		if (IS_ERR(asyh))
+			return PTR_ERR(asyh);
+		asym = drm_atomic_crtc_needs_modeset(&asyh->state);
+		asyv = asyh->state.active;
+	}
+
+	if (armw->state.crtc) {
+		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
+		if (IS_ERR(harm))
+			return PTR_ERR(harm);
+		varm = harm->state.crtc->state->active;
+	}
+
+	if (asyv) {
+		asyw->point.x = asyw->state.crtc_x;
+		asyw->point.y = asyw->state.crtc_y;
+		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
+			asyw->set.point = true;
+
+		ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+		if (ret)
+			return ret;
+	} else
+	if (varm) {
+		nv50_wndw_atomic_check_release(wndw, asyw, harm);
+	} else {
+		return 0;
+	}
+
+	if (!asyv || asym) {
+		asyw->clr.ntfy = armw->ntfy.handle != 0;
+		asyw->clr.sema = armw->sema.handle != 0;
+		if (wndw->func->image_clr)
+			asyw->clr.image = armw->image.handle != 0;
+		asyw->set.lut = wndw->func->lut && asyv;
+	}
+
+	return 0;
+}
+
+static void
+nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
+{
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
+	struct nouveau_drm *drm = nouveau_drm(plane->dev);
+
+	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
+	if (!old_state->fb)
+		return;
+
+	nouveau_bo_unpin(fb->nvbo);
+}
+
+static int
+nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
+{
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+	struct nouveau_drm *drm = nouveau_drm(plane->dev);
+	struct nv50_wndw *wndw = nv50_wndw(plane);
+	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+	struct nv50_head_atom *asyh;
+	struct nv50_wndw_ctxdma *ctxdma;
+	int ret;
+
+	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+	if (!asyw->state.fb)
+		return 0;
+
+	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+	if (ret)
+		return ret;
+
+	ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+	if (IS_ERR(ctxdma)) {
+		nouveau_bo_unpin(fb->nvbo);
+		return PTR_ERR(ctxdma);
+	}
+
+	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
+	asyw->image.handle = ctxdma->object.handle;
+	asyw->image.offset = fb->nvbo->bo.offset;
+
+	if (wndw->func->prepare) {
+		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+		if (IS_ERR(asyh))
+			return PTR_ERR(asyh);
+
+		wndw->func->prepare(wndw, asyh, asyw);
+	}
+
+	return 0;
+}
+
+static const struct drm_plane_helper_funcs
+nv50_wndw_helper = {
+	.prepare_fb = nv50_wndw_prepare_fb,
+	.cleanup_fb = nv50_wndw_cleanup_fb,
+	.atomic_check = nv50_wndw_atomic_check,
+};
+
+static void
+nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
+			       struct drm_plane_state *state)
+{
+	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+	__drm_atomic_helper_plane_destroy_state(&asyw->state);
+	kfree(asyw);
+}
+
+static struct drm_plane_state *
+nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
+{
+	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
+	struct nv50_wndw_atom *asyw;
+	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
+		return NULL;
+	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
+	asyw->interval = 1;
+	asyw->sema = armw->sema;
+	asyw->ntfy = armw->ntfy;
+	asyw->image = armw->image;
+	asyw->point = armw->point;
+	asyw->lut = armw->lut;
+	asyw->clr.mask = 0;
+	asyw->set.mask = 0;
+	return &asyw->state;
+}
+
+static void
+nv50_wndw_reset(struct drm_plane *plane)
+{
+	struct nv50_wndw_atom *asyw;
+
+	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
+		return;
+
+	if (plane->state)
+		plane->funcs->atomic_destroy_state(plane, plane->state);
+	plane->state = &asyw->state;
+	plane->state->plane = plane;
+	plane->state->rotation = DRM_MODE_ROTATE_0;
+}
+
+static void
+nv50_wndw_destroy(struct drm_plane *plane)
+{
+	struct nv50_wndw *wndw = nv50_wndw(plane);
+	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
+
+	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
+		nv50_wndw_ctxdma_del(ctxdma);
+	}
+
+	nvif_notify_fini(&wndw->notify);
+	nv50_dmac_destroy(&wndw->wimm);
+	nv50_dmac_destroy(&wndw->wndw);
+	drm_plane_cleanup(&wndw->plane);
+	kfree(wndw);
+}
+
+const struct drm_plane_funcs
+nv50_wndw = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = nv50_wndw_destroy,
+	.reset = nv50_wndw_reset,
+	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
+	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+};
+
+static int
+nv50_wndw_notify(struct nvif_notify *notify)
+{
+	return NVIF_NOTIFY_KEEP;
+}
+
+void
+nv50_wndw_fini(struct nv50_wndw *wndw)
+{
+	nvif_notify_put(&wndw->notify);
+}
+
+void
+nv50_wndw_init(struct nv50_wndw *wndw)
+{
+	nvif_notify_get(&wndw->notify);
+}
+
+int
+nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
+	       enum drm_plane_type type, const char *name, int index,
+	       const u32 *format, struct nv50_wndw **pwndw)
+{
+	struct nv50_wndw *wndw;
+	int nformat;
+	int ret;
+
+	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
+		return -ENOMEM;
+	wndw->func = func;
+	wndw->id = index;
+
+	wndw->ctxdma.parent = &wndw->wndw.base.user;
+	INIT_LIST_HEAD(&wndw->ctxdma.list);
+
+	for (nformat = 0; format[nformat]; nformat++);
+
+	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
+				       format, nformat, NULL,
+				       type, "%s-%d", name, index);
+	if (ret) {
+		kfree(*pwndw);
+		*pwndw = NULL;
+		return ret;
+	}
+
+	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
+
+	wndw->notify.func = nv50_wndw_notify;
+	return 0;
+}
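
Throughout this file the "armw"/"armh" variables name the state currently
armed on the hardware (plane->state / crtc->state), while "asyw"/"asyh"
name the new state being assembled during the atomic check.  The check
functions raise bits in asyw->set for methods that must be (re)programmed
and in asyw->clr for methods that must be torn down, and
nv50_wndw_flush_set()/nv50_wndw_flush_clr() replay those masks against the
per-window function table.  A minimal sketch, under those assumptions, of
how a commit path might drive an already-checked atom:

	static void
	example_commit_window(struct nv50_wndw *wndw,
			      struct nv50_wndw_atom *asyw, u32 interlock)
	{
		/* Tear down anything the new state no longer needs... */
		nv50_wndw_flush_clr(wndw, interlock, false, asyw);
		/* ...then program and latch the new state. */
		nv50_wndw_flush_set(wndw, interlock, asyw);
		/* Optionally block until the update has been taken. */
		nv50_wndw_wait_armed(wndw, asyw);
	}

The real commit path in disp.c additionally accumulates the interlock
masks across channels; the sketch only shows the per-window ordering.
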
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
new file mode 100644
index 000000000000..1931e3068115
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -0,0 +1,73 @@
+#ifndef __NV50_KMS_WNDW_H__
+#define __NV50_KMS_WNDW_H__
+#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
+#include "disp.h"
+#include "atom.h"
+
+#include <nvif/notify.h>
+
+struct nv50_wndw_ctxdma {
+	struct list_head head;
+	struct nvif_object object;
+};
+
+struct nv50_wndw {
+	const struct nv50_wndw_func *func;
+	const struct nv50_wimm_func *immd;
+	int id;
+
+	struct {
+		struct nvif_object *parent;
+		struct list_head list;
+	} ctxdma;
+
+	struct drm_plane plane;
+
+	struct nv50_dmac wndw;
+	struct nv50_dmac wimm;
+
+	struct nvif_notify notify;
+	u16 ntfy;
+	u16 sema;
+	u32 data;
+};
+
+int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
+		   enum drm_plane_type, const char *name, int index,
+		   const u32 *format, struct nv50_wndw **);
+void nv50_wndw_init(struct nv50_wndw *);
+void nv50_wndw_fini(struct nv50_wndw *);
+u32 nv50_wndw_flush_set(struct nv50_wndw *, u32 interlock,
+			struct nv50_wndw_atom *);
+u32 nv50_wndw_flush_clr(struct nv50_wndw *, u32 interlock, bool flush,
+			struct nv50_wndw_atom *);
+int nv50_wndw_wait_armed(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+struct nv50_wndw_func {
+	int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+		       struct nv50_head_atom *asyh);
+	void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
+			struct nv50_head_atom *asyh);
+	void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
+			struct nv50_wndw_atom *asyw);
+
+	void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	void (*sema_clr)(struct nv50_wndw *);
+	void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	void (*ntfy_clr)(struct nv50_wndw *);
+	int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	void (*image_clr)(struct nv50_wndw *);
+	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+	u32 (*update)(struct nv50_wndw *, u32 interlock);
+};
+
+extern const struct drm_plane_funcs nv50_wndw;
+
+struct nv50_wimm_func {
+	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
+
+	u32 (*update)(struct nv50_wndw *, u32 interlock);
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 918187cee84b..fbd3b15583bc 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -28,7 +28,6 @@
 #define __NV50_DISPLAY_H__
 
 #include "nouveau_display.h"
-#include "nouveau_crtc.h"
 #include "nouveau_reg.h"
 
 int  nv50_display_create(struct drm_device *);

From 09e1b78aab5715eacab02e4047c7a47d72f6a1e9 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1119/1286] drm/nouveau/kms/nv50-: split core implementation by
 hardware class

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/Kbuild     |   8 +
 drivers/gpu/drm/nouveau/dispnv50/core.c     |  22 +-
 drivers/gpu/drm/nouveau/dispnv50/core.h     |  20 ++
 drivers/gpu/drm/nouveau/dispnv50/core507d.c |  51 ++-
 drivers/gpu/drm/nouveau/dispnv50/core827d.c |  41 +++
 drivers/gpu/drm/nouveau/dispnv50/core907d.c |  40 +++
 drivers/gpu/drm/nouveau/dispnv50/core917d.c |  40 +++
 drivers/gpu/drm/nouveau/dispnv50/dac507d.c  |  19 +-
 drivers/gpu/drm/nouveau/dispnv50/dac907d.c  |  39 +++
 drivers/gpu/drm/nouveau/dispnv50/disp.c     |  46 +--
 drivers/gpu/drm/nouveau/dispnv50/head.c     |  29 +-
 drivers/gpu/drm/nouveau/dispnv50/head.h     |  27 ++
 drivers/gpu/drm/nouveau/dispnv50/head507d.c | 328 +++++++-------------
 drivers/gpu/drm/nouveau/dispnv50/head827d.c | 120 +++++++
 drivers/gpu/drm/nouveau/dispnv50/head907d.c | 274 ++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/head917d.c |  55 ++++
 drivers/gpu/drm/nouveau/dispnv50/pior507d.c |  18 +-
 drivers/gpu/drm/nouveau/dispnv50/sor507d.c  |  18 +-
 drivers/gpu/drm/nouveau/dispnv50/sor907d.c  |  41 +++
 19 files changed, 903 insertions(+), 333 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/core827d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/core907d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/core917d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/dac907d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/head827d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/head907d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/head917d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/sor907d.c

diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index f3877d2d8840..cde3ae98191a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -2,15 +2,23 @@ nouveau-y += dispnv50/disp.o
 
 nouveau-y += dispnv50/core.o
 nouveau-y += dispnv50/core507d.o
+nouveau-y += dispnv50/core827d.o
+nouveau-y += dispnv50/core907d.o
+nouveau-y += dispnv50/core917d.o
 
 nouveau-y += dispnv50/dac507d.o
+nouveau-y += dispnv50/dac907d.o
 
 nouveau-y += dispnv50/pior507d.o
 
 nouveau-y += dispnv50/sor507d.o
+nouveau-y += dispnv50/sor907d.o
 
 nouveau-y += dispnv50/head.o
 nouveau-y += dispnv50/head507d.o
+nouveau-y += dispnv50/head827d.o
+nouveau-y += dispnv50/head907d.o
+nouveau-y += dispnv50/head917d.o
 
 nouveau-y += dispnv50/wndw.o
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index b12899fe052a..f87cbaa4f8ec 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,17 +42,17 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
 		int version;
 		int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
 	} cores[] = {
-		{ GP102_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GP100_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GM200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GM107_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GK110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GK104_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GF110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GT214_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GT206_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{ GT200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
-		{   G82_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
+		{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+		{ GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+		{ GM200_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+		{ GM107_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+		{ GK110_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+		{ GK104_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
+		{ GF110_DISP_CORE_CHANNEL_DMA, 0, core907d_new },
+		{ GT214_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
+		{ GT206_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
+		{ GT200_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
+		{   G82_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
 		{  NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
 		{}
 	};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index 3cd54469311a..5fd7ddd31e5e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -12,6 +12,12 @@ int nv50_core_new(struct nouveau_drm *, struct nv50_core **);
 void nv50_core_del(struct nv50_core **);
 
 struct nv50_core_func {
+	void (*init)(struct nv50_core *);
+	void (*ntfy_init)(struct nouveau_bo *, u32 offset);
+	int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
+			      struct nvif_device *);
+	void (*update)(struct nv50_core *, u32 interlock, bool ntfy);
+
 	const struct nv50_head_func *head;
 	const struct nv50_outp_func {
 		void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
@@ -20,7 +26,21 @@ struct nv50_core_func {
 };
 
 int core507d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
+		  struct nv50_core **);
+void core507d_init(struct nv50_core *);
+void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
+void core507d_update(struct nv50_core *, u32, bool);
 extern const struct nv50_outp_func dac507d;
 extern const struct nv50_outp_func sor507d;
 extern const struct nv50_outp_func pior507d;
+
+int core827d_new(struct nouveau_drm *, s32, struct nv50_core **);
+
+int core907d_new(struct nouveau_drm *, s32, struct nv50_core **);
+extern const struct nv50_outp_func dac907d;
+extern const struct nv50_outp_func sor907d;
+
+int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index b0325f69a26f..96d7d8fde669 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -26,15 +26,64 @@
 
 #include "nouveau_bo.h"
 
+void
+core507d_update(struct nv50_core *core, u32 interlock, bool ntfy)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 5))) {
+		if (ntfy) {
+			evo_mthd(push, 0x0084, 1);
+			evo_data(push, 0x80000000 | NV50_DISP_CORE_NTFY);
+		}
+		evo_mthd(push, 0x0080, 2);
+		evo_data(push, interlock);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &core->chan);
+	}
+}
+
+int
+core507d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
+			struct nvif_device *device)
+{
+	s64 time = nvif_msec(device, 2000ULL,
+		if (nouveau_bo_rd32(bo, offset / 4))
+			break;
+		usleep_range(1, 2);
+	);
+	return time < 0 ? time : 0;
+}
+
+void
+core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
+{
+	nouveau_bo_wr32(bo, offset / 4, 0x00000000);
+}
+
+void
+core507d_init(struct nv50_core *core)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 2))) {
+		evo_mthd(push, 0x0088, 1);
+		evo_data(push, core->chan.sync.handle);
+		evo_kick(push, &core->chan);
+	}
+}
+
 static const struct nv50_core_func
 core507d = {
+	.init = core507d_init,
+	.ntfy_init = core507d_ntfy_init,
+	.ntfy_wait_done = core507d_ntfy_wait_done,
+	.update = core507d_update,
 	.head = &head507d,
 	.dac = &dac507d,
 	.sor = &sor507d,
 	.pior = &pior507d,
 };
 
-static int
+int
 core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
 	      s32 oclass, struct nv50_core **pcore)
 {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core827d.c b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
new file mode 100644
index 000000000000..6123a068f836
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static const struct nv50_core_func
+core827d = {
+	.init = core507d_init,
+	.ntfy_init = core507d_ntfy_init,
+	.ntfy_wait_done = core507d_ntfy_wait_done,
+	.update = core507d_update,
+	.head = &head827d,
+	.dac = &dac507d,
+	.sor = &sor507d,
+	.pior = &pior507d,
+};
+
+int
+core827d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+	return core507d_new_(&core827d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
new file mode 100644
index 000000000000..ef822f813435
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static const struct nv50_core_func
+core907d = {
+	.init = core507d_init,
+	.ntfy_init = core507d_ntfy_init,
+	.ntfy_wait_done = core507d_ntfy_wait_done,
+	.update = core507d_update,
+	.head = &head907d,
+	.dac = &dac907d,
+	.sor = &sor907d,
+};
+
+int
+core907d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+	return core507d_new_(&core907d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
new file mode 100644
index 000000000000..392338df5bfd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static const struct nv50_core_func
+core917d = {
+	.init = core507d_init,
+	.ntfy_init = core507d_ntfy_init,
+	.ntfy_wait_done = core507d_ntfy_wait_done,
+	.update = core507d_update,
+	.head = &head917d,
+	.dac = &dac907d,
+	.sor = &sor907d,
+};
+
+int
+core917d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+	return core507d_new_(&core917d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
index 28b6025a80f3..2a10ef7d30a8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
@@ -21,26 +21,19 @@
  */
 #include "core.h"
 
-#include <nvif/class.h>
-
 static void
 dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
 	     struct nv50_head_atom *asyh)
 {
 	u32 *push, sync = 0;
 	if ((push = evo_wait(&core->chan, 3))) {
-		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			if (asyh) {
-				sync |= asyh->or.nvsync << 1;
-				sync |= asyh->or.nhsync;
-			}
-			evo_mthd(push, 0x0400 + (or * 0x080), 2);
-			evo_data(push, ctrl);
-			evo_data(push, sync);
-		} else {
-			evo_mthd(push, 0x0180 + (or * 0x020), 1);
-			evo_data(push, ctrl);
+		if (asyh) {
+			sync |= asyh->or.nvsync << 1;
+			sync |= asyh->or.nhsync;
 		}
+		evo_mthd(push, 0x0400 + (or * 0x080), 2);
+		evo_data(push, ctrl);
+		evo_data(push, sync);
 		evo_kick(push, &core->chan);
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
new file mode 100644
index 000000000000..11e87fa53fac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+static void
+dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	     struct nv50_head_atom *asyh)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 2))) {
+		evo_mthd(push, 0x0180 + (or * 0x020), 1);
+		evo_data(push, ctrl);
+		evo_kick(push, &core->chan);
+	}
+}
+
+const struct nv50_outp_func
+dac907d = {
+	.ctrl = dac907d_ctrl,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a8367c5d4691..6136beeba3fc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1587,10 +1587,9 @@ static void
 nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
 {
 	struct nv50_disp *disp = nv50_disp(drm->dev);
-	struct nv50_dmac *core = &disp->core->chan;
+	struct nv50_core *core = disp->core;
 	struct nv50_mstm *mstm;
 	struct drm_encoder *encoder;
-	u32 *push;
 
 	NV_ATOMIC(drm, "commit core %08x\n", interlock);
 
@@ -1602,21 +1601,11 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
 		}
 	}
 
-	if ((push = evo_wait(core, 5))) {
-		evo_mthd(push, 0x0084, 1);
-		evo_data(push, 0x80000000);
-		evo_mthd(push, 0x0080, 2);
-		evo_data(push, interlock);
-		evo_data(push, 0x00000000);
-		nouveau_bo_wr32(disp->sync, 0, 0x00000000);
-		evo_kick(push, core);
-		if (nvif_msec(&drm->client.device, 2000ULL,
-			if (nouveau_bo_rd32(disp->sync, 0))
-				break;
-			usleep_range(1, 2);
-		) < 0)
-			NV_ERROR(drm, "EVO timeout\n");
-	}
+	core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
+	core->func->update(core, interlock, true);
+	if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
+				       disp->core->chan.base.device))
+		NV_ERROR(drm, "core notifier timeout\n");
 
 	drm_for_each_encoder(encoder, drm->dev) {
 		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
@@ -1770,16 +1759,10 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 
 	/* Flush update. */
 	if (interlock_core) {
-		if (!interlock_chan && atom->state.legacy_cursor_update) {
-			u32 *push = evo_wait(&disp->core->chan, 2);
-			if (push) {
-				evo_mthd(push, 0x0080, 1);
-				evo_data(push, 0x00000000);
-				evo_kick(push, &disp->core->chan);
-			}
-		} else {
+		if (interlock_chan || !atom->state.legacy_cursor_update)
 			nv50_disp_atomic_commit_core(drm, interlock_chan);
-		}
+		else
+			disp->core->func->update(disp->core, 0, false);
 	}
 
 	if (atom->lock_core)
@@ -2079,18 +2062,11 @@ nv50_display_fini(struct drm_device *dev)
 int
 nv50_display_init(struct drm_device *dev)
 {
-	struct nv50_dmac *core = &nv50_disp(dev)->core->chan;
+	struct nv50_core *core = nv50_disp(dev)->core;
 	struct drm_encoder *encoder;
 	struct drm_plane *plane;
-	u32 *push;
 
-	push = evo_wait(core, 32);
-	if (!push)
-		return -EBUSY;
-
-	evo_mthd(push, 0x0088, 1);
-	evo_data(push, core->sync.handle);
-	evo_kick(push, core);
+	core->func->init(core);
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 6a809ff24e14..1335c00500d1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -324,7 +324,6 @@ static int
 nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 {
 	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-	struct nv50_disp *disp = nv50_disp(crtc->dev);
 	struct nv50_head *head = nv50_head(crtc);
 	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
 	struct nv50_head_atom *asyh = nv50_head_atom(state);
@@ -373,31 +372,9 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 				nv50_head_atomic_check_procamp(armh, asyh, asyc);
 		}
 
-		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
-			asyh->core.x = asyh->base.x;
-			asyh->core.y = asyh->base.y;
-			asyh->core.w = asyh->base.w;
-			asyh->core.h = asyh->base.h;
-		} else
-		if ((asyh->core.visible = asyh->curs.visible) ||
-		    (asyh->core.visible = asyh->ilut.visible)) {
-			/*XXX: We need to either find some way of having the
-			 *     primary base layer appear black, while still
-			 *     being able to display the other layers, or we
-			 *     need to allocate a dummy black surface here.
-			 */
-			asyh->core.x = 0;
-			asyh->core.y = 0;
-			asyh->core.w = asyh->state.mode.hdisplay;
-			asyh->core.h = asyh->state.mode.vdisplay;
-		}
-		asyh->core.handle = disp->core->chan.vram.handle;
-		asyh->core.offset = 0;
-		asyh->core.format = 0xcf;
-		asyh->core.kind = 0;
-		asyh->core.layout = 1;
-		asyh->core.block = 0;
-		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
+		if (head->func->core_calc)
+			head->func->core_calc(head, asyh);
+
 		asyh->set.base = armh->base.cpp != asyh->base.cpp;
 		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
 	} else {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 23099a82883b..d00cebdbd260 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -24,6 +24,7 @@ struct nv50_head_func {
 	void (*mode)(struct nv50_head *, struct nv50_head_atom *);
 	void (*ilut_set)(struct nv50_head *, struct nv50_head_atom *);
 	void (*ilut_clr)(struct nv50_head *);
+	void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
 	void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
 	void (*core_clr)(struct nv50_head *);
 	void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
@@ -36,4 +37,30 @@ struct nv50_head_func {
 };
 
 extern const struct nv50_head_func head507d;
+void head507d_view(struct nv50_head *, struct nv50_head_atom *);
+void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
+void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
+void head507d_core_clr(struct nv50_head *);
+void head507d_base(struct nv50_head *, struct nv50_head_atom *);
+void head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
+void head507d_dither(struct nv50_head *, struct nv50_head_atom *);
+void head507d_procamp(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func head827d;
+
+extern const struct nv50_head_func head907d;
+void head907d_view(struct nv50_head *, struct nv50_head_atom *);
+void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
+void head907d_ilut_set(struct nv50_head *, struct nv50_head_atom *);
+void head907d_ilut_clr(struct nv50_head *);
+void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
+void head907d_core_clr(struct nv50_head *);
+void head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+void head907d_curs_clr(struct nv50_head *);
+void head907d_base(struct nv50_head *, struct nv50_head_atom *);
+void head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
+void head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
+void head907d_or(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func head917d;
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 92fa249ba72f..5f06fa174832 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -22,62 +22,34 @@
 #include "head.h"
 #include "core.h"
 
-#include <nvif/class.h>
-
-static void
-head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
-{
-	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
-	u32 *push;
-	if (core->base.user.oclass >= GF110_DISP_CORE_CHANNEL_DMA &&
-	    (push = evo_wait(core, 3))) {
-		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
-		evo_data(push, 0x00000001 | (asyh->or.depth  << 6) |
-					    (asyh->or.nvsync << 4) |
-					    (asyh->or.nhsync << 3));
-		evo_data(push, 0x31ec6000 | (head->base.index << 25) |
-					     asyh->mode.interlace);
-		evo_kick(push, core);
-	}
-}
-
-static void
+void
 head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
-		else
-			evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
-		evo_data(push, (asyh->procamp.sat.sin << 20) |
-			       (asyh->procamp.sat.cos << 8));
+		evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
+		evo_data(push, asyh->procamp.sat.sin << 20 |
+			       asyh->procamp.sat.cos << 8);
 		evo_kick(push, core);
 	}
 }
 
-static void
+void
 head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
-		else
-		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
-		else
-			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
-		evo_data(push, (asyh->dither.mode << 3) |
-			       (asyh->dither.bits << 1) |
-			        asyh->dither.enable);
+		evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
+		evo_data(push, asyh->dither.mode << 3 |
+			       asyh->dither.bits << 1 |
+			       asyh->dither.enable);
 		evo_kick(push, core);
 	}
 }
 
-static void
+void
 head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -97,16 +69,13 @@ head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 
 	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
-		else
-			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
+		evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
 		evo_data(push, bounds);
 		evo_kick(push, core);
 	}
 }
 
-static void
+void
 head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -127,10 +96,7 @@ head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 
 	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
-		else
-			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
+		evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
 		evo_data(push, bounds);
 		evo_kick(push, core);
 	}
@@ -141,22 +107,9 @@ head507d_curs_clr(struct nv50_head *head)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
-	if ((push = evo_wait(core, 4))) {
-		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
-			evo_data(push, 0x05000000);
-		} else
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
-			evo_data(push, 0x05000000);
-			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
-			evo_data(push, 0x00000000);
-		} else {
-			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
-			evo_data(push, 0x05000000);
-			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
-			evo_data(push, 0x00000000);
-		}
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
+		evo_data(push, 0x05000000);
 		evo_kick(push, core);
 	}
 }
@@ -166,42 +119,22 @@ head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
-	if ((push = evo_wait(core, 5))) {
-		if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
-			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
-						    (asyh->curs.format << 24));
-			evo_data(push, asyh->curs.offset >> 8);
-		} else
-		if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
-			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
-						    (asyh->curs.format << 24));
-			evo_data(push, asyh->curs.offset >> 8);
-			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
-			evo_data(push, asyh->curs.handle);
-		} else {
-			evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
-			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
-						    (asyh->curs.format << 24));
-			evo_data(push, asyh->curs.offset >> 8);
-			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
-			evo_data(push, asyh->curs.handle);
-		}
+	if ((push = evo_wait(core, 3))) {
+		evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+		evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
+					    asyh->curs.format << 24);
+		evo_data(push, asyh->curs.offset >> 8);
 		evo_kick(push, core);
 	}
 }
 
-static void
+void
 head507d_core_clr(struct nv50_head *head)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 2))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
-			evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
-		else
-			evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
+		evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
 		evo_data(push, 0x00000000);
 		evo_kick(push, core);
 	}
@@ -213,75 +146,67 @@ head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 9))) {
-		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
-			evo_data(push, asyh->core.offset >> 8);
-			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
-			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
-			evo_data(push, asyh->core.layout << 20 |
-				       (asyh->core.pitch >> 8) << 8 |
-				       asyh->core.block);
-			evo_data(push, asyh->core.kind << 16 |
-				       asyh->core.format << 8);
-			evo_data(push, asyh->core.handle);
-			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
-			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
-			/* EVO will complain with INVALID_STATE if we have an
-			 * active cursor and (re)specify HeadSetContextDmaIso
-			 * without also updating HeadSetOffsetCursor.
-			 */
-			asyh->set.curs = asyh->curs.visible;
-		} else
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
-			evo_data(push, asyh->core.offset >> 8);
-			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
-			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
-			evo_data(push, asyh->core.layout << 20 |
-				       (asyh->core.pitch >> 8) << 8 |
-				       asyh->core.block);
-			evo_data(push, asyh->core.format << 8);
-			evo_data(push, asyh->core.handle);
-			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
-			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
-		} else {
-			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
-			evo_data(push, asyh->core.offset >> 8);
-			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
-			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
-			evo_data(push, asyh->core.layout << 24 |
-				       (asyh->core.pitch >> 8) << 8 |
-				       asyh->core.block);
-			evo_data(push, asyh->core.format << 8);
-			evo_data(push, asyh->core.handle);
-			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
-			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
-		}
+		evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+		evo_data(push, asyh->core.offset >> 8);
+		evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+		evo_data(push, asyh->core.h << 16 | asyh->core.w);
+		evo_data(push, asyh->core.layout << 20 |
+			       asyh->core.pitch >> 8 << 8 |
+			       asyh->core.block);
+		evo_data(push, asyh->core.kind << 16 |
+			       asyh->core.format << 8);
+		evo_data(push, asyh->core.handle);
+		evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+		evo_data(push, asyh->core.y << 16 | asyh->core.x);
 		evo_kick(push, core);
+
+		/* EVO will complain with INVALID_STATE if we have an
+		 * active cursor and (re)specify HeadSetContextDmaIso
+		 * without also updating HeadSetOffsetCursor.
+		 */
+		asyh->set.curs = asyh->curs.visible;
 	}
 }
 
+void
+head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
+	if ((asyh->core.visible = (asyh->base.cpp != 0))) {
+		asyh->core.x = asyh->base.x;
+		asyh->core.y = asyh->base.y;
+		asyh->core.w = asyh->base.w;
+		asyh->core.h = asyh->base.h;
+	} else
+	if ((asyh->core.visible = asyh->curs.visible) ||
+	    (asyh->core.visible = asyh->ilut.visible)) {
+		/*XXX: We need to either find some way of having the
+		 *     primary base layer appear black, while still
+		 *     being able to display the other layers, or we
+		 *     need to allocate a dummy black surface here.
+		 */
+		asyh->core.x = 0;
+		asyh->core.y = 0;
+		asyh->core.w = asyh->state.mode.hdisplay;
+		asyh->core.h = asyh->state.mode.vdisplay;
+	}
+	asyh->core.handle = disp->core->chan.vram.handle;
+	asyh->core.offset = 0;
+	asyh->core.format = 0xcf;
+	asyh->core.kind = 0;
+	asyh->core.layout = 1;
+	asyh->core.block = 0;
+	asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
+}
+
 static void
 head507d_ilut_clr(struct nv50_head *head)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
-	if ((push = evo_wait(core, 4))) {
-		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
-			evo_data(push, 0x40000000);
-		} else
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
-			evo_data(push, 0x40000000);
-			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
-			evo_data(push, 0x00000000);
-		} else {
-			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
-			evo_data(push, 0x03000000);
-			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
-			evo_data(push, 0x00000000);
-		}
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
+		evo_data(push, 0x40000000);
 		evo_kick(push, core);
 	}
 }
@@ -291,96 +216,51 @@ head507d_ilut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
-	if ((push = evo_wait(core, 7))) {
-		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-			evo_data(push, 0x80000000 | asyh->ilut.mode << 30);
-			evo_data(push, asyh->ilut.offset >> 8);
-		} else
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-			evo_data(push, 0x80000000 | asyh->ilut.mode << 30);
-			evo_data(push, asyh->ilut.offset >> 8);
-			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
-			evo_data(push, asyh->ilut.handle);
-		} else {
-			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
-			evo_data(push, 0x80000000 | asyh->ilut.mode << 24);
-			evo_data(push, asyh->ilut.offset >> 8);
-			evo_data(push, 0x00000000);
-			evo_data(push, 0x00000000);
-			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
-			evo_data(push, asyh->ilut.handle);
-		}
+	if ((push = evo_wait(core, 3))) {
+		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
+		evo_data(push, 0x80000000 | asyh->ilut.mode << 30);
+		evo_data(push, asyh->ilut.offset >> 8);
 		evo_kick(push, core);
 	}
 }
 
-static void
+void
 head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	struct nv50_head_mode *m = &asyh->mode;
 	u32 *push;
-	if ((push = evo_wait(core, 14))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
-			evo_data(push, 0x00800000 | m->clock);
-			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
-			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
-			evo_data(push, 0x00000000);
-			evo_data(push, (m->v.active  << 16) | m->h.active );
-			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
-			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
-			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
-			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-			evo_data(push, asyh->mode.v.blankus);
-			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
-			evo_data(push, 0x00000000);
-		} else {
-			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
-			evo_data(push, 0x00000000);
-			evo_data(push, (m->v.active  << 16) | m->h.active );
-			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
-			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
-			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
-			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
-			evo_data(push, 0x00000000); /* ??? */
-			evo_data(push, 0xffffff00);
-			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
-			evo_data(push, m->clock * 1000);
-			evo_data(push, 0x00200000); /* ??? */
-			evo_data(push, m->clock * 1000);
-		}
+	if ((push = evo_wait(core, 13))) {
+		evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
+		evo_data(push, 0x00800000 | m->clock);
+		evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
+		evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
+		evo_data(push, 0x00000000);
+		evo_data(push, m->v.active  << 16 | m->h.active );
+		evo_data(push, m->v.synce   << 16 | m->h.synce  );
+		evo_data(push, m->v.blanke  << 16 | m->h.blanke );
+		evo_data(push, m->v.blanks  << 16 | m->h.blanks );
+		evo_data(push, m->v.blank2e << 16 | m->v.blank2s);
+		evo_data(push, asyh->mode.v.blankus);
+		evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
+		evo_data(push, 0x00000000);
 		evo_kick(push, core);
 	}
 }
 
-static void
+void
 head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
-	if ((push = evo_wait(core, 10))) {
-		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
-			evo_data(push, 0x00000000);
-			evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
-			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
-			evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-		} else {
-			evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
-			evo_data(push, 0x00000000);
-			evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
-			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
-			evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
-		}
+	if ((push = evo_wait(core, 7))) {
+		evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
+		evo_data(push, asyh->view.iH << 16 | asyh->view.iW);
+		evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
+		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
+		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
 		evo_kick(push, core);
 	}
 }
@@ -391,6 +271,7 @@ head507d = {
 	.mode = head507d_mode,
 	.ilut_set = head507d_ilut_set,
 	.ilut_clr = head507d_ilut_clr,
+	.core_calc = head507d_core_calc,
 	.core_set = head507d_core_set,
 	.core_clr = head507d_core_clr,
 	.curs_set = head507d_curs_set,
@@ -399,5 +280,4 @@ head507d = {
 	.ovly = head507d_ovly,
 	.dither = head507d_dither,
 	.procamp = head507d_procamp,
-	.or = head907d_or,
 };
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
new file mode 100644
index 000000000000..84ce595fbe79
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "core.h"
+
+static void
+head827d_curs_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
+		evo_data(push, 0x05000000);
+		evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 5))) {
+		evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
+		evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
+					    asyh->curs.format << 24);
+		evo_data(push, asyh->curs.offset >> 8);
+		evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
+		evo_data(push, asyh->curs.handle);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 9))) {
+		evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
+		evo_data(push, asyh->core.offset >> 8);
+		evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
+		evo_data(push, asyh->core.h << 16 | asyh->core.w);
+		evo_data(push, asyh->core.layout << 20 |
+			       (asyh->core.pitch >> 8) << 8 |
+			       asyh->core.block);
+		evo_data(push, asyh->core.format << 8);
+		evo_data(push, asyh->core.handle);
+		evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
+		evo_data(push, asyh->core.y << 16 | asyh->core.x);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head827d_ilut_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
+		evo_data(push, 0x40000000);
+		evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head827d_ilut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 5))) {
+		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
+		evo_data(push, 0x80000000 | asyh->ilut.mode << 30);
+		evo_data(push, asyh->ilut.offset >> 8);
+		evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
+		evo_data(push, asyh->ilut.handle);
+		evo_kick(push, core);
+	}
+}
+
+const struct nv50_head_func
+head827d = {
+	.view = head507d_view,
+	.mode = head507d_mode,
+	.ilut_set = head827d_ilut_set,
+	.ilut_clr = head827d_ilut_clr,
+	.core_calc = head507d_core_calc,
+	.core_set = head827d_core_set,
+	.core_clr = head507d_core_clr,
+	.curs_set = head827d_curs_set,
+	.curs_clr = head827d_curs_clr,
+	.base = head507d_base,
+	.ovly = head507d_ovly,
+	.dither = head507d_dither,
+	.procamp = head507d_procamp,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
new file mode 100644
index 000000000000..0035eccd62d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "core.h"
+
+void
+head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 3))) {
+		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
+		evo_data(push, 0x00000001 | asyh->or.depth  << 6 |
+					    asyh->or.nvsync << 4 |
+					    asyh->or.nhsync << 3);
+		evo_data(push, 0x31ec6000 | head->base.index << 25 |
+					    asyh->mode.interlace);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
+		evo_data(push, asyh->procamp.sat.sin << 20 |
+			       asyh->procamp.sat.cos << 8);
+		evo_kick(push, core);
+	}
+}
+
+static void
+head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
+		evo_data(push, asyh->dither.mode << 3 |
+			       asyh->dither.bits << 1 |
+			       asyh->dither.enable);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 bounds = 0;
+	u32 *push;
+
+	if (asyh->ovly.cpp) {
+		switch (asyh->ovly.cpp) {
+		case 8: bounds |= 0x00000500; break;
+		case 4: bounds |= 0x00000300; break;
+		case 2: bounds |= 0x00000100; break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		bounds |= 0x00000001;
+	}
+
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
+		evo_data(push, bounds);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 bounds = 0;
+	u32 *push;
+
+	if (asyh->base.cpp) {
+		switch (asyh->base.cpp) {
+		case 8: bounds |= 0x00000500; break;
+		case 4: bounds |= 0x00000300; break;
+		case 2: bounds |= 0x00000100; break;
+		case 1: bounds |= 0x00000000; break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		bounds |= 0x00000001;
+	}
+
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
+		evo_data(push, bounds);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_curs_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
+		evo_data(push, 0x05000000);
+		evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 5))) {
+		evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
+		evo_data(push, 0x80000000 | asyh->curs.layout << 26 |
+					    asyh->curs.format << 24);
+		evo_data(push, asyh->curs.offset >> 8);
+		evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
+		evo_data(push, asyh->curs.handle);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_core_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 9))) {
+		evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
+		evo_data(push, asyh->core.offset >> 8);
+		evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
+		evo_data(push, asyh->core.h << 16 | asyh->core.w);
+		evo_data(push, asyh->core.layout << 24 |
+			       (asyh->core.pitch >> 8) << 8 |
+			       asyh->core.block);
+		evo_data(push, asyh->core.format << 8);
+		evo_data(push, asyh->core.handle);
+		evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
+		evo_data(push, asyh->core.y << 16 | asyh->core.x);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_ilut_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
+		evo_data(push, 0x03000000);
+		evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_ilut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 7))) {
+		evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
+		evo_data(push, 0x80000000 | asyh->ilut.mode << 24);
+		evo_data(push, asyh->ilut.offset >> 8);
+		evo_data(push, 0x00000000);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
+		evo_data(push, asyh->ilut.handle);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	struct nv50_head_mode *m = &asyh->mode;
+	u32 *push;
+	if ((push = evo_wait(core, 14))) {
+		evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
+		evo_data(push, 0x00000000);
+		evo_data(push, m->v.active  << 16 | m->h.active );
+		evo_data(push, m->v.synce   << 16 | m->h.synce  );
+		evo_data(push, m->v.blanke  << 16 | m->h.blanke );
+		evo_data(push, m->v.blanks  << 16 | m->h.blanks );
+		evo_data(push, m->v.blank2e << 16 | m->v.blank2s);
+		evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
+		evo_data(push, 0x00000000); /* ??? */
+		evo_data(push, 0xffffff00);
+		evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
+		evo_data(push, m->clock * 1000);
+		evo_data(push, 0x00200000); /* ??? */
+		evo_data(push, m->clock * 1000);
+		evo_kick(push, core);
+	}
+}
+
+void
+head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 8))) {
+		evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
+		evo_data(push, asyh->view.iH << 16 | asyh->view.iW);
+		evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
+		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
+		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
+		evo_data(push, asyh->view.oH << 16 | asyh->view.oW);
+		evo_kick(push, core);
+	}
+}
+
+const struct nv50_head_func
+head907d = {
+	.view = head907d_view,
+	.mode = head907d_mode,
+	.ilut_set = head907d_ilut_set,
+	.ilut_clr = head907d_ilut_clr,
+	.core_calc = head507d_core_calc,
+	.core_set = head907d_core_set,
+	.core_clr = head907d_core_clr,
+	.curs_set = head907d_curs_set,
+	.curs_clr = head907d_curs_clr,
+	.base = head907d_base,
+	.ovly = head907d_ovly,
+	.dither = head907d_dither,
+	.procamp = head907d_procamp,
+	.or = head907d_or,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
new file mode 100644
index 000000000000..5341ea3bc7b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "core.h"
+
+static void
+head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
+		evo_data(push, asyh->dither.mode << 3 |
+			       asyh->dither.bits << 1 |
+			       asyh->dither.enable);
+		evo_kick(push, core);
+	}
+}
+
+const struct nv50_head_func
+head917d = {
+	.view = head907d_view,
+	.mode = head907d_mode,
+	.ilut_set = head907d_ilut_set,
+	.ilut_clr = head907d_ilut_clr,
+	.core_calc = head507d_core_calc,
+	.core_set = head907d_core_set,
+	.core_clr = head907d_core_clr,
+	.curs_set = head907d_curs_set,
+	.curs_clr = head907d_curs_clr,
+	.base = head907d_base,
+	.ovly = head907d_ovly,
+	.dither = head917d_dither,
+	.procamp = head907d_procamp,
+	.or = head907d_or,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
index a99ba6a7216f..d2bac6a341dc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -21,23 +21,19 @@
  */
 #include "core.h"
 
-#include <nvif/class.h>
-
 static void
 pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
 	      struct nv50_head_atom *asyh)
 {
 	u32 *push;
-	if ((push = evo_wait(&core->chan, 8))) {
-		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			if (asyh) {
-				ctrl |= asyh->or.depth  << 16;
-				ctrl |= asyh->or.nvsync << 13;
-				ctrl |= asyh->or.nhsync << 12;
-			}
-			evo_mthd(push, 0x0700 + (or * 0x040), 1);
-			evo_data(push, ctrl);
+	if ((push = evo_wait(&core->chan, 2))) {
+		if (asyh) {
+			ctrl |= asyh->or.depth  << 16;
+			ctrl |= asyh->or.nvsync << 13;
+			ctrl |= asyh->or.nhsync << 12;
 		}
+		evo_mthd(push, 0x0700 + (or * 0x040), 1);
+		evo_data(push, ctrl);
 		evo_kick(push, &core->chan);
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
index 2d540de27f59..5222fe6a9b21 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -21,24 +21,18 @@
  */
 #include "core.h"
 
-#include <nvif/class.h>
-
 static void
 sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
 	     struct nv50_head_atom *asyh)
 {
 	u32 *push;
-	if ((push = evo_wait(&core->chan, 6))) {
-		if (core->chan.base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
-			if (asyh) {
-				ctrl |= asyh->or.depth  << 16;
-				ctrl |= asyh->or.nvsync << 13;
-				ctrl |= asyh->or.nhsync << 12;
-			}
-			evo_mthd(push, 0x0600 + (or * 0x40), 1);
-		} else {
-			evo_mthd(push, 0x0200 + (or * 0x20), 1);
+	if ((push = evo_wait(&core->chan, 2))) {
+		if (asyh) {
+			ctrl |= asyh->or.depth  << 16;
+			ctrl |= asyh->or.nvsync << 13;
+			ctrl |= asyh->or.nhsync << 12;
 		}
+		evo_mthd(push, 0x0600 + (or * 0x40), 1);
 		evo_data(push, ctrl);
 		evo_kick(push, &core->chan);
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
new file mode 100644
index 000000000000..b0314ec11fb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+#include <nvif/class.h>
+
+static void
+sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	     struct nv50_head_atom *asyh)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 2))) {
+		evo_mthd(push, 0x0200 + (or * 0x20), 1);
+		evo_data(push, ctrl);
+		evo_kick(push, &core->chan);
+	}
+}
+
+const struct nv50_outp_func
+sor907d = {
+	.ctrl = sor907d_ctrl,
+};

From ccd27db8c731817ef36e75de2b5fdc2e79550213 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1120/1286] drm/nouveau/kms/nv50-: split base implementation by
 hardware class

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/Kbuild     |   2 +
 drivers/gpu/drm/nouveau/dispnv50/base.c     |  12 +--
 drivers/gpu/drm/nouveau/dispnv50/base.h     |  21 ++++
 drivers/gpu/drm/nouveau/dispnv50/base507c.c | 101 ++++++++------------
 drivers/gpu/drm/nouveau/dispnv50/base827c.c |  67 +++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/base907c.c |  80 ++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/disp.c     |  11 +--
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     |  22 ++++-
 drivers/gpu/drm/nouveau/dispnv50/wndw.h     |   5 +-
 9 files changed, 242 insertions(+), 79 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/base827c.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/base907c.c
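
The commit message above is terse, so a short illustration of the pattern may help. As with the core/head split earlier in the series, the idea is to stop testing the channel class at runtime inside each method (oclass < G82_..., oclass < GF110_..., and so on) and instead select a per-class function table once, when the channel is constructed; newer classes override only the methods that differ and reuse the 507c baseline for the rest. The sketch below is not nouveau code and every name in it (demo_chan, demo_funcs, demo_new, the class numbers) is made up; it only demonstrates the dispatch shape this patch applies to base507c/base827c/base907c.

/*
 * Illustrative sketch only -- not nouveau code.  All names here are
 * hypothetical; the point is the construction-time dispatch the patch
 * introduces in place of per-call oclass checks.
 */
#include <stdio.h>

struct demo_chan;

struct demo_funcs {
	void (*ntfy_reset)(struct demo_chan *);
	void (*image_set)(struct demo_chan *);
};

struct demo_chan {
	const struct demo_funcs *func;	/* chosen once, per hardware class */
	unsigned int oclass;
};

/* Oldest class: its methods double as the shared baseline. */
static void demo507c_ntfy_reset(struct demo_chan *chan)
{
	printf("507c ntfy_reset, class %#x\n", chan->oclass);
}

static void demo507c_image_set(struct demo_chan *chan)
{
	printf("507c image_set, class %#x\n", chan->oclass);
}

static const struct demo_funcs demo507c = {
	.ntfy_reset = demo507c_ntfy_reset,
	.image_set = demo507c_image_set,
};

/* Newer class overrides only what differs, reusing the rest. */
static void demo907c_image_set(struct demo_chan *chan)
{
	printf("907c image_set, class %#x\n", chan->oclass);
}

static const struct demo_funcs demo907c = {
	.ntfy_reset = demo507c_ntfy_reset,	/* unchanged since 507c */
	.image_set = demo907c_image_set,
};

/* Constructor picks the table from the class id, once. */
static void demo_new(struct demo_chan *chan, unsigned int oclass)
{
	chan->oclass = oclass;
	chan->func = oclass >= 0x9070 ? &demo907c : &demo507c;
}

int main(void)
{
	struct demo_chan a, b;

	demo_new(&a, 0x5070);	/* old class -> 507c table */
	demo_new(&b, 0x9170);	/* new class -> 907c table */

	a.func->image_set(&a);
	b.func->image_set(&b);
	return 0;
}

With that shape, supporting a new hardware class mostly means adding one small file with its overrides plus one entry in the constructor table, which is what base827c.c and base907c.c below do.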

diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index cde3ae98191a..674221dea7a1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -24,6 +24,8 @@ nouveau-y += dispnv50/wndw.o
 
 nouveau-y += dispnv50/base.o
 nouveau-y += dispnv50/base507c.o
+nouveau-y += dispnv50/base827c.o
+nouveau-y += dispnv50/base907c.o
 
 nouveau-y += dispnv50/curs.o
 nouveau-y += dispnv50/curs507a.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.c b/drivers/gpu/drm/nouveau/dispnv50/base.c
index 12ca5d70509c..5f184ab833e8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.c
@@ -31,12 +31,12 @@ nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 		int version;
 		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 	} bases[] = {
-		{ GK110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{ GK104_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{ GF110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{ GT214_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{ GT200_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
-		{   G82_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
+		{ GK110_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
+		{ GK104_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
+		{ GF110_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
+		{ GT214_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
+		{ GT200_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
+		{   G82_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
 		{  NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
 		{}
 	};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
index 1daba7319ba9..edf96a8d645f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -3,6 +3,27 @@
 #include "wndw.h"
 
 int base507c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int base507c_new_(const struct nv50_wndw_func *, const u32 *format,
+		  struct nouveau_drm *, int head, s32 oclass,
+		  struct nv50_wndw **);
+extern const u32 base507c_format[];
+int base507c_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+		     struct nv50_head_atom *);
+void base507c_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+		      struct nv50_head_atom *);
+void base507c_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void base507c_sema_clr(struct nv50_wndw *);
+void base507c_ntfy_reset(struct nouveau_bo *, u32);
+void base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void base507c_ntfy_clr(struct nv50_wndw *);
+int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+void base507c_image_clr(struct nv50_wndw *);
+void base507c_lut(struct nv50_wndw *, struct nv50_wndw_atom *);
+u32 base507c_update(struct nv50_wndw *, u32);
+
+int base827c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+
+int base907c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
 int nv50_base_new(struct nouveau_drm *, int head, struct nv50_wndw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index b73e7b4d86a5..850c8de670b7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -21,7 +21,6 @@
  */
 #include "base.h"
 
-#include <nvif/class.h>
 #include <nvif/cl507c.h>
 #include <nvif/event.h>
 
@@ -29,23 +28,20 @@
 #include <drm/drm_plane_helper.h>
 #include "nouveau_bo.h"
 
-static u32
+u32
 base507c_update(struct nv50_wndw *wndw, u32 interlock)
 {
 	u32 *push;
-
-	if (!(push = evo_wait(&wndw->wndw, 2)))
-		return 0;
-	evo_mthd(push, 0x0080, 1);
-	evo_data(push, interlock);
-	evo_kick(push, &wndw->wndw);
-
-	if (wndw->wndw.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x0080, 1);
+		evo_data(push, interlock);
+		evo_kick(push, &wndw->wndw);
 		return interlock ? 2 << (wndw->id * 8) : 0;
-	return interlock ? 2 << (wndw->id * 4) : 0;
+	}
+	return 0;
 }
 
-static void
+void
 base507c_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
 	u32 *push;
@@ -56,7 +52,7 @@ base507c_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
-static void
+void
 base507c_image_clr(struct nv50_wndw *wndw)
 {
 	u32 *push;
@@ -72,7 +68,6 @@ base507c_image_clr(struct nv50_wndw *wndw)
 static void
 base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-	const s32 oclass = wndw->wndw.base.user.oclass;
 	u32 *push;
 	if ((push = evo_wait(&wndw->wndw, 10))) {
 		evo_mthd(push, 0x0084, 1);
@@ -80,56 +75,33 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 			       asyw->image.interval << 4);
 		evo_mthd(push, 0x00c0, 1);
 		evo_data(push, asyw->image.handle);
-		if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0800, 5);
-			evo_data(push, asyw->image.offset >> 8);
-			evo_data(push, 0x00000000);
-			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
-			evo_data(push, (asyw->image.layout << 20) |
-					asyw->image.pitch |
-					asyw->image.block);
-			evo_data(push, (asyw->image.kind << 16) |
-				       (asyw->image.format << 8));
-		} else
-		if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
-			evo_mthd(push, 0x0800, 5);
-			evo_data(push, asyw->image.offset >> 8);
-			evo_data(push, 0x00000000);
-			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
-			evo_data(push, (asyw->image.layout << 20) |
-					asyw->image.pitch |
-					asyw->image.block);
-			evo_data(push, asyw->image.format << 8);
-		} else {
-			evo_mthd(push, 0x0400, 5);
-			evo_data(push, asyw->image.offset >> 8);
-			evo_data(push, 0x00000000);
-			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
-			evo_data(push, (asyw->image.layout << 24) |
-					asyw->image.pitch |
-					asyw->image.block);
-			evo_data(push, asyw->image.format << 8);
-		}
+		evo_mthd(push, 0x0800, 5);
+		evo_data(push, asyw->image.offset >> 8);
+		evo_data(push, 0x00000000);
+		evo_data(push, asyw->image.h << 16 | asyw->image.w);
+		evo_data(push, asyw->image.layout << 20 |
+			       asyw->image.pitch |
+			       asyw->image.block);
+		evo_data(push, asyw->image.kind << 16 |
+			       asyw->image.format << 8);
 		evo_kick(push, &wndw->wndw);
 	}
 }
 
-static int
-base507c_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+int
+base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
+			 struct nvif_device *device)
 {
-	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
-	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
-	if (nvif_msec(&drm->client.device, 2000ULL,
-		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
+	s64 time = nvif_msec(device, 2000ULL,
+		u32 data = nouveau_bo_rd32(bo, offset / 4);
 		if ((data & 0xc0000000) == 0x40000000)
 			break;
 		usleep_range(1, 2);
-	) < 0)
-		return -ETIMEDOUT;
-	return 0;
+	);
+	return time < 0 ? time : 0;
 }
 
-static void
+void
 base507c_ntfy_clr(struct nv50_wndw *wndw)
 {
 	u32 *push;
@@ -140,7 +112,7 @@ base507c_ntfy_clr(struct nv50_wndw *wndw)
 	}
 }
 
-static void
+void
 base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
 	u32 *push;
@@ -152,7 +124,13 @@ base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
-static void
+void
+base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
+{
+	nouveau_bo_wr32(bo, offset / 4, 0x00000000);
+}
+
+void
 base507c_sema_clr(struct nv50_wndw *wndw)
 {
 	u32 *push;
@@ -163,7 +141,7 @@ base507c_sema_clr(struct nv50_wndw *wndw)
 	}
 }
 
-static void
+void
 base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
 	u32 *push;
@@ -177,14 +155,14 @@ base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
-static void
+void
 base507c_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 		 struct nv50_head_atom *asyh)
 {
 	asyh->base.cpp = 0;
 }
 
-static int
+int
 base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 		 struct nv50_head_atom *asyh)
 {
@@ -229,7 +207,7 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 	return 0;
 }
 
-static const u32
+const u32
 base507c_format[] = {
 	DRM_FORMAT_C8,
 	DRM_FORMAT_RGB565,
@@ -250,6 +228,7 @@ base507c = {
 	.release = base507c_release,
 	.sema_set = base507c_sema_set,
 	.sema_clr = base507c_sema_clr,
+	.ntfy_reset = base507c_ntfy_reset,
 	.ntfy_set = base507c_ntfy_set,
 	.ntfy_clr = base507c_ntfy_clr,
 	.ntfy_wait_begun = base507c_ntfy_wait_begun,
@@ -259,7 +238,7 @@ base507c = {
 	.update = base507c_update,
 };
 
-static int
+int
 base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
 	      struct nouveau_drm *drm, int head, s32 oclass,
 	      struct nv50_wndw **pwndw)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
new file mode 100644
index 000000000000..0d356aeeda2b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+
+static void
+base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 10))) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, asyw->image.mode << 8 |
+			       asyw->image.interval << 4);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, asyw->image.handle);
+		evo_mthd(push, 0x0800, 5);
+		evo_data(push, asyw->image.offset >> 8);
+		evo_data(push, 0x00000000);
+		evo_data(push, asyw->image.h << 16 | asyw->image.w);
+		evo_data(push, asyw->image.layout << 20 |
+			       asyw->image.pitch |
+			       asyw->image.block);
+		evo_data(push, asyw->image.format << 8);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static const struct nv50_wndw_func
+base827c = {
+	.acquire = base507c_acquire,
+	.release = base507c_release,
+	.sema_set = base507c_sema_set,
+	.sema_clr = base507c_sema_clr,
+	.ntfy_reset = base507c_ntfy_reset,
+	.ntfy_set = base507c_ntfy_set,
+	.ntfy_clr = base507c_ntfy_clr,
+	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.image_set = base827c_image_set,
+	.image_clr = base507c_image_clr,
+	.lut = base507c_lut,
+	.update = base507c_update,
+};
+
+int
+base827c_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return base507c_new_(&base827c, base507c_format, drm, head, oclass, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
new file mode 100644
index 000000000000..171d97872962
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+
+static u32
+base907c_update(struct nv50_wndw *wndw, u32 interlock)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x0080, 1);
+		evo_data(push, interlock);
+		evo_kick(push, &wndw->wndw);
+		return interlock ? 2 << (wndw->id * 4) : 0;
+	}
+	return 0;
+}
+
+static void
+base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 10))) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, asyw->image.mode << 8 |
+			       asyw->image.interval << 4);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, asyw->image.handle);
+		evo_mthd(push, 0x0400, 5);
+		evo_data(push, asyw->image.offset >> 8);
+		evo_data(push, 0x00000000);
+		evo_data(push, asyw->image.h << 16 | asyw->image.w);
+		evo_data(push, asyw->image.layout << 24 |
+			       asyw->image.pitch |
+			       asyw->image.block);
+		evo_data(push, asyw->image.format << 8);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static const struct nv50_wndw_func
+base907c = {
+	.acquire = base507c_acquire,
+	.release = base507c_release,
+	.sema_set = base507c_sema_set,
+	.sema_clr = base507c_sema_clr,
+	.ntfy_reset = base507c_ntfy_reset,
+	.ntfy_set = base507c_ntfy_set,
+	.ntfy_clr = base507c_ntfy_clr,
+	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.image_set = base907c_image_set,
+	.image_clr = base507c_image_clr,
+	.lut = base507c_lut,
+	.update = base907c_update,
+};
+
+int
+base907c_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return base507c_new_(&base907c, base507c_format, drm, head, oclass, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6136beeba3fc..c2b1578ed552 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1812,7 +1812,6 @@ nv50_disp_atomic_commit(struct drm_device *dev,
 			struct drm_atomic_state *state, bool nonblock)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_disp *disp = nv50_disp(dev);
 	struct drm_plane_state *new_plane_state;
 	struct drm_plane *plane;
 	struct drm_crtc *crtc;
@@ -1847,14 +1846,8 @@ nv50_disp_atomic_commit(struct drm_device *dev,
 		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
 		struct nv50_wndw *wndw = nv50_wndw(plane);
 
-		if (asyw->set.image) {
-			asyw->ntfy.handle = wndw->wndw.sync.handle;
-			asyw->ntfy.offset = wndw->ntfy;
-			asyw->ntfy.awaken = false;
-			asyw->set.ntfy = true;
-			nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
-			wndw->ntfy ^= 0x10;
-		}
+		if (asyw->set.image)
+			nv50_wndw_ntfy_enable(wndw, asyw);
 	}
 
 	drm_atomic_state_get(state);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 71a4c50af8ec..a1e53c74c8a8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -98,8 +98,12 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
 int
 nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
-	if (asyw->set.ntfy)
-		return wndw->func->ntfy_wait_begun(wndw, asyw);
+	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+	if (asyw->set.ntfy) {
+		return wndw->func->ntfy_wait_begun(disp->sync,
+						   asyw->ntfy.offset,
+						   wndw->wndw.base.device);
+	}
 	return 0;
 }
 
@@ -138,6 +142,20 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
 	return wndw->func->update ? wndw->func->update(wndw, interlock) : 0;
 }
 
+void
+nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+
+	asyw->ntfy.handle = wndw->wndw.sync.handle;
+	asyw->ntfy.offset = wndw->ntfy;
+	asyw->ntfy.awaken = false;
+	asyw->set.ntfy = true;
+
+	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
+	wndw->ntfy ^= 0x10;
+}
+
 static void
 nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
 			       struct nv50_wndw_atom *asyw,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 1931e3068115..70259732d938 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -41,6 +41,7 @@ u32 nv50_wndw_flush_set(struct nv50_wndw *, u32 interlock,
 			struct nv50_wndw_atom *);
 u32 nv50_wndw_flush_clr(struct nv50_wndw *, u32 interlock, bool flush,
 			struct nv50_wndw_atom *);
+void nv50_wndw_ntfy_enable(struct nv50_wndw *, struct nv50_wndw_atom *);
 int nv50_wndw_wait_armed(struct nv50_wndw *, struct nv50_wndw_atom *);
 
 struct nv50_wndw_func {
@@ -53,9 +54,11 @@ struct nv50_wndw_func {
 
 	void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*sema_clr)(struct nv50_wndw *);
+	void (*ntfy_reset)(struct nouveau_bo *, u32 offset);
 	void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*ntfy_clr)(struct nv50_wndw *);
-	int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
+			       struct nvif_device *);
 	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*image_clr)(struct nv50_wndw *);
 	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);

From 9d6c2fe1917fc5ba6a9e8586ca16d007410baf42 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1121/1286] drm/nouveau/kms/nv50-: allow specification of valid
 heads for a window

This will be required to support Volta, where window ID != head.
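
A minimal sketch of the shape of the change (illustrative only; the calls
are taken from the hunks below): callers now pass an explicit bitmask of
valid heads, which is forwarded as the plane's possible_crtcs instead of 0.

    /* caller: windows still tied to a single head pass BIT(head) */
    ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
                         "base", head, format, BIT(head), &wndw);

    /* nv50_wndw_new_(): the mask becomes the plane's possible_crtcs */
    ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
                                   format, nformat, NULL,
                                   type, "%s-%d", name, index);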

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/base507c.c | 2 +-
 drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 2 +-
 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c | 2 +-
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     | 4 ++--
 drivers/gpu/drm/nouveau/dispnv50/wndw.h     | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 850c8de670b7..548a6a67f874 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -251,7 +251,7 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
 	int ret;
 
 	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
-			     "base", head, format, &wndw);
+			     "base", head, format, BIT(head), &wndw);
 	if (*pwndw = wndw, ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 1a3e199b5b45..3e5e98c835a3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -126,7 +126,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 	int ret;
 
 	ret = nv50_wndw_new_(&curs507a_wndw, drm->dev, DRM_PLANE_TYPE_CURSOR,
-			     "curs", head, curs507a_format, &wndw);
+			     "curs", head, curs507a_format, BIT(head), &wndw);
 	if (*pwndw = wndw, ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index ceec5127a17d..1b85262bf23b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -47,7 +47,7 @@ ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
 	int ret;
 
 	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
-			     "ovly", head, format, &wndw);
+			     "ovly", head, format, BIT(head), &wndw);
 	if (*pwndw = wndw, ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index a1e53c74c8a8..764db736cf29 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -420,7 +420,7 @@ nv50_wndw_init(struct nv50_wndw *wndw)
 int
 nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 	       enum drm_plane_type type, const char *name, int index,
-	       const u32 *format, struct nv50_wndw **pwndw)
+	       const u32 *format, u32 heads, struct nv50_wndw **pwndw)
 {
 	struct nv50_wndw *wndw;
 	int nformat;
@@ -436,7 +436,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 
 	for (nformat = 0; format[nformat]; nformat++);
 
-	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
+	ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
 				       format, nformat, NULL,
 				       type, "%s-%d", name, index);
 	if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 70259732d938..8672c280a6a4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -34,7 +34,7 @@ struct nv50_wndw {
 
 int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
 		   enum drm_plane_type, const char *name, int index,
-		   const u32 *format, struct nv50_wndw **);
+		   const u32 *format, u32 heads, struct nv50_wndw **);
 void nv50_wndw_init(struct nv50_wndw *);
 void nv50_wndw_fini(struct nv50_wndw *);
 u32 nv50_wndw_flush_set(struct nv50_wndw *, u32 interlock,

From f88bc9d3ecca5ddc29642269f4624d07265c1bf5 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1122/1286] drm/nouveau/kms/nv50-: unify set/clr masks

This is a simplification that'll be used to improve interlock handling.
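
A minimal sketch of what the unification buys (illustrative only; the
expression is the one used in the hunks below): with set and clr sharing
one union type, "clear a channel unless it is also being set, or we are
flushing" collapses into a single mask computation.

    union nv50_wndw_atom_mask clr = {
        .mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
    };
    if (clr.image)
        wndw->func->image_clr(wndw);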

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/atom.h | 26 ++++---------------------
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 11 ++---------
 drivers/gpu/drm/nouveau/dispnv50/head.c | 15 +++++++-------
 drivers/gpu/drm/nouveau/dispnv50/wndw.c | 12 ++++++------
 4 files changed, 20 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 8c97e25c881f..8bf180666bb7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -105,16 +105,7 @@ struct nv50_head_atom {
 		u8 depth:4;
 	} or;
 
-	union {
-		struct {
-			bool ilut:1;
-			bool core:1;
-			bool curs:1;
-		};
-		u8 mask;
-	} clr;
-
-	union {
+	union nv50_head_atom_mask {
 		struct {
 			bool ilut:1;
 			bool core:1;
@@ -128,7 +119,7 @@ struct nv50_head_atom {
 			bool or:1;
 		};
 		u16 mask;
-	} set;
+	} set, clr;
 };
 
 static inline struct nv50_head_atom *
@@ -184,16 +175,7 @@ struct nv50_wndw_atom {
 		u16 y;
 	} point;
 
-	union {
-		struct {
-			bool ntfy:1;
-			bool sema:1;
-			bool image:1;
-		};
-		u8 mask;
-	} clr;
-
-	union {
+	union nv50_wndw_atom_mask {
 		struct {
 			bool ntfy:1;
 			bool sema:1;
@@ -202,6 +184,6 @@ struct nv50_wndw_atom {
 			bool point:1;
 		};
 		u8 mask;
-	} set;
+	} set, clr;
 };
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index c2b1578ed552..006562f7f23e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -62,19 +62,12 @@ struct nv50_outp_atom {
 	struct drm_encoder *encoder;
 	bool flush_disable;
 
-	union {
+	union nv50_outp_atom_mask {
 		struct {
 			bool ctrl:1;
 		};
 		u8 mask;
-	} clr;
-
-	union {
-		struct {
-			bool ctrl:1;
-		};
-		u8 mask;
-	} set;
+	} set, clr;
 };
 
 /******************************************************************************
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 1335c00500d1..2eb7fdb61131 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -81,14 +81,15 @@ nv50_head_lut_load(struct drm_property_blob *blob, int mode,
 }
 
 void
-nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
+nv50_head_flush_clr(struct nv50_head *head,
+		    struct nv50_head_atom *asyh, bool flush)
 {
-	if (asyh->clr.ilut && (!asyh->set.ilut || y))
-		head->func->ilut_clr(head);
-	if (asyh->clr.core && (!asyh->set.core || y))
-		head->func->core_clr(head);
-	if (asyh->clr.curs && (!asyh->set.curs || y))
-		head->func->curs_clr(head);
+	union nv50_head_atom_mask clr = {
+		.mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
+	};
+	if (clr.ilut) head->func->ilut_clr(head);
+	if (clr.core) head->func->core_clr(head);
+	if (clr.curs) head->func->curs_clr(head);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 764db736cf29..8a7636f8a242 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -111,12 +111,12 @@ u32
 nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
 		    struct nv50_wndw_atom *asyw)
 {
-	if (asyw->clr.sema && (!asyw->set.sema || flush))
-		wndw->func->sema_clr(wndw);
-	if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
-		wndw->func->ntfy_clr(wndw);
-	if (asyw->clr.image && (!asyw->set.image || flush))
-		wndw->func->image_clr(wndw);
+	union nv50_wndw_atom_mask clr = {
+		.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
+	};
+	if (clr.sema ) wndw->func-> sema_clr(wndw);
+	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
+	if (clr.image) wndw->func->image_clr(wndw);
 
 	return flush ? wndw->func->update(wndw, interlock) : 0;
 }

From 43c181e9deb5f4215d4ef0cb227fde509da7cc5e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1123/1286] drm/nouveau/kms/nv50-: move drm format->hw
 conversion into common code

This will be required to support additional HW features.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/base507c.c | 16 -------------
 drivers/gpu/drm/nouveau/dispnv50/curs507a.c |  4 ++--
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     | 25 +++++++++++++++++++++
 3 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 548a6a67f874..43dcbcd49e71 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -186,22 +186,6 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 	asyh->base.w = asyw->state.fb->width;
 	asyh->base.h = asyw->state.fb->height;
 
-	switch (fb->format->format) {
-	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
-	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
-	case DRM_FORMAT_XRGB1555   :
-	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
-	case DRM_FORMAT_XRGB8888   :
-	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
-	case DRM_FORMAT_XBGR2101010:
-	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
-	case DRM_FORMAT_XBGR8888   :
-	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
-	default:
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
 	asyw->lut.enable = 1;
 	asyw->set.image = true;
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 3e5e98c835a3..f7e56a88e77d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -91,8 +91,8 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 	if (asyw->state.fb->width != asyw->state.fb->height)
 		return -EINVAL;
 
-	switch (asyw->state.fb->format->format) {
-	case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
+	switch (asyw->image.format) {
+	case 0xcf: asyh->curs.format = 1; break;
 	default:
 		WARN_ON(1);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 8a7636f8a242..cfd998a85418 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -168,6 +168,27 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
 	asyw->sema.handle = 0;
 }
 
+static int
+nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
+{
+	switch (asyw->state.fb->format->format) {
+	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
+	case DRM_FORMAT_XRGB8888   :
+	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
+	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
+	case DRM_FORMAT_XRGB1555   :
+	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
+	case DRM_FORMAT_XBGR8888   :
+	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int
 nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 			       struct nv50_wndw_atom *asyw,
@@ -183,6 +204,10 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 	asyw->image.h = fb->base.height;
 	asyw->image.kind = fb->nvbo->kind;
 
+	ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
+	if (ret)
+		return ret;
+
 	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
 		asyw->interval = 0;
 	else

From 261fcfa96991d6652b061262c1879cc0bdd1aa3a Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1124/1286] drm/nouveau/kms/nv50-: extend window image data for
 stereo/planar formats

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/atom.h     |  6 +++---
 drivers/gpu/drm/nouveau/dispnv50/base507c.c |  6 +++---
 drivers/gpu/drm/nouveau/dispnv50/base827c.c |  6 +++---
 drivers/gpu/drm/nouveau/dispnv50/base907c.c |  6 +++---
 drivers/gpu/drm/nouveau/dispnv50/curs507a.c |  2 +-
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     | 10 +++++-----
 6 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 8bf180666bb7..53638ee83361 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -162,12 +162,12 @@ struct nv50_wndw_atom {
 		u8  kind:7;
 		u8  layout:1;
 		u8  block:4;
-		u32 pitch:20;
+		u32 pitch[3];
 		u16 w;
 		u16 h;
 
-		u32 handle;
-		u64 offset;
+		u32 handle[6];
+		u64 offset[6];
 	} image;
 
 	struct {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 43dcbcd49e71..1c65ddc4747e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -74,13 +74,13 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 		evo_data(push, asyw->image.mode << 8 |
 			       asyw->image.interval << 4);
 		evo_mthd(push, 0x00c0, 1);
-		evo_data(push, asyw->image.handle);
+		evo_data(push, asyw->image.handle[0]);
 		evo_mthd(push, 0x0800, 5);
-		evo_data(push, asyw->image.offset >> 8);
+		evo_data(push, asyw->image.offset[0] >> 8);
 		evo_data(push, 0x00000000);
 		evo_data(push, asyw->image.h << 16 | asyw->image.w);
 		evo_data(push, asyw->image.layout << 20 |
-			       asyw->image.pitch |
+			       asyw->image.pitch[0] |
 			       asyw->image.block);
 		evo_data(push, asyw->image.kind << 16 |
 			       asyw->image.format << 8);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 0d356aeeda2b..9dc968c83c66 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -30,13 +30,13 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 		evo_data(push, asyw->image.mode << 8 |
 			       asyw->image.interval << 4);
 		evo_mthd(push, 0x00c0, 1);
-		evo_data(push, asyw->image.handle);
+		evo_data(push, asyw->image.handle[0]);
 		evo_mthd(push, 0x0800, 5);
-		evo_data(push, asyw->image.offset >> 8);
+		evo_data(push, asyw->image.offset[0] >> 8);
 		evo_data(push, 0x00000000);
 		evo_data(push, asyw->image.h << 16 | asyw->image.w);
 		evo_data(push, asyw->image.layout << 20 |
-			       asyw->image.pitch |
+			       asyw->image.pitch[0] |
 			       asyw->image.block);
 		evo_data(push, asyw->image.format << 8);
 		evo_kick(push, &wndw->wndw);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 171d97872962..5321c55951b9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -43,13 +43,13 @@ base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 		evo_data(push, asyw->image.mode << 8 |
 			       asyw->image.interval << 4);
 		evo_mthd(push, 0x00c0, 1);
-		evo_data(push, asyw->image.handle);
+		evo_data(push, asyw->image.handle[0]);
 		evo_mthd(push, 0x0400, 5);
-		evo_data(push, asyw->image.offset >> 8);
+		evo_data(push, asyw->image.offset[0] >> 8);
 		evo_data(push, 0x00000000);
 		evo_data(push, asyw->image.h << 16 | asyw->image.w);
 		evo_data(push, asyw->image.layout << 24 |
-			       asyw->image.pitch |
+			       asyw->image.pitch[0] |
 			       asyw->image.block);
 		evo_data(push, asyw->image.format << 8);
 		evo_kick(push, &wndw->wndw);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index f7e56a88e77d..589c75c22b3a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -52,7 +52,7 @@ curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
 		 struct nv50_wndw_atom *asyw)
 {
 	u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle;
-	u32 offset = asyw->image.offset;
+	u32 offset = asyw->image.offset[0];
 	if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
 		asyh->curs.handle = handle;
 		asyh->curs.offset = offset;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index cfd998a85418..4b64f64b7891 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -219,11 +219,11 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 			asyw->image.block = fb->nvbo->mode >> 4;
 		else
 			asyw->image.block = fb->nvbo->mode;
-		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
+		asyw->image.pitch[0] = (fb->base.pitches[0] / 4) << 4;
 	} else {
 		asyw->image.layout = 1;
 		asyw->image.block  = 0;
-		asyw->image.pitch  = fb->base.pitches[0];
+		asyw->image.pitch[0] = fb->base.pitches[0];
 	}
 
 	ret = wndw->func->acquire(wndw, asyw, asyh);
@@ -287,7 +287,7 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 		asyw->clr.ntfy = armw->ntfy.handle != 0;
 		asyw->clr.sema = armw->sema.handle != 0;
 		if (wndw->func->image_clr)
-			asyw->clr.image = armw->image.handle != 0;
+			asyw->clr.image = armw->image.handle[0] != 0;
 		asyw->set.lut = wndw->func->lut && asyv;
 	}
 
@@ -333,8 +333,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 	}
 
 	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
-	asyw->image.handle = ctxdma->object.handle;
-	asyw->image.offset = fb->nvbo->bo.offset;
+	asyw->image.handle[0] = ctxdma->object.handle;
+	asyw->image.offset[0] = fb->nvbo->bo.offset;
 
 	if (wndw->func->prepare) {
 		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);

From 34508f9d260cbd7b91f988c858f50ad956750ee3 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1125/1286] drm/nouveau/kms/nv50-: determine MST support from
 DP Info Table

GV100 doesn't support MST; use the information provided in VBIOS tables to
detect its presence instead.
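
A minimal sketch of the detection logic (illustrative only; the patch below
treats bit 2 of byte 0x08 in a v4.0+ DP Info Table as indicating MST
support):

    u8 ver, hdr, cnt, len;
    u32 data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
    bool mst_capable = data && ver >= 0x40 &&
                       (nvbios_rd08(bios, data + 0x08) & 0x04);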

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c               | 9 +++++++--
 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h | 4 ++++
 drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c         | 2 +-
 3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 006562f7f23e..eaa63b43282b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -52,6 +52,8 @@
 #include "nouveau_fence.h"
 #include "nouveau_fbcon.h"
 
+#include <subdev/bios/dp.h>
+
 /******************************************************************************
  * Atomic state
  *****************************************************************************/
@@ -1383,9 +1385,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
+	u8 ver, hdr, cnt, len;
+	u32 data;
 	int type, ret;
 
 	switch (dcbe->type) {
@@ -1429,8 +1434,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 			nv_encoder->aux = aux;
 		}
 
-		/*TODO: Use DP Info Table to check for support. */
-		if (disp->disp->object.oclass >= GF110_DISP) {
+		if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
+		    ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
 			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
 					    nv_connector->base.base.id,
 					    &nv_encoder->dp.mstm);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
index df34b41838d6..512e25a41803 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
@@ -1,6 +1,10 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __NVBIOS_DP_H__
 #define __NVBIOS_DP_H__
+
+u16
+nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+
 struct nvbios_dpout {
 	u16 type;
 	u16 mask;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 7c7efa4ea0d0..3133b28f849c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -25,7 +25,7 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/dp.h>
 
-static u16
+u16
 nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
 	struct bit_entry d;

From 53e0a3e70de69dc9f498d26c6b5495b2771ee374 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1126/1286] drm/nouveau/kms/nv50-: simplify tracking of channel
 interlocks

Instead of windows returning their core channel interlock mask only when
they know the core channel has been modified, the mask is now recorded
unconditionally and applied as required when update methods are emitted.

This will be required to support Volta.
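
A minimal sketch of the new scheme (illustrative only; the fields and enum
values are the ones introduced below): every channel records its interlock
mask into a per-type array unconditionally, and the core channel ORs in
whatever accumulated when its update method is emitted.

    u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};

    /* window flush: always record, only kick an update when asked to */
    interlock[wndw->interlock.type] |= wndw->interlock.data;
    if (flush)
        wndw->func->update(wndw, interlock);

    /* core update: pick up the accumulated base/overlay interlocks */
    evo_data(push, interlock[NV50_DISP_INTERLOCK_BASE] |
                   interlock[NV50_DISP_INTERLOCK_OVLY]);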

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/Kbuild     |  3 ++
 drivers/gpu/drm/nouveau/dispnv50/base.h     |  4 +-
 drivers/gpu/drm/nouveau/dispnv50/base507c.c | 16 ++++----
 drivers/gpu/drm/nouveau/dispnv50/base827c.c |  3 +-
 drivers/gpu/drm/nouveau/dispnv50/base907c.c | 18 ++-------
 drivers/gpu/drm/nouveau/dispnv50/core.h     |  5 ++-
 drivers/gpu/drm/nouveau/dispnv50/core507d.c |  5 ++-
 drivers/gpu/drm/nouveau/dispnv50/curs.c     |  4 +-
 drivers/gpu/drm/nouveau/dispnv50/curs.h     |  6 +++
 drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 18 +++++----
 drivers/gpu/drm/nouveau/dispnv50/curs907a.c | 30 ++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/disp.c     | 42 +++++++++-----------
 drivers/gpu/drm/nouveau/dispnv50/disp.h     | 11 ++++++
 drivers/gpu/drm/nouveau/dispnv50/ovly.c     | 10 ++---
 drivers/gpu/drm/nouveau/dispnv50/ovly.h     |  8 ++++
 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c | 11 ++++--
 drivers/gpu/drm/nouveau/dispnv50/ovly827e.c | 43 +++++++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/ovly907e.c | 34 ++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     | 23 +++++++----
 drivers/gpu/drm/nouveau/dispnv50/wndw.h     | 16 ++++----
 20 files changed, 224 insertions(+), 86 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/curs907a.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/ovly907e.c

diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 674221dea7a1..3e53484b4589 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -29,9 +29,12 @@ nouveau-y += dispnv50/base907c.o
 
 nouveau-y += dispnv50/curs.o
 nouveau-y += dispnv50/curs507a.o
+nouveau-y += dispnv50/curs907a.o
 
 nouveau-y += dispnv50/oimm.o
 nouveau-y += dispnv50/oimm507b.o
 
 nouveau-y += dispnv50/ovly.o
 nouveau-y += dispnv50/ovly507e.o
+nouveau-y += dispnv50/ovly827e.o
+nouveau-y += dispnv50/ovly907e.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
index edf96a8d645f..71fc10369b37 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -5,7 +5,7 @@
 int base507c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 int base507c_new_(const struct nv50_wndw_func *, const u32 *format,
 		  struct nouveau_drm *, int head, s32 oclass,
-		  struct nv50_wndw **);
+		  u32 interlock_data, struct nv50_wndw **);
 extern const u32 base507c_format[];
 int base507c_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
 		     struct nv50_head_atom *);
@@ -19,7 +19,7 @@ void base507c_ntfy_clr(struct nv50_wndw *);
 int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
 void base507c_image_clr(struct nv50_wndw *);
 void base507c_lut(struct nv50_wndw *, struct nv50_wndw_atom *);
-u32 base507c_update(struct nv50_wndw *, u32);
+void base507c_update(struct nv50_wndw *, u32 *);
 
 int base827c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 1c65ddc4747e..819403f4b958 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -28,17 +28,15 @@
 #include <drm/drm_plane_helper.h>
 #include "nouveau_bo.h"
 
-u32
-base507c_update(struct nv50_wndw *wndw, u32 interlock)
+void
+base507c_update(struct nv50_wndw *wndw, u32 *interlock)
 {
 	u32 *push;
 	if ((push = evo_wait(&wndw->wndw, 2))) {
 		evo_mthd(push, 0x0080, 1);
-		evo_data(push, interlock);
+		evo_data(push, interlock[NV50_DISP_INTERLOCK_CORE]);
 		evo_kick(push, &wndw->wndw);
-		return interlock ? 2 << (wndw->id * 8) : 0;
 	}
-	return 0;
 }
 
 void
@@ -224,7 +222,7 @@ base507c = {
 
 int
 base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
-	      struct nouveau_drm *drm, int head, s32 oclass,
+	      struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
 	      struct nv50_wndw **pwndw)
 {
 	struct nv50_disp_base_channel_dma_v0 args = {
@@ -235,7 +233,8 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
 	int ret;
 
 	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
-			     "base", head, format, BIT(head), &wndw);
+			     "base", head, format, BIT(head),
+			     NV50_DISP_INTERLOCK_BASE, interlock_data, &wndw);
 	if (*pwndw = wndw, ret)
 		return ret;
 
@@ -266,5 +265,6 @@ int
 base507c_new(struct nouveau_drm *drm, int head, s32 oclass,
 	     struct nv50_wndw **pwndw)
 {
-	return base507c_new_(&base507c, base507c_format, drm, head, oclass, pwndw);
+	return base507c_new_(&base507c, base507c_format, drm, head, oclass,
+			     0x00000002 << (head * 8), pwndw);
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 9dc968c83c66..240a6409329d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -63,5 +63,6 @@ int
 base827c_new(struct nouveau_drm *drm, int head, s32 oclass,
 	     struct nv50_wndw **pwndw)
 {
-	return base507c_new_(&base827c, base507c_format, drm, head, oclass, pwndw);
+	return base507c_new_(&base827c, base507c_format, drm, head, oclass,
+			     0x00000002 << (head * 8), pwndw);
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 5321c55951b9..6c32a4e5cb7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -21,19 +21,6 @@
  */
 #include "base.h"
 
-static u32
-base907c_update(struct nv50_wndw *wndw, u32 interlock)
-{
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 2))) {
-		evo_mthd(push, 0x0080, 1);
-		evo_data(push, interlock);
-		evo_kick(push, &wndw->wndw);
-		return interlock ? 2 << (wndw->id * 4) : 0;
-	}
-	return 0;
-}
-
 static void
 base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
@@ -69,12 +56,13 @@ base907c = {
 	.image_set = base907c_image_set,
 	.image_clr = base507c_image_clr,
 	.lut = base507c_lut,
-	.update = base907c_update,
+	.update = base507c_update,
 };
 
 int
 base907c_new(struct nouveau_drm *drm, int head, s32 oclass,
 	     struct nv50_wndw **pwndw)
 {
-	return base507c_new_(&base907c, base507c_format, drm, head, oclass, pwndw);
+	return base507c_new_(&base907c, base507c_format, drm, head, oclass,
+			     0x00000002 << (head * 4), pwndw);
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index 5fd7ddd31e5e..c490d7d497b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -16,7 +16,7 @@ struct nv50_core_func {
 	void (*ntfy_init)(struct nouveau_bo *, u32 offset);
 	int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
 			      struct nvif_device *);
-	void (*update)(struct nv50_core *, u32 interlock, bool ntfy);
+	void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
 
 	const struct nv50_head_func *head;
 	const struct nv50_outp_func {
@@ -31,7 +31,8 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
 void core507d_init(struct nv50_core *);
 void core507d_ntfy_init(struct nouveau_bo *, u32);
 int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
-void core507d_update(struct nv50_core *, u32, bool);
+void core507d_update(struct nv50_core *, u32 *, bool);
+
 extern const struct nv50_outp_func dac507d;
 extern const struct nv50_outp_func sor507d;
 extern const struct nv50_outp_func pior507d;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index 96d7d8fde669..e7fcfa6e6467 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -27,7 +27,7 @@
 #include "nouveau_bo.h"
 
 void
-core507d_update(struct nv50_core *core, u32 interlock, bool ntfy)
+core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 {
 	u32 *push;
 	if ((push = evo_wait(&core->chan, 5))) {
@@ -36,7 +36,8 @@ core507d_update(struct nv50_core *core, u32 interlock, bool ntfy)
 			evo_data(push, 0x80000000 | NV50_DISP_CORE_NTFY);
 		}
 		evo_mthd(push, 0x0080, 2);
-		evo_data(push, interlock);
+		evo_data(push, interlock[NV50_DISP_INTERLOCK_BASE] |
+			       interlock[NV50_DISP_INTERLOCK_OVLY]);
 		evo_data(push, 0x00000000);
 		evo_kick(push, &core->chan);
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 6d60e978db69..fb842ed2592f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,8 +31,8 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 		int version;
 		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 	} curses[] = {
-		{ GK104_DISP_CURSOR, 0, curs507a_new },
-		{ GF110_DISP_CURSOR, 0, curs507a_new },
+		{ GK104_DISP_CURSOR, 0, curs907a_new },
+		{ GF110_DISP_CURSOR, 0, curs907a_new },
 		{ GT214_DISP_CURSOR, 0, curs507a_new },
 		{   G82_DISP_CURSOR, 0, curs507a_new },
 		{  NV50_DISP_CURSOR, 0, curs507a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.h b/drivers/gpu/drm/nouveau/dispnv50/curs.h
index b85ca9fa419c..2285247dc2a3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.h
@@ -3,6 +3,12 @@
 #include "wndw.h"
 
 int curs507a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int curs507a_new_(const struct nv50_wimm_func *, struct nouveau_drm *,
+		  int head, s32 oclass, u32 interlock_data,
+		  struct nv50_wndw **);
+extern const struct nv50_wimm_func curs507a;
+
+int curs907a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
 int nv50_curs_new(struct nouveau_drm *, int head, struct nv50_wndw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 589c75c22b3a..ba05bcb13ae7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -27,11 +27,10 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 
-static u32
-curs507a_update(struct nv50_wndw *wndw, u32 interlock)
+static void
+curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
 {
 	nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
-	return 0;
 }
 
 static void
@@ -41,7 +40,7 @@ curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 						 asyw->point.x);
 }
 
-static const struct nv50_wimm_func
+const struct nv50_wimm_func
 curs507a = {
 	.point = curs507a_point,
 	.update = curs507a_update,
@@ -114,9 +113,10 @@ curs507a_wndw = {
 	.prepare = curs507a_prepare,
 };
 
-static int
+int
 curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
-	      int head, s32 oclass, struct nv50_wndw **pwndw)
+	      int head, s32 oclass, u32 interlock_data,
+	      struct nv50_wndw **pwndw)
 {
 	struct nv50_disp_cursor_v0 args = {
 		.head = head,
@@ -126,7 +126,8 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 	int ret;
 
 	ret = nv50_wndw_new_(&curs507a_wndw, drm->dev, DRM_PLANE_TYPE_CURSOR,
-			     "curs", head, curs507a_format, BIT(head), &wndw);
+			     "curs", head, curs507a_format, BIT(head),
+			     NV50_DISP_INTERLOCK_CURS, interlock_data, &wndw);
 	if (*pwndw = wndw, ret)
 		return ret;
 
@@ -147,5 +148,6 @@ int
 curs507a_new(struct nouveau_drm *drm, int head, s32 oclass,
 	     struct nv50_wndw **pwndw)
 {
-	return curs507a_new_(&curs507a, drm, head, oclass, pwndw);
+	return curs507a_new_(&curs507a, drm, head, oclass,
+			     0x00000001 << (head * 8), pwndw);
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs907a.c b/drivers/gpu/drm/nouveau/dispnv50/curs907a.c
new file mode 100644
index 000000000000..d742362de03e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs907a.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "curs.h"
+
+int
+curs907a_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return curs507a_new_(&curs507a, drm, head, oclass,
+			     0x00000001 << (head * 4), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index eaa63b43282b..e80d11c9a456 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1582,14 +1582,14 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
  *****************************************************************************/
 
 static void
-nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
+nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
 {
 	struct nv50_disp *disp = nv50_disp(drm->dev);
 	struct nv50_core *core = disp->core;
 	struct nv50_mstm *mstm;
 	struct drm_encoder *encoder;
 
-	NV_ATOMIC(drm, "commit core %08x\n", interlock);
+	NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
 
 	drm_for_each_encoder(encoder, drm->dev) {
 		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
@@ -1626,8 +1626,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_atom *atom = nv50_atom(state);
 	struct nv50_outp_atom *outp, *outt;
-	u32 interlock_core = 0;
-	u32 interlock_chan = 0;
+	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
 	int i;
 
 	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
@@ -1650,7 +1649,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 
 		if (asyh->clr.mask) {
 			nv50_head_flush_clr(head, asyh, atom->flush_disable);
-			interlock_core |= 1;
+			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
 		}
 	}
 
@@ -1664,9 +1663,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 		if (!asyw->clr.mask)
 			continue;
 
-		interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
-						      atom->flush_disable,
-						      asyw);
+		nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
 	}
 
 	/* Disable output path(s). */
@@ -1682,21 +1679,19 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 
 		if (outp->clr.mask) {
 			help->disable(encoder);
-			interlock_core |= 1;
+			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
 			if (outp->flush_disable) {
-				nv50_disp_atomic_commit_core(drm, interlock_chan);
-				interlock_core = 0;
-				interlock_chan = 0;
+				nv50_disp_atomic_commit_core(drm, interlock);
+				memset(interlock, 0x00, sizeof(interlock));
 			}
 		}
 	}
 
 	/* Flush disable. */
-	if (interlock_core) {
+	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
 		if (atom->flush_disable) {
-			nv50_disp_atomic_commit_core(drm, interlock_chan);
-			interlock_core = 0;
-			interlock_chan = 0;
+			nv50_disp_atomic_commit_core(drm, interlock);
+			memset(interlock, 0x00, sizeof(interlock));
 		}
 	}
 
@@ -1713,7 +1708,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 
 		if (outp->set.mask) {
 			help->enable(encoder);
-			interlock_core = 1;
+			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
 		}
 
 		list_del(&outp->head);
@@ -1730,7 +1725,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 
 		if (asyh->set.mask) {
 			nv50_head_flush_set(head, asyh);
-			interlock_core = 1;
+			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
 		}
 
 		if (new_crtc_state->active) {
@@ -1752,15 +1747,16 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 		    (!asyw->clr.mask || atom->flush_disable))
 			continue;
 
-		interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
+		nv50_wndw_flush_set(wndw, interlock, asyw);
 	}
 
 	/* Flush update. */
-	if (interlock_core) {
-		if (interlock_chan || !atom->state.legacy_cursor_update)
-			nv50_disp_atomic_commit_core(drm, interlock_chan);
+	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
+		if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+		    !atom->state.legacy_cursor_update)
+			nv50_disp_atomic_commit_core(drm, interlock);
 		else
-			disp->core->func->update(disp->core, 0, false);
+			disp->core->func->update(disp->core, interlock, false);
 	}
 
 	if (atom->lock_core)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 7cbd66849743..f3a963b0ab77 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -27,6 +27,17 @@ nv50_disp(struct drm_device *dev)
 	return nouveau_display(dev)->priv;
 }
 
+struct nv50_disp_interlock {
+	enum nv50_disp_interlock_type {
+		NV50_DISP_INTERLOCK_CORE = 0,
+		NV50_DISP_INTERLOCK_CURS,
+		NV50_DISP_INTERLOCK_BASE,
+		NV50_DISP_INTERLOCK_OVLY,
+		NV50_DISP_INTERLOCK__SIZE
+	} type;
+	u32 data;
+};
+
 struct nv50_chan {
 	struct nvif_object user;
 	struct nvif_device *device;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.c b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
index ac2d3b64f186..be0f16fdcd5b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
@@ -32,11 +32,11 @@ nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 		int version;
 		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 	} ovlys[] = {
-		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
-		{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
-		{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
-		{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
-		{   G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
+		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly907e_new },
+		{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly907e_new },
+		{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
+		{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
+		{   G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
 		{  NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
 		{}
 	};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.h b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
index 90af1f2f0aa0..d149ef6f957e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
@@ -3,6 +3,14 @@
 #include "wndw.h"
 
 int ovly507e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int ovly507e_new_(const struct nv50_wndw_func *, const u32 *format,
+		  struct nouveau_drm *, int head, s32 oclass,
+		  u32 interlock_data, struct nv50_wndw **);
+
+extern const u32 ovly827e_format[];
+
+int ovly827e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int ovly907e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
 int nv50_ovly_new(struct nouveau_drm *, int head, struct nv50_wndw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index 1b85262bf23b..732eea39e4de 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -34,9 +34,9 @@ ovly507e_format[] = {
 	0
 };
 
-static int
+int
 ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
-	      struct nouveau_drm *drm, int head, s32 oclass,
+	      struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
 	      struct nv50_wndw **pwndw)
 {
 	struct nv50_disp_overlay_channel_dma_v0 args = {
@@ -47,7 +47,9 @@ ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
 	int ret;
 
 	ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
-			     "ovly", head, format, BIT(head), &wndw);
+			     "ovly", head, format, BIT(head),
+			     NV50_DISP_INTERLOCK_OVLY, interlock_data,
+			     &wndw);
 	if (*pwndw = wndw, ret)
 		return ret;
 
@@ -66,5 +68,6 @@ int
 ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
 	     struct nv50_wndw **pwndw)
 {
-	return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass, pwndw);
+	return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass,
+			     0x00000004 << (head * 8), pwndw);
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
new file mode 100644
index 000000000000..a8115f13406e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+
+#include <nouveau_bo.h>
+
+#include <nvif/cl507e.h>
+
+static const struct nv50_wndw_func
+ovly827e = {
+};
+
+const u32
+ovly827e_format[] = {
+	0
+};
+
+int
+ovly827e_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return ovly507e_new_(&ovly827e, ovly827e_format, drm, head, oclass,
+			     0x00000004 << (head * 8), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
new file mode 100644
index 000000000000..f50da6461d41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+
+static const struct nv50_wndw_func
+ovly907e = {
+};
+
+int
+ovly907e_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return ovly507e_new_(&ovly907e, ovly827e_format, drm, head, oclass,
+			     0x00000004 << (head * 4), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 4b64f64b7891..8f62c2a811ff 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -107,8 +107,8 @@ nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	return 0;
 }
 
-u32
-nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
+void
+nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
 		    struct nv50_wndw_atom *asyw)
 {
 	union nv50_wndw_atom_mask clr = {
@@ -118,11 +118,13 @@ nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
 	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
 	if (clr.image) wndw->func->image_clr(wndw);
 
-	return flush ? wndw->func->update(wndw, interlock) : 0;
+	interlock[wndw->interlock.type] |= wndw->interlock.data;
+	if (flush)
+		wndw->func->update(wndw, interlock);
 }
 
-u32
-nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
+void
+nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 		    struct nv50_wndw_atom *asyw)
 {
 	if (interlock) {
@@ -139,7 +141,9 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
 		wndw->immd->update(wndw, interlock);
 	}
 
-	return wndw->func->update ? wndw->func->update(wndw, interlock) : 0;
+	interlock[wndw->interlock.type] |= wndw->interlock.data;
+	if (wndw->func->update)
+		wndw->func->update(wndw, interlock);
 }
 
 void
@@ -445,7 +449,9 @@ nv50_wndw_init(struct nv50_wndw *wndw)
 int
 nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 	       enum drm_plane_type type, const char *name, int index,
-	       const u32 *format, u32 heads, struct nv50_wndw **pwndw)
+	       const u32 *format, u32 heads,
+	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
+	       struct nv50_wndw **pwndw)
 {
 	struct nv50_wndw *wndw;
 	int nformat;
@@ -455,6 +461,9 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 		return -ENOMEM;
 	wndw->func = func;
 	wndw->id = index;
+	wndw->interlock.type = interlock_type;
+	wndw->interlock.data = interlock_data;
+	wndw->ctxdma.parent = &wndw->wndw.base.user;
 
 	wndw->ctxdma.parent = &wndw->wndw.base.user;
 	INIT_LIST_HEAD(&wndw->ctxdma.list);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 8672c280a6a4..c26796c612f6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -15,6 +15,7 @@ struct nv50_wndw {
 	const struct nv50_wndw_func *func;
 	const struct nv50_wimm_func *immd;
 	int id;
+	struct nv50_disp_interlock interlock;
 
 	struct {
 		struct nvif_object *parent;
@@ -34,13 +35,14 @@ struct nv50_wndw {
 
 int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
 		   enum drm_plane_type, const char *name, int index,
-		   const u32 *format, u32 heads, struct nv50_wndw **);
+		   const u32 *format, u32 heads, enum nv50_disp_interlock_type,
+		   u32 interlock_data, struct nv50_wndw **);
 void nv50_wndw_init(struct nv50_wndw *);
 void nv50_wndw_fini(struct nv50_wndw *);
-u32 nv50_wndw_flush_set(struct nv50_wndw *, u32 interlock,
-			struct nv50_wndw_atom *);
-u32 nv50_wndw_flush_clr(struct nv50_wndw *, u32 interlock, bool flush,
-			struct nv50_wndw_atom *);
+void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
+			 struct nv50_wndw_atom *);
+void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
+			 struct nv50_wndw_atom *);
 void nv50_wndw_ntfy_enable(struct nv50_wndw *, struct nv50_wndw_atom *);
 int nv50_wndw_wait_armed(struct nv50_wndw *, struct nv50_wndw_atom *);
 
@@ -63,7 +65,7 @@ struct nv50_wndw_func {
 	void (*image_clr)(struct nv50_wndw *);
 	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
 
-	u32 (*update)(struct nv50_wndw *, u32 interlock);
+	void (*update)(struct nv50_wndw *, u32 *interlock);
 };
 
 extern const struct drm_plane_funcs nv50_wndw;
@@ -71,6 +73,6 @@ extern const struct drm_plane_funcs nv50_wndw;
 struct nv50_wimm_func {
 	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
 
-	u32 (*update)(struct nv50_wndw *, u32 interlock);
+	void (*update)(struct nv50_wndw *, u32 *interlock);
 };
 #endif

From 04fc14be7726edbb34404f69297e74061a8a9563 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1127/1286] drm/nouveau/kms/nv50-: decouple window state
 changes, and update method submission

This will be required to support Volta.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 16 ++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/wndw.c |  4 ----
 2 files changed, 16 insertions(+), 4 deletions(-)
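Not part of the patch itself, but a rough standalone sketch of the flow this
change moves towards: the flush helpers only accumulate which interlocks a
window touched, and the commit path then submits the UPDATE methods in a
separate pass for every window whose interlock bit is set.  All names below
(fake_wndw, fake_update, NR_TYPES) are made up purely for illustration and do
not exist in the driver.

/* Illustrative only -- models the decoupled flush/update pattern. */
#include <stdio.h>
#include <stdint.h>

#define NR_TYPES 4

struct fake_wndw {
	const char *name;
	int interlock_type;       /* index into the interlock[] array */
	uint32_t interlock_data;  /* bit(s) this window owns */
	void (*update)(struct fake_wndw *, uint32_t *interlock);
};

static void fake_update(struct fake_wndw *w, uint32_t *interlock)
{
	printf("UPDATE %s (interlock[%d]=0x%08x)\n",
	       w->name, w->interlock_type, interlock[w->interlock_type]);
}

/* Phase 1: flush window state, only record the interlock bits. */
static void flush_set(struct fake_wndw *w, uint32_t *interlock)
{
	interlock[w->interlock_type] |= w->interlock_data;
}

int main(void)
{
	struct fake_wndw wndws[] = {
		{ "base-head0", 1, 0x00000002, fake_update },
		{ "ovly-head0", 1, 0x00000004, fake_update },
		{ "curs-head1", 2, 0x00000010, fake_update },
	};
	uint32_t interlock[NR_TYPES] = { 0 };
	int i;

	/* Pretend only the first two windows had state pushed. */
	for (i = 0; i < 2; i++)
		flush_set(&wndws[i], interlock);

	/* Phase 2: submit UPDATE only where an interlock bit was set. */
	for (i = 0; i < 3; i++) {
		struct fake_wndw *w = &wndws[i];
		if (interlock[w->interlock_type] & w->interlock_data)
			w->update(w, interlock);
	}
	return 0;
}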

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index e80d11c9a456..0f2020010aab 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1690,6 +1690,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 	/* Flush disable. */
 	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
 		if (atom->flush_disable) {
+			for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+				struct nv50_wndw *wndw = nv50_wndw(plane);
+				if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+					if (wndw->func->update)
+						wndw->func->update(wndw, interlock);
+				}
+			}
+
 			nv50_disp_atomic_commit_core(drm, interlock);
 			memset(interlock, 0x00, sizeof(interlock));
 		}
@@ -1751,6 +1759,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 
 	/* Flush update. */
+	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+		struct nv50_wndw *wndw = nv50_wndw(plane);
+		if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+			if (wndw->func->update)
+				wndw->func->update(wndw, interlock);
+		}
+	}
+
 	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
 		if (interlock[NV50_DISP_INTERLOCK_BASE] ||
 		    !atom->state.legacy_cursor_update)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 8f62c2a811ff..0fba4e0a4bb4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -119,8 +119,6 @@ nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
 	if (clr.image) wndw->func->image_clr(wndw);
 
 	interlock[wndw->interlock.type] |= wndw->interlock.data;
-	if (flush)
-		wndw->func->update(wndw, interlock);
 }
 
 void
@@ -142,8 +140,6 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 	}
 
 	interlock[wndw->interlock.type] |= wndw->interlock.data;
-	if (wndw->func->update)
-		wndw->func->update(wndw, interlock);
 }
 
 void

From 45a2945a3759479c08a4aceaee181639c92f9d48 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1128/1286] drm/nouveau/kms/nv50-: simplify swap interval
 handling

This is just cleaning up some left-overs from when we needed a custom
legacy page flip implementation.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/atom.h |  1 -
 drivers/gpu/drm/nouveau/dispnv50/wndw.c | 11 +++--------
 2 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 53638ee83361..b5b8a12a18f2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -135,7 +135,6 @@ nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
 
 struct nv50_wndw_atom {
 	struct drm_plane_state state;
-	u8 interval;
 
 	struct {
 		u32  handle;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0fba4e0a4bb4..06d1696b7d03 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -208,11 +208,6 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 	if (ret)
 		return ret;
 
-	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
-		asyw->interval = 0;
-	else
-		asyw->interval = 1;
-
 	if (asyw->image.kind) {
 		asyw->image.layout = 0;
 		if (drm->client.device.info.chipset >= 0xc0)
@@ -231,10 +226,11 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 		return ret;
 
 	if (asyw->set.image) {
-		if (!(asyw->image.mode = asyw->interval ? 0 : 1))
-			asyw->image.interval = asyw->interval;
+		if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
+			asyw->image.interval = 1;
 		else
 			asyw->image.interval = 0;
+		asyw->image.mode = asyw->image.interval ? 0 : 1;
 	}
 
 	return 0;
@@ -371,7 +367,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
 	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
 		return NULL;
 	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
-	asyw->interval = 1;
 	asyw->sema = armw->sema;
 	asyw->ntfy = armw->ntfy;
 	asyw->image = armw->image;

From 859b456b6b19a19761883cf52993dec645a36152 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1129/1286] drm/nouveau/kms/nv50-: store window visibility in
 state

Window visibility is going to become a little more complicated with the
upcoming LUT changes, so store the calculated value to avoid having to
recalculate the armed state later.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/atom.h |  2 ++
 drivers/gpu/drm/nouveau/dispnv50/wndw.c | 27 +++++++++++++++++--------
 2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index b5b8a12a18f2..fefb9caaf7b8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -136,6 +136,8 @@ nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
 struct nv50_wndw_atom {
 	struct drm_plane_state state;
 
+	bool visible;
+
 	struct {
 		u32  handle;
 		u16  offset:12;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 06d1696b7d03..4a685d78ed33 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -244,26 +244,33 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
 	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
 	struct nv50_head_atom *harm = NULL, *asyh = NULL;
-	bool varm = false, asyv = false, asym = false;
+	bool modeset = false;
 	int ret;
 
 	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
+
+	/* Fetch the assembly state for the head the window will belong to,
+	 * and determine whether the window will be visible.
+	 */
 	if (asyw->state.crtc) {
 		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
 		if (IS_ERR(asyh))
 			return PTR_ERR(asyh);
-		asym = drm_atomic_crtc_needs_modeset(&asyh->state);
-		asyv = asyh->state.active;
+		modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
+		asyw->visible = asyh->state.active;
+	} else {
+		asyw->visible = false;
 	}
 
+	/* Fetch assembly state for the head the window used to belong to. */
 	if (armw->state.crtc) {
 		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
 		if (IS_ERR(harm))
 			return PTR_ERR(harm);
-		varm = harm->state.crtc->state->active;
 	}
 
-	if (asyv) {
+	/* Calculate new window state. */
+	if (asyw->visible) {
 		asyw->point.x = asyw->state.crtc_x;
 		asyw->point.y = asyw->state.crtc_y;
 		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
@@ -273,18 +280,22 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 		if (ret)
 			return ret;
 	} else
-	if (varm) {
+	if (armw->visible) {
 		nv50_wndw_atomic_check_release(wndw, asyw, harm);
 	} else {
 		return 0;
 	}
 
-	if (!asyv || asym) {
+	/* Aside from the obvious case where the window is actively being
+	 * disabled, we might also need to temporarily disable the window
+	 * when performing certain modeset operations.
+	 */
+	if (!asyw->visible || modeset) {
 		asyw->clr.ntfy = armw->ntfy.handle != 0;
 		asyw->clr.sema = armw->sema.handle != 0;
 		if (wndw->func->image_clr)
 			asyw->clr.image = armw->image.handle[0] != 0;
-		asyw->set.lut = wndw->func->lut && asyv;
+		asyw->set.lut = wndw->func->lut && asyw->visible;
 	}
 
 	return 0;

From e349a05dc8faad6b27700383945a1783612cbae6 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1130/1286] drm/nouveau/kms/nv50-: plane updates don't always
 require image_set()

When only the position of a window changes, there's no need to submit
an image update as well.

This will be required to support the overlays and Volta windows.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/base507c.c |  1 -
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     | 63 +++++++++++----------
 2 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 819403f4b958..d8d351669367 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -185,7 +185,6 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 	asyh->base.h = asyw->state.fb->height;
 
 	asyw->lut.enable = 1;
-	asyw->set.image = true;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 4a685d78ed33..0f6de6049be4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -190,7 +190,8 @@ nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
 }
 
 static int
-nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
+nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
+			       struct nv50_wndw_atom *armw,
 			       struct nv50_wndw_atom *asyw,
 			       struct nv50_head_atom *asyh)
 {
@@ -200,40 +201,44 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 
 	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
 
-	asyw->image.w = fb->base.width;
-	asyw->image.h = fb->base.height;
-	asyw->image.kind = fb->nvbo->kind;
+	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
+		asyw->image.w = fb->base.width;
+		asyw->image.h = fb->base.height;
+		asyw->image.kind = fb->nvbo->kind;
 
-	ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
-	if (ret)
-		return ret;
+		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
+		if (ret)
+			return ret;
 
-	if (asyw->image.kind) {
-		asyw->image.layout = 0;
-		if (drm->client.device.info.chipset >= 0xc0)
-			asyw->image.block = fb->nvbo->mode >> 4;
-		else
-			asyw->image.block = fb->nvbo->mode;
-		asyw->image.pitch[0] = (fb->base.pitches[0] / 4) << 4;
-	} else {
-		asyw->image.layout = 1;
-		asyw->image.block  = 0;
-		asyw->image.pitch[0] = fb->base.pitches[0];
-	}
+		if (asyw->image.kind) {
+			asyw->image.layout = 0;
+			if (drm->client.device.info.chipset >= 0xc0)
+				asyw->image.block = fb->nvbo->mode >> 4;
+			else
+				asyw->image.block = fb->nvbo->mode;
+			asyw->image.pitch[0] = (fb->base.pitches[0] / 4) << 4;
+		} else {
+			asyw->image.layout = 1;
+			asyw->image.block  = 0;
+			asyw->image.pitch[0] = fb->base.pitches[0];
+		}
 
-	ret = wndw->func->acquire(wndw, asyw, asyh);
-	if (ret)
-		return ret;
-
-	if (asyw->set.image) {
 		if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
 			asyw->image.interval = 1;
 		else
 			asyw->image.interval = 0;
 		asyw->image.mode = asyw->image.interval ? 0 : 1;
+		asyw->set.image = wndw->func->image_set != NULL;
 	}
 
-	return 0;
+	if (wndw->immd) {
+		asyw->point.x = asyw->state.crtc_x;
+		asyw->point.y = asyw->state.crtc_y;
+		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
+			asyw->set.point = true;
+	}
+
+	return wndw->func->acquire(wndw, asyw, asyh);
 }
 
 int
@@ -271,12 +276,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 
 	/* Calculate new window state. */
 	if (asyw->visible) {
-		asyw->point.x = asyw->state.crtc_x;
-		asyw->point.y = asyw->state.crtc_y;
-		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
-			asyw->set.point = true;
-
-		ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
+						     armw, asyw, asyh);
 		if (ret)
 			return ret;
 	} else

From 119608a7f3f1ef899f1f98d05306340b92834836 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1131/1286] drm/nouveau/kms/nv50-: handle degamma LUT from
 window channels

This is required to eventually support the DRM colour management APIs,
and to support Volta.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/Kbuild     |   1 +
 drivers/gpu/drm/nouveau/dispnv50/atom.h     |  25 +++-
 drivers/gpu/drm/nouveau/dispnv50/base.h     |   3 +-
 drivers/gpu/drm/nouveau/dispnv50/base507c.c |  44 ++++--
 drivers/gpu/drm/nouveau/dispnv50/base827c.c |   4 +-
 drivers/gpu/drm/nouveau/dispnv50/base907c.c |  43 +++++-
 drivers/gpu/drm/nouveau/dispnv50/disp.c     |  11 ++
 drivers/gpu/drm/nouveau/dispnv50/head.c     | 158 ++++++++------------
 drivers/gpu/drm/nouveau/dispnv50/head.h     |  18 +--
 drivers/gpu/drm/nouveau/dispnv50/head507d.c |  29 ++--
 drivers/gpu/drm/nouveau/dispnv50/head827d.c |  17 ++-
 drivers/gpu/drm/nouveau/dispnv50/head907d.c |  33 ++--
 drivers/gpu/drm/nouveau/dispnv50/head917d.c |  34 ++++-
 drivers/gpu/drm/nouveau/dispnv50/lut.c      |  95 ++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/lut.h      |  15 ++
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     |  93 +++++++++++-
 drivers/gpu/drm/nouveau/dispnv50/wndw.h     |   8 +-
 17 files changed, 461 insertions(+), 170 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/lut.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/lut.h
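
For context (not part of the patch): the degamma LUT consumed here arrives
from userspace as a DRM property blob attached to the CRTC.  Below is a rough
libdrm-based sketch of how such a blob might be supplied; crtc_id and
degamma_prop_id are assumed to have been discovered already (property lookup
and atomic-commit plumbing are omitted), and set_linear_degamma is a made-up
helper name.

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_linear_degamma(int fd, uint32_t crtc_id,
			      uint32_t degamma_prop_id, unsigned int size)
{
	struct drm_color_lut lut[256];
	uint32_t blob_id;
	unsigned int i;
	int ret;

	if (size < 2 || size > 256)
		return -EINVAL;

	/* Linear ramp; entries are 16 bits per channel. */
	for (i = 0; i < size; i++) {
		uint16_t v = (uint32_t)i * 0xffff / (size - 1);
		lut[i].red = lut[i].green = lut[i].blue = v;
		lut[i].reserved = 0;
	}

	ret = drmModeCreatePropertyBlob(fd, lut, size * sizeof(lut[0]),
					&blob_id);
	if (ret)
		return ret;

	/* Attach the blob to the CRTC's DEGAMMA_LUT property. */
	return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
					degamma_prop_id, blob_id);
}

On this hardware the blob is expected to carry 256 entries (see the WARN_ON
in nv50_lut_load() below), and the window code loads it into one of two
per-window VRAM buffers so the previous LUT stays valid until the next UPDATE.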

diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 3e53484b4589..d074bb8ecd1b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -1,4 +1,5 @@
 nouveau-y += dispnv50/disp.o
+nouveau-y += dispnv50/lut.o
 
 nouveau-y += dispnv50/core.o
 nouveau-y += dispnv50/core507d.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index fefb9caaf7b8..3e9e8832d0dd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -16,6 +16,11 @@ struct nv50_atom {
 struct nv50_head_atom {
 	struct drm_crtc_state state;
 
+	struct {
+		u32 mask;
+		u32 olut;
+	} wndw;
+
 	struct {
 		u16 iW;
 		u16 iH;
@@ -47,8 +52,9 @@ struct nv50_head_atom {
 		bool visible;
 		u32 handle;
 		u64 offset:40;
-		u8  mode:4;
-	} ilut;
+		u8 buffer:1;
+		u8 mode:4;
+	} olut;
 
 	struct {
 		bool visible;
@@ -107,7 +113,7 @@ struct nv50_head_atom {
 
 	union nv50_head_atom_mask {
 		struct {
-			bool ilut:1;
+			bool olut:1;
 			bool core:1;
 			bool curs:1;
 			bool view:1;
@@ -136,6 +142,7 @@ nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
 struct nv50_wndw_atom {
 	struct drm_plane_state state;
 
+	struct drm_property_blob *ilut;
 	bool visible;
 
 	struct {
@@ -152,8 +159,14 @@ struct nv50_wndw_atom {
 	} sema;
 
 	struct {
-		u8 enable:2;
-	} lut;
+		u32 handle;
+		struct {
+			u64 offset:40;
+			u8  buffer:1;
+			u8  enable:2;
+			u8  mode:4;
+		} i;
+	} xlut;
 
 	struct {
 		u8  mode:2;
@@ -180,8 +193,8 @@ struct nv50_wndw_atom {
 		struct {
 			bool ntfy:1;
 			bool sema:1;
+			bool xlut:1;
 			bool image:1;
-			bool lut:1;
 			bool point:1;
 		};
 		u8 mask;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
index 71fc10369b37..87ec8394b7f3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -17,8 +17,9 @@ void base507c_ntfy_reset(struct nouveau_bo *, u32);
 void base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
 void base507c_ntfy_clr(struct nv50_wndw *);
 int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+void base507c_xlut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void base507c_xlut_clr(struct nv50_wndw *);
 void base507c_image_clr(struct nv50_wndw *);
-void base507c_lut(struct nv50_wndw *, struct nv50_wndw_atom *);
 void base507c_update(struct nv50_wndw *, u32 *);
 
 int base827c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index d8d351669367..5d664d75b645 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -39,17 +39,6 @@ base507c_update(struct nv50_wndw *wndw, u32 *interlock)
 	}
 }
 
-void
-base507c_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
-{
-	u32 *push;
-	if ((push = evo_wait(&wndw->wndw, 2))) {
-		evo_mthd(push, 0x00e0, 1);
-		evo_data(push, asyw->lut.enable << 30);
-		evo_kick(push, &wndw->wndw);
-	}
-}
-
 void
 base507c_image_clr(struct nv50_wndw *wndw)
 {
@@ -86,6 +75,28 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
+void
+base507c_xlut_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x00e0, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+void
+base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x00e0, 1);
+		evo_data(push, 0x40000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
 int
 base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
 			 struct nvif_device *device)
@@ -177,14 +188,17 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 	if (ret)
 		return ret;
 
+	if (!wndw->func->ilut) {
+		if ((asyh->base.cpp != 1) ^ (fb->format->cpp[0] != 1))
+			asyh->state.color_mgmt_changed = true;
+	}
+
 	asyh->base.depth = fb->format->depth;
 	asyh->base.cpp = fb->format->cpp[0];
 	asyh->base.x = asyw->state.src.x1 >> 16;
 	asyh->base.y = asyw->state.src.y1 >> 16;
 	asyh->base.w = asyw->state.fb->width;
 	asyh->base.h = asyw->state.fb->height;
-
-	asyw->lut.enable = 1;
 	return 0;
 }
 
@@ -213,9 +227,11 @@ base507c = {
 	.ntfy_set = base507c_ntfy_set,
 	.ntfy_clr = base507c_ntfy_clr,
 	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.olut_core = 1,
+	.xlut_set = base507c_xlut_set,
+	.xlut_clr = base507c_xlut_clr,
 	.image_set = base507c_image_set,
 	.image_clr = base507c_image_clr,
-	.lut = base507c_lut,
 	.update = base507c_update,
 };
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 240a6409329d..d886858a5724 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -53,9 +53,11 @@ base827c = {
 	.ntfy_set = base507c_ntfy_set,
 	.ntfy_clr = base507c_ntfy_clr,
 	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.olut_core = 1,
+	.xlut_set = base507c_xlut_set,
+	.xlut_clr = base507c_xlut_clr,
 	.image_set = base827c_image_set,
 	.image_clr = base507c_image_clr,
-	.lut = base507c_lut,
 	.update = base507c_update,
 };
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 6c32a4e5cb7d..2643592ad827 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -43,6 +43,44 @@ base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	}
 }
 
+static void
+base907c_xlut_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 6))) {
+		evo_mthd(push, 0x00e0, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x00e8, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x00fc, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 6))) {
+		evo_mthd(push, 0x00e0, 3);
+		evo_data(push, asyw->xlut.i.enable << 30 |
+			       asyw->xlut.i.mode << 24);
+		evo_data(push, asyw->xlut.i.offset >> 8);
+		evo_data(push, 0x40000000);
+		evo_mthd(push, 0x00fc, 1);
+		evo_data(push, asyw->xlut.handle);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	asyw->xlut.i.mode = 7;
+	asyw->xlut.i.enable = 2;
+}
+
 static const struct nv50_wndw_func
 base907c = {
 	.acquire = base507c_acquire,
@@ -53,9 +91,12 @@ base907c = {
 	.ntfy_set = base507c_ntfy_set,
 	.ntfy_clr = base507c_ntfy_clr,
 	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.ilut = base907c_ilut,
+	.olut_core = true,
+	.xlut_set = base907c_xlut_set,
+	.xlut_clr = base907c_xlut_clr,
 	.image_set = base907c_image_set,
 	.image_clr = base507c_image_clr,
-	.lut = base507c_lut,
 	.update = base507c_update,
 };
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0f2020010aab..6c860e8b1b16 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1971,8 +1971,19 @@ nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 	struct nv50_atom *atom = nv50_atom(state);
 	struct drm_connector_state *old_connector_state, *new_connector_state;
 	struct drm_connector *connector;
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_crtc *crtc;
 	int ret, i;
 
+	/* We need to handle colour management on a per-plane basis. */
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		if (new_crtc_state->color_mgmt_changed) {
+			ret = drm_atomic_add_affected_planes(state, crtc);
+			if (ret)
+				return ret;
+		}
+	}
+
 	ret = drm_atomic_helper_check(dev, state);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 2eb7fdb61131..ca83006510b7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -30,56 +30,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include "nouveau_connector.h"
-#include "nouveau_bo.h"
-
-static void
-nv50_head_lut_load(struct drm_property_blob *blob, int mode,
-		   struct nouveau_bo *nvbo)
-{
-	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
-	void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
-	const int size = blob->length / sizeof(*in);
-	int bits, shift, i;
-	u16 zero, r, g, b;
-
-	/* This can't happen.. But it shuts the compiler up. */
-	if (WARN_ON(size != 256))
-		return;
-
-	switch (mode) {
-	case 0: /* LORES. */
-	case 1: /* HIRES. */
-		bits = 11;
-		shift = 3;
-		zero = 0x0000;
-		break;
-	case 7: /* INTERPOLATE_257_UNITY_RANGE. */
-		bits = 14;
-		shift = 0;
-		zero = 0x6000;
-		break;
-	default:
-		WARN_ON(1);
-		return;
-	}
-
-	for (i = 0; i < size; i++) {
-		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
-		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
-		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
-		writew(r, lut + (i * 0x08) + 0);
-		writew(g, lut + (i * 0x08) + 2);
-		writew(b, lut + (i * 0x08) + 4);
-	}
-
-	/* INTERPOLATE modes require a "next" entry to interpolate with,
-	 * so we replicate the last entry to deal with this for now.
-	 */
-	writew(r, lut + (i * 0x08) + 0);
-	writew(g, lut + (i * 0x08) + 2);
-	writew(b, lut + (i * 0x08) + 4);
-}
-
 void
 nv50_head_flush_clr(struct nv50_head *head,
 		    struct nv50_head_atom *asyh, bool flush)
@@ -87,7 +37,7 @@ nv50_head_flush_clr(struct nv50_head *head,
 	union nv50_head_atom_mask clr = {
 		.mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
 	};
-	if (clr.ilut) head->func->ilut_clr(head);
+	if (clr.olut) head->func->olut_clr(head);
 	if (clr.core) head->func->core_clr(head);
 	if (clr.curs) head->func->curs_clr(head);
 }
@@ -97,16 +47,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	if (asyh->set.view   ) head->func->view    (head, asyh);
 	if (asyh->set.mode   ) head->func->mode    (head, asyh);
-	if (asyh->set.ilut   ) {
-		struct nouveau_bo *nvbo = head->ilut.nvbo[head->ilut.next];
-		struct drm_property_blob *blob = asyh->state.gamma_lut;
-		if (blob)
-			nv50_head_lut_load(blob, asyh->ilut.mode, nvbo);
-		asyh->ilut.offset = nvbo->bo.offset;
-		head->ilut.next ^= 1;
-		head->func->ilut_set(head, asyh);
-	}
 	if (asyh->set.core   ) head->func->core_set(head, asyh);
+	if (asyh->set.olut   ) {
+		asyh->olut.offset = nv50_lut_load(&head->olut,
+						  asyh->olut.mode <= 1,
+						  asyh->olut.buffer,
+						  asyh->state.gamma_lut);
+		head->func->olut_set(head, asyh);
+	}
 	if (asyh->set.curs   ) head->func->curs_set(head, asyh);
 	if (asyh->set.base   ) head->func->base    (head, asyh);
 	if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
@@ -240,35 +188,37 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
 	asyh->set.view = true;
 }
 
-static void
+static int
 nv50_head_atomic_check_lut(struct nv50_head *head,
-			   struct nv50_head_atom *armh,
 			   struct nv50_head_atom *asyh)
 {
 	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
+	struct drm_property_blob *olut = asyh->state.gamma_lut;
 
-	/* An I8 surface without an input LUT makes no sense, and
-	 * EVO will throw an error if you try.
-	 *
-	 * Legacy clients actually cause this due to the order in
-	 * which they call ioctls, so we will enable the LUT with
-	 * whatever contents the buffer already contains to avoid
-	 * triggering the error check.
-	 */
-	if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
-		asyh->ilut.handle = 0;
-		asyh->clr.ilut = armh->ilut.visible;
-		return;
+	/* Determine whether core output LUT should be enabled. */
+	if (olut) {
+		/* Check if any window(s) have stolen the core output LUT
+		 * to use as an input LUT for legacy gamma + I8 colour format.
+		 */
+		if (asyh->wndw.olut) {
+			/* If any window has stolen the core output LUT,
+			 * all of them must.
+			 */
+			if (asyh->wndw.olut != asyh->wndw.mask)
+				return -EINVAL;
+			olut = NULL;
+		}
 	}
 
-	if (disp->disp->object.oclass < GF110_DISP) {
-		asyh->ilut.mode = (asyh->base.cpp == 1) ? 0 : 1;
-		asyh->set.ilut = true;
-	} else {
-		asyh->ilut.mode = 7;
-		asyh->set.ilut = asyh->state.color_mgmt_changed;
+	if (!olut) {
+		asyh->olut.handle = 0;
+		return 0;
 	}
-	asyh->ilut.handle = disp->core->chan.vram.handle;
+
+	asyh->olut.handle = disp->core->chan.vram.handle;
+	asyh->olut.buffer = !asyh->olut.buffer;
+	head->func->olut(head, asyh);
+	return 0;
 }
 
 static void
@@ -360,9 +310,13 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 			nv50_head_atomic_check_mode(head, asyh);
 
 		if (asyh->state.color_mgmt_changed ||
-		    asyh->base.cpp != armh->base.cpp)
-			nv50_head_atomic_check_lut(head, armh, asyh);
-		asyh->ilut.visible = asyh->ilut.handle != 0;
+		    memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw))) {
+			int ret = nv50_head_atomic_check_lut(head, asyh);
+			if (ret)
+				return ret;
+
+			asyh->olut.visible = asyh->olut.handle != 0;
+		}
 
 		if (asyc) {
 			if (asyc->set.scaler)
@@ -373,13 +327,16 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 				nv50_head_atomic_check_procamp(armh, asyh, asyc);
 		}
 
-		if (head->func->core_calc)
+		if (head->func->core_calc) {
 			head->func->core_calc(head, asyh);
+			if (!asyh->core.visible)
+				asyh->olut.visible = false;
+		}
 
 		asyh->set.base = armh->base.cpp != asyh->base.cpp;
 		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
 	} else {
-		asyh->ilut.visible = false;
+		asyh->olut.visible = false;
 		asyh->core.visible = false;
 		asyh->curs.visible = false;
 		asyh->base.cpp = 0;
@@ -402,11 +359,19 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 		if (armh->curs.visible) {
 			asyh->clr.curs = true;
 		}
+
+		if (asyh->olut.visible) {
+			if (memcmp(&armh->olut, &asyh->olut, sizeof(asyh->olut)))
+				asyh->set.olut = true;
+		} else
+		if (armh->olut.visible) {
+			asyh->clr.olut = true;
+		}
 	} else {
-		asyh->clr.ilut = armh->ilut.visible;
+		asyh->clr.olut = armh->olut.visible;
 		asyh->clr.core = armh->core.visible;
 		asyh->clr.curs = armh->curs.visible;
-		asyh->set.ilut = asyh->ilut.visible;
+		asyh->set.olut = asyh->olut.visible;
 		asyh->set.core = asyh->core.visible;
 		asyh->set.curs = asyh->curs.visible;
 	}
@@ -438,9 +403,10 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
 	if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
 		return NULL;
 	__drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
+	asyh->wndw = armh->wndw;
 	asyh->view = armh->view;
 	asyh->mode = armh->mode;
-	asyh->ilut = armh->ilut;
+	asyh->olut = armh->olut;
 	asyh->core = armh->core;
 	asyh->curs = armh->curs;
 	asyh->base = armh->base;
@@ -477,11 +443,7 @@ static void
 nv50_head_destroy(struct drm_crtc *crtc)
 {
 	struct nv50_head *head = nv50_head(crtc);
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(head->ilut.nvbo); i++)
-		nouveau_bo_unmap_unpin_unref(&head->ilut.nvbo[i]);
-
+	nv50_lut_fini(&head->olut);
 	drm_crtc_cleanup(crtc);
 	kfree(head);
 }
@@ -505,7 +467,7 @@ nv50_head_create(struct drm_device *dev, int index)
 	struct nv50_head *head;
 	struct nv50_wndw *curs, *wndw;
 	struct drm_crtc *crtc;
-	int ret, i;
+	int ret;
 
 	head = kzalloc(sizeof(*head), GFP_KERNEL);
 	if (!head)
@@ -527,10 +489,8 @@ nv50_head_create(struct drm_device *dev, int index)
 	drm_crtc_helper_add(crtc, &nv50_head_help);
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
-	for (i = 0; i < ARRAY_SIZE(head->ilut.nvbo); i++) {
-		ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
-					     TTM_PL_FLAG_VRAM,
-					     &head->ilut.nvbo[i]);
+	if (head->func->olut_set) {
+		ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index d00cebdbd260..0802271bc90c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -3,16 +3,14 @@
 #define nv50_head(c) container_of((c), struct nv50_head, base.base)
 #include "disp.h"
 #include "atom.h"
+#include "lut.h"
 
 #include "nouveau_crtc.h"
 
 struct nv50_head {
 	const struct nv50_head_func *func;
 	struct nouveau_crtc base;
-	struct {
-		struct nouveau_bo *nvbo[2];
-		int next;
-	} ilut;
+	struct nv50_lut olut;
 };
 
 int nv50_head_create(struct drm_device *, int index);
@@ -22,8 +20,9 @@ void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y);
 struct nv50_head_func {
 	void (*view)(struct nv50_head *, struct nv50_head_atom *);
 	void (*mode)(struct nv50_head *, struct nv50_head_atom *);
-	void (*ilut_set)(struct nv50_head *, struct nv50_head_atom *);
-	void (*ilut_clr)(struct nv50_head *);
+	void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+	void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
+	void (*olut_clr)(struct nv50_head *);
 	void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
 	void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
 	void (*core_clr)(struct nv50_head *);
@@ -39,6 +38,7 @@ struct nv50_head_func {
 extern const struct nv50_head_func head507d;
 void head507d_view(struct nv50_head *, struct nv50_head_atom *);
 void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
+void head507d_olut(struct nv50_head *, struct nv50_head_atom *);
 void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
 void head507d_core_clr(struct nv50_head *);
 void head507d_base(struct nv50_head *, struct nv50_head_atom *);
@@ -51,13 +51,13 @@ extern const struct nv50_head_func head827d;
 extern const struct nv50_head_func head907d;
 void head907d_view(struct nv50_head *, struct nv50_head_atom *);
 void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
-void head907d_ilut_set(struct nv50_head *, struct nv50_head_atom *);
-void head907d_ilut_clr(struct nv50_head *);
+void head907d_olut(struct nv50_head *, struct nv50_head_atom *);
+void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
+void head907d_olut_clr(struct nv50_head *);
 void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
 void head907d_core_clr(struct nv50_head *);
 void head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
 void head907d_curs_clr(struct nv50_head *);
-void head907d_base(struct nv50_head *, struct nv50_head_atom *);
 void head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
 void head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
 void head907d_or(struct nv50_head *, struct nv50_head_atom *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 5f06fa174832..75575c33c5d6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -165,6 +165,7 @@ head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 		 * without also updating HeadSetOffsetCursor.
 		 */
 		asyh->set.curs = asyh->curs.visible;
+		asyh->set.olut = asyh->olut.handle != 0;
 	}
 }
 
@@ -178,8 +179,8 @@ head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
 		asyh->core.w = asyh->base.w;
 		asyh->core.h = asyh->base.h;
 	} else
-	if ((asyh->core.visible = asyh->curs.visible) ||
-	    (asyh->core.visible = asyh->ilut.visible)) {
+	if ((asyh->core.visible = (asyh->ovly.cpp != 0)) ||
+	    (asyh->core.visible = asyh->curs.visible)) {
 		/*XXX: We need to either find some way of having the
 		 *     primary base layer appear black, while still
 		 *     being able to display the other layers, or we
@@ -200,30 +201,39 @@ head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
 }
 
 static void
-head507d_ilut_clr(struct nv50_head *head)
+head507d_olut_clr(struct nv50_head *head)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 2))) {
 		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
-		evo_data(push, 0x40000000);
+		evo_data(push, 0x00000000);
 		evo_kick(push, core);
 	}
 }
 
 static void
-head507d_ilut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 3))) {
 		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-		evo_data(push, 0x80000000 | asyh->ilut.mode << 30);
-		evo_data(push, asyh->ilut.offset >> 8);
+		evo_data(push, 0x80000000 | asyh->olut.mode << 30);
+		evo_data(push, asyh->olut.offset >> 8);
 		evo_kick(push, core);
 	}
 }
 
+void
+head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	if (asyh->base.cpp == 1)
+		asyh->olut.mode = 0;
+	else
+		asyh->olut.mode = 1;
+}
+
 void
 head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
@@ -269,8 +279,9 @@ const struct nv50_head_func
 head507d = {
 	.view = head507d_view,
 	.mode = head507d_mode,
-	.ilut_set = head507d_ilut_set,
-	.ilut_clr = head507d_ilut_clr,
+	.olut = head507d_olut,
+	.olut_set = head507d_olut_set,
+	.olut_clr = head507d_olut_clr,
 	.core_calc = head507d_core_calc,
 	.core_set = head507d_core_set,
 	.core_clr = head507d_core_clr,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index 84ce595fbe79..ddc143bac305 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -74,13 +74,13 @@ head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 }
 
 static void
-head827d_ilut_clr(struct nv50_head *head)
+head827d_olut_clr(struct nv50_head *head)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 4))) {
 		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
-		evo_data(push, 0x40000000);
+		evo_data(push, 0x00000000);
 		evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
 		evo_data(push, 0x00000000);
 		evo_kick(push, core);
@@ -88,16 +88,16 @@ head827d_ilut_clr(struct nv50_head *head)
 }
 
 static void
-head827d_ilut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 5))) {
 		evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-		evo_data(push, 0x80000000 | asyh->ilut.mode << 30);
-		evo_data(push, asyh->ilut.offset >> 8);
+		evo_data(push, 0x80000000 | asyh->olut.mode << 30);
+		evo_data(push, asyh->olut.offset >> 8);
 		evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
-		evo_data(push, asyh->ilut.handle);
+		evo_data(push, asyh->olut.handle);
 		evo_kick(push, core);
 	}
 }
@@ -106,8 +106,9 @@ const struct nv50_head_func
 head827d = {
 	.view = head507d_view,
 	.mode = head507d_mode,
-	.ilut_set = head827d_ilut_set,
-	.ilut_clr = head827d_ilut_clr,
+	.olut = head507d_olut,
+	.olut_set = head827d_olut_set,
+	.olut_clr = head827d_olut_clr,
 	.core_calc = head507d_core_calc,
 	.core_set = head827d_core_set,
 	.core_clr = head507d_core_clr,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 0035eccd62d6..0fa0159bfafb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -91,7 +91,7 @@ head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
-void
+static void
 head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -182,13 +182,13 @@ head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 }
 
 void
-head907d_ilut_clr(struct nv50_head *head)
+head907d_olut_clr(struct nv50_head *head)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
 	if ((push = evo_wait(core, 4))) {
-		evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
-		evo_data(push, 0x03000000);
+		evo_mthd(push, 0x0448 + (head->base.index * 0x300), 1);
+		evo_data(push, 0x00000000);
 		evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
 		evo_data(push, 0x00000000);
 		evo_kick(push, core);
@@ -196,22 +196,26 @@ head907d_ilut_clr(struct nv50_head *head)
 }
 
 void
-head907d_ilut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
 	u32 *push;
-	if ((push = evo_wait(core, 7))) {
-		evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
-		evo_data(push, 0x80000000 | asyh->ilut.mode << 24);
-		evo_data(push, asyh->ilut.offset >> 8);
-		evo_data(push, 0x00000000);
-		evo_data(push, 0x00000000);
+	if ((push = evo_wait(core, 5))) {
+		evo_mthd(push, 0x0448 + (head->base.index * 0x300), 2);
+		evo_data(push, 0x80000000 | asyh->olut.mode << 24);
+		evo_data(push, asyh->olut.offset >> 8);
 		evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
-		evo_data(push, asyh->ilut.handle);
+		evo_data(push, asyh->olut.handle);
 		evo_kick(push, core);
 	}
 }
 
+void
+head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	asyh->olut.mode = 7;
+}
+
 void
 head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
@@ -259,8 +263,9 @@ const struct nv50_head_func
 head907d = {
 	.view = head907d_view,
 	.mode = head907d_mode,
-	.ilut_set = head907d_ilut_set,
-	.ilut_clr = head907d_ilut_clr,
+	.olut = head907d_olut,
+	.olut_set = head907d_olut_set,
+	.olut_clr = head907d_olut_clr,
 	.core_calc = head507d_core_calc,
 	.core_set = head907d_core_set,
 	.core_clr = head907d_core_clr,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 5341ea3bc7b6..5f654512c8c2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -36,18 +36,46 @@ head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
+static void
+head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 bounds = 0;
+	u32 *push;
+
+	if (asyh->base.cpp) {
+		switch (asyh->base.cpp) {
+		case 8: bounds |= 0x00000500; break;
+		case 4: bounds |= 0x00000300; break;
+		case 2: bounds |= 0x00000100; break;
+		case 1: bounds |= 0x00000000; break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		bounds |= 0x00020001;
+	}
+
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
+		evo_data(push, bounds);
+		evo_kick(push, core);
+	}
+}
+
 const struct nv50_head_func
 head917d = {
 	.view = head907d_view,
 	.mode = head907d_mode,
-	.ilut_set = head907d_ilut_set,
-	.ilut_clr = head907d_ilut_clr,
+	.olut = head907d_olut,
+	.olut_set = head907d_olut_set,
+	.olut_clr = head907d_olut_clr,
 	.core_calc = head507d_core_calc,
 	.core_set = head907d_core_set,
 	.core_clr = head907d_core_clr,
 	.curs_set = head907d_curs_set,
 	.curs_clr = head907d_curs_clr,
-	.base = head907d_base,
+	.base = head917d_base,
 	.ovly = head907d_ovly,
 	.dither = head917d_dither,
 	.procamp = head907d_procamp,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
new file mode 100644
index 000000000000..a6b96ae2a22f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "lut.h"
+#include "disp.h"
+
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_property.h>
+
+#include <nvif/class.h>
+
+u32
+nv50_lut_load(struct nv50_lut *lut, bool legacy, int buffer,
+	      struct drm_property_blob *blob)
+{
+	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+	void __iomem *mem = lut->mem[buffer].object.map.ptr;
+	const int size = blob->length / sizeof(*in);
+	int bits, shift, i;
+	u16 zero, r, g, b;
+	u32 addr = lut->mem[buffer].addr;
+
+	/* This can't happen.. But it shuts the compiler up. */
+	if (WARN_ON(size != 256))
+		return 0;
+
+	if (legacy) {
+		bits = 11;
+		shift = 3;
+		zero = 0x0000;
+	} else {
+		bits = 14;
+		shift = 0;
+		zero = 0x6000;
+	}
+
+	for (i = 0; i < size; i++) {
+		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
+		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
+		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
+		writew(r, mem + (i * 0x08) + 0);
+		writew(g, mem + (i * 0x08) + 2);
+		writew(b, mem + (i * 0x08) + 4);
+	}
+
+	/* INTERPOLATE modes require a "next" entry to interpolate with,
+	 * so we replicate the last entry to deal with this for now.
+	 */
+	writew(r, mem + (i * 0x08) + 0);
+	writew(g, mem + (i * 0x08) + 2);
+	writew(b, mem + (i * 0x08) + 4);
+	return addr;
+}
+
+void
+nv50_lut_fini(struct nv50_lut *lut)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(lut->mem); i++)
+		nvif_mem_fini(&lut->mem[i]);
+}
+
+int
+nv50_lut_init(struct nv50_disp *disp, struct nvif_mmu *mmu,
+	      struct nv50_lut *lut)
+{
+	const u32 size = disp->disp->object.oclass < GF110_DISP ? 257 : 1025;
+	int i;
+	for (i = 0; i < ARRAY_SIZE(lut->mem); i++) {
+		int ret = nvif_mem_init_map(mmu, NVIF_MEM_VRAM, size * 8,
+					    &lut->mem[i]);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.h b/drivers/gpu/drm/nouveau/dispnv50/lut.h
new file mode 100644
index 000000000000..6d7b8352e4cb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.h
@@ -0,0 +1,15 @@
+#ifndef __NV50_KMS_LUT_H__
+#define __NV50_KMS_LUT_H__
+#include <nvif/mem.h>
+struct drm_property_blob;
+struct nv50_disp;
+
+struct nv50_lut {
+	struct nvif_mem mem[2];
+};
+
+int nv50_lut_init(struct nv50_disp *, struct nvif_mmu *, struct nv50_lut *);
+void nv50_lut_fini(struct nv50_lut *);
+u32 nv50_lut_load(struct nv50_lut *, bool legacy, int buffer,
+		  struct drm_property_blob *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0f6de6049be4..fbaf8b7ed203 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -116,6 +116,7 @@ nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
 	};
 	if (clr.sema ) wndw->func-> sema_clr(wndw);
 	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
+	if (clr.xlut ) wndw->func-> xlut_clr(wndw);
 	if (clr.image) wndw->func->image_clr(wndw);
 
 	interlock[wndw->interlock.type] |= wndw->interlock.data;
@@ -133,7 +134,18 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
 	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
 	if (asyw->set.image) wndw->func->image_set(wndw, asyw);
-	if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
+
+	if (asyw->set.xlut ) {
+		if (asyw->ilut) {
+			asyw->xlut.i.offset =
+				nv50_lut_load(&wndw->ilut,
+					      asyw->xlut.i.mode <= 1,
+					      asyw->xlut.i.buffer,
+					      asyw->ilut);
+		}
+		wndw->func->xlut_set(wndw, asyw);
+	}
+
 	if (asyw->set.point) {
 		wndw->immd->point(wndw, asyw);
 		wndw->immd->update(wndw, interlock);
@@ -241,7 +253,56 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
 	return wndw->func->acquire(wndw, asyw, asyh);
 }
 
-int
+static void
+nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
+			   struct nv50_wndw_atom *armw,
+			   struct nv50_wndw_atom *asyw,
+			   struct nv50_head_atom *asyh)
+{
+	struct drm_property_blob *ilut = asyh->state.degamma_lut;
+
+	/* I8 format without an input LUT makes no sense, and the
+	 * HW error-checks for this.
+	 *
+	 * In order to handle legacy gamma, when there's no input
+	 * LUT we need to steal the output LUT and use it instead.
+	 */
+	if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
+		/* This should be an error, but there are legacy clients
+		 * that do a modeset before providing a gamma table.
+		 *
+		 * We keep the window disabled to avoid angering HW.
+		 */
+		if (!(ilut = asyh->state.gamma_lut)) {
+			asyw->visible = false;
+			return;
+		}
+
+		if (wndw->func->ilut)
+			asyh->wndw.olut |= BIT(wndw->id);
+	} else {
+		asyh->wndw.olut &= ~BIT(wndw->id);
+	}
+
+	/* Recalculate LUT state. */
+	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
+	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
+		wndw->func->ilut(wndw, asyw);
+		asyw->xlut.handle = wndw->wndw.vram.handle;
+		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
+		asyw->set.xlut = true;
+	}
+
+	/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
+	if (wndw->func->olut_core &&
+	    (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
+		asyw->set.xlut = true;
+
+	/* Can't do an immediate flip while changing the LUT. */
+	asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
+}
+
+static int
 nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 {
 	struct nouveau_drm *drm = nouveau_drm(plane->dev);
@@ -274,15 +335,26 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 			return PTR_ERR(harm);
 	}
 
+	/* LUT configuration can potentially cause the window to be disabled. */
+	if (asyw->visible && wndw->func->xlut_set &&
+	    (!armw->visible ||
+	     asyh->state.color_mgmt_changed ||
+	     asyw->state.fb->format->format !=
+	     armw->state.fb->format->format))
+		nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
+
 	/* Calculate new window state. */
 	if (asyw->visible) {
 		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
 						     armw, asyw, asyh);
 		if (ret)
 			return ret;
+
+		asyh->wndw.mask |= BIT(wndw->id);
 	} else
 	if (armw->visible) {
 		nv50_wndw_atomic_check_release(wndw, asyw, harm);
+		harm->wndw.mask &= ~BIT(wndw->id);
 	} else {
 		return 0;
 	}
@@ -294,9 +366,9 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 	if (!asyw->visible || modeset) {
 		asyw->clr.ntfy = armw->ntfy.handle != 0;
 		asyw->clr.sema = armw->sema.handle != 0;
+		asyw->clr.xlut = armw->xlut.handle != 0;
 		if (wndw->func->image_clr)
 			asyw->clr.image = armw->image.handle[0] != 0;
-		asyw->set.lut = wndw->func->lut && asyw->visible;
 	}
 
 	return 0;
@@ -381,9 +453,10 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
 	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
 	asyw->sema = armw->sema;
 	asyw->ntfy = armw->ntfy;
+	asyw->ilut = NULL;
+	asyw->xlut = armw->xlut;
 	asyw->image = armw->image;
 	asyw->point = armw->point;
-	asyw->lut = armw->lut;
 	asyw->clr.mask = 0;
 	asyw->set.mask = 0;
 	return &asyw->state;
@@ -417,6 +490,9 @@ nv50_wndw_destroy(struct drm_plane *plane)
 	nvif_notify_fini(&wndw->notify);
 	nv50_dmac_destroy(&wndw->wimm);
 	nv50_dmac_destroy(&wndw->wndw);
+
+	nv50_lut_fini(&wndw->ilut);
+
 	drm_plane_cleanup(&wndw->plane);
 	kfree(wndw);
 }
@@ -456,6 +532,9 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
 	       struct nv50_wndw **pwndw)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_mmu *mmu = &drm->client.mmu;
+	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_wndw *wndw;
 	int nformat;
 	int ret;
@@ -484,6 +563,12 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 
 	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
 
+	if (wndw->func->ilut) {
+		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
+		if (ret)
+			return ret;
+	}
+
 	wndw->notify.func = nv50_wndw_notify;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index c26796c612f6..223cf3f37dae 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -3,6 +3,7 @@
 #define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
 #include "disp.h"
 #include "atom.h"
+#include "lut.h"
 
 #include <nvif/notify.h>
 
@@ -24,6 +25,8 @@ struct nv50_wndw {
 
 	struct drm_plane plane;
 
+	struct nv50_lut ilut;
+
 	struct nv50_dmac wndw;
 	struct nv50_dmac wimm;
 
@@ -61,9 +64,12 @@ struct nv50_wndw_func {
 	void (*ntfy_clr)(struct nv50_wndw *);
 	int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
 			       struct nvif_device *);
+	void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	bool olut_core;
+	void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+	void (*xlut_clr)(struct nv50_wndw *);
 	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*image_clr)(struct nv50_wndw *);
-	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
 
 	void (*update)(struct nv50_wndw *, u32 *interlock);
 };

From b05d873808c77fedd25130b0355acc0da1c11e19 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1132/1286] drm/nouveau/kms/nv50-: separate blocklinear vs
 linear pitch

This will be required to support Volta.
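
For illustration only (not part of the functional change): a minimal C
sketch of how the split fields are packed into the surface-description
method word, mirroring the base507c_image_set() hunk below.  The helper
name is hypothetical; the field positions are taken directly from the
patch, and only one of pitch/blocks is non-zero for a given surface (see
nv50_wndw_atomic_check_acquire()).

	static u32 pack_storage_word(u32 layout, u32 pitch, u32 blocks, u32 blockh)
	{
		return layout << 20 |		/* 0 = blocklinear, 1 = linear */
		       (pitch >> 8) << 8 |	/* linear: byte pitch / 256 */
		       blocks << 8 |		/* blocklinear: pitch in blocks */
		       blockh;			/* blocklinear: block height */
	}

The 907c-class methods updated in the same patch place the layout bit at
position 24 rather than 20, but encode pitch, blocks and blockh the same
way.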

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/atom.h     |  6 ++++--
 drivers/gpu/drm/nouveau/dispnv50/base507c.c |  5 +++--
 drivers/gpu/drm/nouveau/dispnv50/base827c.c |  5 +++--
 drivers/gpu/drm/nouveau/dispnv50/base907c.c |  5 +++--
 drivers/gpu/drm/nouveau/dispnv50/head507d.c |  8 +++++---
 drivers/gpu/drm/nouveau/dispnv50/head827d.c |  3 ++-
 drivers/gpu/drm/nouveau/dispnv50/head907d.c |  3 ++-
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     | 10 ++++++----
 8 files changed, 28 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 3e9e8832d0dd..0409947bf196 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -63,7 +63,8 @@ struct nv50_head_atom {
 		u8  format;
 		u8  kind:7;
 		u8  layout:1;
-		u8  block:4;
+		u8  blockh:4;
+		u16 blocks:12;
 		u32 pitch:20;
 		u16 x;
 		u16 y;
@@ -175,7 +176,8 @@ struct nv50_wndw_atom {
 		u8  format;
 		u8  kind:7;
 		u8  layout:1;
-		u8  block:4;
+		u8  blockh:4;
+		u16 blocks[3];
 		u32 pitch[3];
 		u16 w;
 		u16 h;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 5d664d75b645..d5e295ca2caa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -67,8 +67,9 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 		evo_data(push, 0x00000000);
 		evo_data(push, asyw->image.h << 16 | asyw->image.w);
 		evo_data(push, asyw->image.layout << 20 |
-			       asyw->image.pitch[0] |
-			       asyw->image.block);
+			       (asyw->image.pitch[0] >> 8) << 8 |
+			       asyw->image.blocks[0] << 8 |
+			       asyw->image.blockh);
 		evo_data(push, asyw->image.kind << 16 |
 			       asyw->image.format << 8);
 		evo_kick(push, &wndw->wndw);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index d886858a5724..73646819a0d6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -36,8 +36,9 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 		evo_data(push, 0x00000000);
 		evo_data(push, asyw->image.h << 16 | asyw->image.w);
 		evo_data(push, asyw->image.layout << 20 |
-			       asyw->image.pitch[0] |
-			       asyw->image.block);
+			       (asyw->image.pitch[0] >> 8) << 8 |
+			       asyw->image.blocks[0] << 8 |
+			       asyw->image.blockh);
 		evo_data(push, asyw->image.format << 8);
 		evo_kick(push, &wndw->wndw);
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 2643592ad827..8edc0598bda8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -36,8 +36,9 @@ base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 		evo_data(push, 0x00000000);
 		evo_data(push, asyw->image.h << 16 | asyw->image.w);
 		evo_data(push, asyw->image.layout << 24 |
-			       asyw->image.pitch[0] |
-			       asyw->image.block);
+			       (asyw->image.pitch[0] >> 8) << 8 |
+			       asyw->image.blocks[0] << 8 |
+			       asyw->image.blockh);
 		evo_data(push, asyw->image.format << 8);
 		evo_kick(push, &wndw->wndw);
 	}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 75575c33c5d6..8a8aa9b69ef8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -151,8 +151,9 @@ head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 		evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
 		evo_data(push, asyh->core.h << 16 | asyh->core.w);
 		evo_data(push, asyh->core.layout << 20 |
-			       asyh->core.pitch >> 8 << 8 |
-			       asyh->core.block);
+			       (asyh->core.pitch >> 8) << 8 |
+			       asyh->core.blocks << 8 |
+			       asyh->core.blockh);
 		evo_data(push, asyh->core.kind << 16 |
 			       asyh->core.format << 8);
 		evo_data(push, asyh->core.handle);
@@ -196,7 +197,8 @@ head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
 	asyh->core.format = 0xcf;
 	asyh->core.kind = 0;
 	asyh->core.layout = 1;
-	asyh->core.block = 0;
+	asyh->core.blockh = 0;
+	asyh->core.blocks = 0;
 	asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index ddc143bac305..ae33e21790ee 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -64,7 +64,8 @@ head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 		evo_data(push, asyh->core.h << 16 | asyh->core.w);
 		evo_data(push, asyh->core.layout << 20 |
 			       (asyh->core.pitch >> 8) << 8 |
-			       asyh->core.block);
+			       asyh->core.blocks << 8 |
+			       asyh->core.blockh);
 		evo_data(push, asyh->core.format << 8);
 		evo_data(push, asyh->core.handle);
 		evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 0fa0159bfafb..a05dfccadcfa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -172,7 +172,8 @@ head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 		evo_data(push, asyh->core.h << 16 | asyh->core.w);
 		evo_data(push, asyh->core.layout << 24 |
 			       (asyh->core.pitch >> 8) << 8 |
-			       asyh->core.block);
+			       asyh->core.blocks << 8 |
+			       asyh->core.blockh);
 		evo_data(push, asyh->core.format << 8);
 		evo_data(push, asyh->core.handle);
 		evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index fbaf8b7ed203..b96dc3d4dab5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -225,13 +225,15 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
 		if (asyw->image.kind) {
 			asyw->image.layout = 0;
 			if (drm->client.device.info.chipset >= 0xc0)
-				asyw->image.block = fb->nvbo->mode >> 4;
+				asyw->image.blockh = fb->nvbo->mode >> 4;
 			else
-				asyw->image.block = fb->nvbo->mode;
-			asyw->image.pitch[0] = (fb->base.pitches[0] / 4) << 4;
+				asyw->image.blockh = fb->nvbo->mode;
+			asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+			asyw->image.pitch[0] = 0;
 		} else {
 			asyw->image.layout = 1;
-			asyw->image.block  = 0;
+			asyw->image.blockh = 0;
+			asyw->image.blocks[0] = 0;
 			asyw->image.pitch[0] = fb->base.pitches[0];
 		}
 

From 01d380ab4f702fffa6da60c4b006547b8dd66de8 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1133/1286] drm/nouveau/kms/gk104-: support additional cursor
 sizes

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/atom.h     |  2 +-
 drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 22 ++++++----------
 drivers/gpu/drm/nouveau/dispnv50/head.h     |  8 ++++++
 drivers/gpu/drm/nouveau/dispnv50/head507d.c | 28 +++++++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/head827d.c |  2 ++
 drivers/gpu/drm/nouveau/dispnv50/head907d.c |  2 ++
 drivers/gpu/drm/nouveau/dispnv50/head917d.c | 17 +++++++++++++
 7 files changed, 65 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 0409947bf196..3d059df78322 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -76,7 +76,7 @@ struct nv50_head_atom {
 		bool visible;
 		u32 handle;
 		u64 offset:40;
-		u8  layout:1;
+		u8  layout:2;
 		u8  format:1;
 	} curs;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index ba05bcb13ae7..291c08117ab6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -21,6 +21,7 @@
  */
 #include "curs.h"
 #include "core.h"
+#include "head.h"
 
 #include <nvif/cl507a.h>
 
@@ -70,6 +71,7 @@ static int
 curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 		 struct nv50_head_atom *asyh)
 {
+	struct nv50_head *head = nv50_head(asyw->state.crtc);
 	int ret;
 
 	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
@@ -80,24 +82,14 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
 	if (ret || !asyh->curs.visible)
 		return ret;
 
-	switch (asyw->state.fb->width) {
-	case 32: asyh->curs.layout = 0; break;
-	case 64: asyh->curs.layout = 1; break;
-	default:
-		return -EINVAL;
-	}
-
-	if (asyw->state.fb->width != asyw->state.fb->height)
+	if (asyw->image.w != asyw->image.h)
 		return -EINVAL;
 
-	switch (asyw->image.format) {
-	case 0xcf: asyh->curs.format = 1; break;
-	default:
-		WARN_ON(1);
-		return -EINVAL;
-	}
+	ret = head->func->curs_layout(head, asyw, asyh);
+	if (ret)
+		return ret;
 
-	return 0;
+	return head->func->curs_format(head, asyw, asyh);
 }
 
 static const u32
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 0802271bc90c..8f2c3ffa4e61 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -26,6 +26,10 @@ struct nv50_head_func {
 	void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
 	void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
 	void (*core_clr)(struct nv50_head *);
+	int (*curs_layout)(struct nv50_head *, struct nv50_wndw_atom *,
+			   struct nv50_head_atom *);
+	int (*curs_format)(struct nv50_head *, struct nv50_wndw_atom *,
+			   struct nv50_head_atom *);
 	void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
 	void (*curs_clr)(struct nv50_head *);
 	void (*base)(struct nv50_head *, struct nv50_head_atom *);
@@ -41,6 +45,10 @@ void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
 void head507d_olut(struct nv50_head *, struct nv50_head_atom *);
 void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
 void head507d_core_clr(struct nv50_head *);
+int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
+			 struct nv50_head_atom *);
+int head507d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
+			 struct nv50_head_atom *);
 void head507d_base(struct nv50_head *, struct nv50_head_atom *);
 void head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
 void head507d_dither(struct nv50_head *, struct nv50_head_atom *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 8a8aa9b69ef8..5b6a280ab804 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -128,6 +128,32 @@ head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
+int
+head507d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
+		     struct nv50_head_atom *asyh)
+{
+	switch (asyw->image.format) {
+	case 0xcf: asyh->curs.format = 1; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int
+head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
+		     struct nv50_head_atom *asyh)
+{
+	switch (asyw->image.w) {
+	case 32: asyh->curs.layout = 0; break;
+	case 64: asyh->curs.layout = 1; break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 void
 head507d_core_clr(struct nv50_head *head)
 {
@@ -287,6 +313,8 @@ head507d = {
 	.core_calc = head507d_core_calc,
 	.core_set = head507d_core_set,
 	.core_clr = head507d_core_clr,
+	.curs_layout = head507d_curs_layout,
+	.curs_format = head507d_curs_format,
 	.curs_set = head507d_curs_set,
 	.curs_clr = head507d_curs_clr,
 	.base = head507d_base,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index ae33e21790ee..af5e7bd5978b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -113,6 +113,8 @@ head827d = {
 	.core_calc = head507d_core_calc,
 	.core_set = head827d_core_set,
 	.core_clr = head507d_core_clr,
+	.curs_layout = head507d_curs_layout,
+	.curs_format = head507d_curs_format,
 	.curs_set = head827d_curs_set,
 	.curs_clr = head827d_curs_clr,
 	.base = head507d_base,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index a05dfccadcfa..c09620f540f9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -270,6 +270,8 @@ head907d = {
 	.core_calc = head507d_core_calc,
 	.core_set = head907d_core_set,
 	.core_clr = head907d_core_clr,
+	.curs_layout = head507d_curs_layout,
+	.curs_format = head507d_curs_format,
 	.curs_set = head907d_curs_set,
 	.curs_clr = head907d_curs_clr,
 	.base = head907d_base,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 5f654512c8c2..4c019a4417ea 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -63,6 +63,21 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
+static int
+head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
+		     struct nv50_head_atom *asyh)
+{
+	switch (asyw->state.fb->width) {
+	case  32: asyh->curs.layout = 0; break;
+	case  64: asyh->curs.layout = 1; break;
+	case 128: asyh->curs.layout = 2; break;
+	case 256: asyh->curs.layout = 3; break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 const struct nv50_head_func
 head917d = {
 	.view = head907d_view,
@@ -73,6 +88,8 @@ head917d = {
 	.core_calc = head507d_core_calc,
 	.core_set = head907d_core_set,
 	.core_clr = head907d_core_clr,
+	.curs_layout = head917d_curs_layout,
+	.curs_format = head507d_curs_format,
 	.curs_set = head907d_curs_set,
 	.curs_clr = head907d_curs_clr,
 	.base = head917d_base,

From 88b600d421a5550cd56e13f2eda34cbefe417c28 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1134/1286] drm/nouveau/kms/gk104-: add support for
 [XA]2R10G10B10 formats

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/Kbuild     |  1 +
 drivers/gpu/drm/nouveau/dispnv50/base.c     |  4 +-
 drivers/gpu/drm/nouveau/dispnv50/base.h     |  3 ++
 drivers/gpu/drm/nouveau/dispnv50/base907c.c |  2 +-
 drivers/gpu/drm/nouveau/dispnv50/base917c.c | 48 +++++++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     |  2 +
 6 files changed, 57 insertions(+), 3 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/base917c.c

diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index d074bb8ecd1b..7c337fd80158 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -27,6 +27,7 @@ nouveau-y += dispnv50/base.o
 nouveau-y += dispnv50/base507c.o
 nouveau-y += dispnv50/base827c.o
 nouveau-y += dispnv50/base907c.o
+nouveau-y += dispnv50/base917c.o
 
 nouveau-y += dispnv50/curs.o
 nouveau-y += dispnv50/curs507a.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.c b/drivers/gpu/drm/nouveau/dispnv50/base.c
index 5f184ab833e8..7c752acf2b48 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.c
@@ -31,8 +31,8 @@ nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 		int version;
 		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 	} bases[] = {
-		{ GK110_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
-		{ GK104_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
+		{ GK110_DISP_BASE_CHANNEL_DMA, 0, base917c_new },
+		{ GK104_DISP_BASE_CHANNEL_DMA, 0, base917c_new },
 		{ GF110_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
 		{ GT214_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
 		{ GT200_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
index 87ec8394b7f3..7afd9e26f9f9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -25,6 +25,9 @@ void base507c_update(struct nv50_wndw *, u32 *);
 int base827c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
 int base907c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+extern const struct nv50_wndw_func base907c;
+
+int base917c_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
 int nv50_base_new(struct nouveau_drm *, int head, struct nv50_wndw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 8edc0598bda8..a562fc94ce59 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -82,7 +82,7 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 	asyw->xlut.i.enable = 2;
 }
 
-static const struct nv50_wndw_func
+const struct nv50_wndw_func
 base907c = {
 	.acquire = base507c_acquire,
 	.release = base507c_release,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base917c.c b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
new file mode 100644
index 000000000000..54d705bb81a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "base.h"
+#include "atom.h"
+
+const u32
+base917c_format[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_ARGB2101010,
+	0
+};
+
+int
+base917c_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return base507c_new_(&base907c, base917c_format, drm, head, oclass,
+			     0x00000002 << (head * 4), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index b96dc3d4dab5..861fb0ec6b61 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -194,6 +194,8 @@ nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
 	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
 	case DRM_FORMAT_XBGR8888   :
 	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010: asyw->image.format = 0xdf; break;
 	default:
 		WARN_ON(1);
 		return -EINVAL;

From 2ce7f38629891eeaf3e5d406add102a3fa6f6632 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1135/1286] drm/nouveau/kms/nv50-: initial overlay support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/Kbuild     |   1 +
 drivers/gpu/drm/nouveau/dispnv50/atom.h     |  11 ++
 drivers/gpu/drm/nouveau/dispnv50/base.h     |   2 -
 drivers/gpu/drm/nouveau/dispnv50/curs.h     |   1 -
 drivers/gpu/drm/nouveau/dispnv50/disp.h     |   3 +
 drivers/gpu/drm/nouveau/dispnv50/head507d.c |   3 +-
 drivers/gpu/drm/nouveau/dispnv50/head907d.c |   2 +
 drivers/gpu/drm/nouveau/dispnv50/oimm507b.c |   6 +-
 drivers/gpu/drm/nouveau/dispnv50/ovly.c     |   2 +-
 drivers/gpu/drm/nouveau/dispnv50/ovly.h     |  14 ++
 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c | 144 ++++++++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/ovly827e.c |  66 ++++++++-
 drivers/gpu/drm/nouveau/dispnv50/ovly907e.c |  38 +++++-
 drivers/gpu/drm/nouveau/dispnv50/ovly917e.c |  45 ++++++
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     |  35 ++++-
 drivers/gpu/drm/nouveau/dispnv50/wndw.h     |   6 +
 16 files changed, 364 insertions(+), 15 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/ovly917e.c

diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 7c337fd80158..ebd18cb9feda 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -40,3 +40,4 @@ nouveau-y += dispnv50/ovly.o
 nouveau-y += dispnv50/ovly507e.o
 nouveau-y += dispnv50/ovly827e.o
 nouveau-y += dispnv50/ovly907e.o
+nouveau-y += dispnv50/ovly917e.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 3d059df78322..d8337e7996e8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -173,6 +173,7 @@ struct nv50_wndw_atom {
 		u8  mode:2;
 		u8  interval:4;
 
+		u8  colorspace:2;
 		u8  format;
 		u8  kind:7;
 		u8  layout:1;
@@ -186,6 +187,15 @@ struct nv50_wndw_atom {
 		u64 offset[6];
 	} image;
 
+	struct {
+		u16 sx;
+		u16 sy;
+		u16 sw;
+		u16 sh;
+		u16 dw;
+		u16 dh;
+	} scale;
+
 	struct {
 		u16 x;
 		u16 y;
@@ -197,6 +207,7 @@ struct nv50_wndw_atom {
 			bool sema:1;
 			bool xlut:1;
 			bool image:1;
+			bool scale:1;
 			bool point:1;
 		};
 		u8 mask;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base.h b/drivers/gpu/drm/nouveau/dispnv50/base.h
index 7afd9e26f9f9..e7f14f230f35 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/base.h
@@ -13,10 +13,8 @@ void base507c_release(struct nv50_wndw *, struct nv50_wndw_atom *,
 		      struct nv50_head_atom *);
 void base507c_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
 void base507c_sema_clr(struct nv50_wndw *);
-void base507c_ntfy_reset(struct nouveau_bo *, u32);
 void base507c_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
 void base507c_ntfy_clr(struct nv50_wndw *);
-int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
 void base507c_xlut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
 void base507c_xlut_clr(struct nv50_wndw *);
 void base507c_image_clr(struct nv50_wndw *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.h b/drivers/gpu/drm/nouveau/dispnv50/curs.h
index 2285247dc2a3..8edac4507ec8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.h
@@ -6,7 +6,6 @@ int curs507a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 int curs507a_new_(const struct nv50_wimm_func *, struct nouveau_drm *,
 		  int head, s32 oclass, u32 interlock_data,
 		  struct nv50_wndw **);
-extern const struct nv50_wimm_func curs507a;
 
 int curs907a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index f3a963b0ab77..a89b83f95187 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -16,6 +16,9 @@ struct nv50_disp {
 #define NV50_DISP_BASE_SEM0(c)                    NV50_DISP_WNDW_SEM0(0 + (c))
 #define NV50_DISP_BASE_SEM1(c)                    NV50_DISP_WNDW_SEM1(0 + (c))
 #define NV50_DISP_BASE_NTFY(c)                    NV50_DISP_WNDW_NTFY(0 + (c))
+#define NV50_DISP_OVLY_SEM0(c)                    NV50_DISP_WNDW_SEM0(4 + (c))
+#define NV50_DISP_OVLY_SEM1(c)                    NV50_DISP_WNDW_SEM1(4 + (c))
+#define NV50_DISP_OVLY_NTFY(c)                    NV50_DISP_WNDW_NTFY(4 + (c))
 	struct nouveau_bo *sync;
 
 	struct mutex mutex;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 5b6a280ab804..51bc5996fd37 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -58,7 +58,6 @@ head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 
 	if (asyh->ovly.cpp) {
 		switch (asyh->ovly.cpp) {
-		case 8: bounds |= 0x00000500; break;
 		case 4: bounds |= 0x00000300; break;
 		case 2: bounds |= 0x00000100; break;
 		default:
@@ -66,6 +65,8 @@ head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 			break;
 		}
 		bounds |= 0x00000001;
+	} else {
+		bounds |= 0x00000100;
 	}
 
 	if ((push = evo_wait(core, 2))) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index c09620f540f9..633907163eb1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -82,6 +82,8 @@ head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
 			break;
 		}
 		bounds |= 0x00000001;
+	} else {
+		bounds |= 0x00000100;
 	}
 
 	if ((push = evo_wait(core, 2))) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
index c4baca82de14..2ee404b3e19f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
@@ -23,10 +23,6 @@
 
 #include <nvif/cl507b.h>
 
-static const struct nv50_wimm_func
-oimm507b = {
-};
-
 static int
 oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 	       s32 oclass, struct nv50_wndw *wndw)
@@ -52,5 +48,5 @@ oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 int
 oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
 {
-	return oimm507b_init_(&oimm507b, drm, oclass, wndw);
+	return oimm507b_init_(&curs507a, drm, oclass, wndw);
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.c b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
index be0f16fdcd5b..90c246d47604 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.c
@@ -32,7 +32,7 @@ nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 		int version;
 		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 	} ovlys[] = {
-		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly907e_new },
+		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly917e_new },
 		{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly907e_new },
 		{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
 		{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly.h b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
index d149ef6f957e..4869d52d1786 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly.h
@@ -6,11 +6,25 @@ int ovly507e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 int ovly507e_new_(const struct nv50_wndw_func *, const u32 *format,
 		  struct nouveau_drm *, int head, s32 oclass,
 		  u32 interlock_data, struct nv50_wndw **);
+int ovly507e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+		     struct nv50_head_atom *);
+void ovly507e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+		      struct nv50_head_atom *);
+void ovly507e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void ovly507e_ntfy_clr(struct nv50_wndw *);
+void ovly507e_image_clr(struct nv50_wndw *);
+void ovly507e_scale_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void ovly507e_update(struct nv50_wndw *, u32 *);
 
 extern const u32 ovly827e_format[];
+void ovly827e_ntfy_reset(struct nouveau_bo *, u32);
+int ovly827e_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+
+extern const struct nv50_wndw_func ovly907e;
 
 int ovly827e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 int ovly907e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int ovly917e_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
 int nv50_ovly_new(struct nouveau_drm *, int head, struct nv50_wndw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index 732eea39e4de..cc417664f823 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -20,17 +20,149 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include "ovly.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include <nvif/cl507e.h>
+#include <nvif/event.h>
+
+void
+ovly507e_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x0080, 1);
+		evo_data(push, interlock[NV50_DISP_INTERLOCK_CORE]);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+void
+ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 4))) {
+		evo_mthd(push, 0x00e0, 3);
+		evo_data(push, asyw->scale.sy << 16 | asyw->scale.sx);
+		evo_data(push, asyw->scale.sh << 16 | asyw->scale.sw);
+		evo_data(push, asyw->scale.dw);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+void
+ovly507e_image_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 4))) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 12))) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, asyw->image.interval << 4);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, asyw->image.handle[0]);
+		evo_mthd(push, 0x0100, 1);
+		evo_data(push, 0x00000002);
+		evo_mthd(push, 0x0800, 1);
+		evo_data(push, asyw->image.offset[0] >> 8);
+		evo_mthd(push, 0x0808, 3);
+		evo_data(push, asyw->image.h << 16 | asyw->image.w);
+		evo_data(push, asyw->image.layout << 20 |
+			       (asyw->image.pitch[0] >> 8) << 8 |
+			       asyw->image.blocks[0] << 8 |
+			       asyw->image.blockh);
+		evo_data(push, asyw->image.kind << 16 |
+			       asyw->image.format << 8 |
+			       asyw->image.colorspace);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+void
+ovly507e_ntfy_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x00a4, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+void
+ovly507e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 3))) {
+		evo_mthd(push, 0x00a0, 2);
+		evo_data(push, asyw->ntfy.awaken << 30 | asyw->ntfy.offset);
+		evo_data(push, asyw->ntfy.handle);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+void
+ovly507e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+		 struct nv50_head_atom *asyh)
+{
+	asyh->ovly.cpp = 0;
+}
+
+int
+ovly507e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+		 struct nv50_head_atom *asyh)
+{
+	const struct drm_framebuffer *fb = asyw->state.fb;
+	int ret;
+
+	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  true, true);
+	if (ret)
+		return ret;
+
+	asyh->ovly.cpp = fb->format->cpp[0];
+	return 0;
+}
 
 #include "nouveau_bo.h"
 
 static const struct nv50_wndw_func
 ovly507e = {
+	.acquire = ovly507e_acquire,
+	.release = ovly507e_release,
+	.ntfy_set = ovly507e_ntfy_set,
+	.ntfy_clr = ovly507e_ntfy_clr,
+	.ntfy_reset = base507c_ntfy_reset,
+	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.image_set = ovly507e_image_set,
+	.image_clr = ovly507e_image_clr,
+	.scale_set = ovly507e_scale_set,
+	.update = ovly507e_update,
 };
 
 static const u32
 ovly507e_format[] = {
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
 	0
 };
 
@@ -61,6 +193,18 @@ ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
 		return ret;
 	}
 
+	ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func, false,
+			       NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT,
+			       &(struct nvif_notify_uevent_req) {},
+			       sizeof(struct nvif_notify_uevent_req),
+			       sizeof(struct nvif_notify_uevent_rep),
+			       &wndw->notify);
+	if (ret)
+		return ret;
+
+	wndw->ntfy = NV50_DISP_OVLY_NTFY(wndw->id);
+	wndw->sema = NV50_DISP_OVLY_SEM0(wndw->id);
+	wndw->data = 0x00000000;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index a8115f13406e..aaa9fe5a4fc8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -20,17 +20,81 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include "ovly.h"
+#include "atom.h"
 
 #include <nouveau_bo.h>
 
-#include <nvif/cl507e.h>
+static void
+ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 12))) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, asyw->image.interval << 4);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, asyw->image.handle[0]);
+		evo_mthd(push, 0x0100, 1);
+		evo_data(push, 0x00000002);
+		evo_mthd(push, 0x0800, 1);
+		evo_data(push, asyw->image.offset[0] >> 8);
+		evo_mthd(push, 0x0808, 3);
+		evo_data(push, asyw->image.h << 16 | asyw->image.w);
+		evo_data(push, asyw->image.layout << 20 |
+			       (asyw->image.pitch[0] >> 8) << 8 |
+			       asyw->image.blocks[0] << 8 |
+			       asyw->image.blockh);
+		evo_data(push, asyw->image.format << 8 |
+			       asyw->image.colorspace);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+int
+ovly827e_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
+			 struct nvif_device *device)
+{
+	s64 time = nvif_msec(device, 2000ULL,
+		u32 data = nouveau_bo_rd32(bo, offset / 4 + 3);
+		if ((data & 0xffff0000) == 0xffff0000)
+			break;
+		usleep_range(1, 2);
+	);
+	return time < 0 ? time : 0;
+}
+
+void
+ovly827e_ntfy_reset(struct nouveau_bo *bo, u32 offset)
+{
+	nouveau_bo_wr32(bo, offset / 4 + 0, 0x00000000);
+	nouveau_bo_wr32(bo, offset / 4 + 1, 0x00000000);
+	nouveau_bo_wr32(bo, offset / 4 + 2, 0x00000000);
+	nouveau_bo_wr32(bo, offset / 4 + 3, 0x80000000);
+}
 
 static const struct nv50_wndw_func
 ovly827e = {
+	.acquire = ovly507e_acquire,
+	.release = ovly507e_release,
+	.ntfy_set = ovly507e_ntfy_set,
+	.ntfy_clr = ovly507e_ntfy_clr,
+	.ntfy_reset = ovly827e_ntfy_reset,
+	.ntfy_wait_begun = ovly827e_ntfy_wait_begun,
+	.image_set = ovly827e_image_set,
+	.image_clr = ovly507e_image_clr,
+	.scale_set = ovly507e_scale_set,
+	.update = ovly507e_update,
 };
 
 const u32
 ovly827e_format[] = {
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
 	0
 };
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
index f50da6461d41..a3ce53046015 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -20,9 +20,45 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include "ovly.h"
+#include "atom.h"
 
-static const struct nv50_wndw_func
+static void
+ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 12))) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, asyw->image.interval << 4);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, asyw->image.handle[0]);
+		evo_mthd(push, 0x0100, 1);
+		evo_data(push, 0x00000002);
+		evo_mthd(push, 0x0400, 1);
+		evo_data(push, asyw->image.offset[0] >> 8);
+		evo_mthd(push, 0x0408, 3);
+		evo_data(push, asyw->image.h << 16 | asyw->image.w);
+		evo_data(push, asyw->image.layout << 24 |
+			       (asyw->image.pitch[0] >> 8) << 8 |
+			       asyw->image.blocks[0] << 8 |
+			       asyw->image.blockh);
+		evo_data(push, asyw->image.format << 8 |
+			       asyw->image.colorspace);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+const struct nv50_wndw_func
 ovly907e = {
+	.acquire = ovly507e_acquire,
+	.release = ovly507e_release,
+	.ntfy_set = ovly507e_ntfy_set,
+	.ntfy_clr = ovly507e_ntfy_clr,
+	.ntfy_reset = ovly827e_ntfy_reset,
+	.ntfy_wait_begun = ovly827e_ntfy_wait_begun,
+	.image_set = ovly907e_image_set,
+	.image_clr = ovly507e_image_clr,
+	.scale_set = ovly507e_scale_set,
+	.update = ovly507e_update,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
new file mode 100644
index 000000000000..505fa7e78523
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ovly.h"
+
+static const u32
+ovly917e_format[] = {
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_ARGB2101010,
+	0
+};
+
+int
+ovly917e_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return ovly507e_new_(&ovly907e, ovly917e_format, drm, head, oclass,
+			     0x00000004 << (head * 4), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 861fb0ec6b61..c7c08fae383f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -146,6 +146,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 		wndw->func->xlut_set(wndw, asyw);
 	}
 
+	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
 	if (asyw->set.point) {
 		wndw->immd->point(wndw, asyw);
 		wndw->immd->update(wndw, interlock);
@@ -180,6 +181,20 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
 	asyw->sema.handle = 0;
 }
 
+static int
+nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
+{
+	switch (asyw->state.fb->format->format) {
+	case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
+	case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	asyw->image.colorspace = 1;
+	return 0;
+}
+
 static int
 nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
 {
@@ -197,9 +212,9 @@ nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_ARGB2101010: asyw->image.format = 0xdf; break;
 	default:
-		WARN_ON(1);
 		return -EINVAL;
 	}
+	asyw->image.colorspace = 0;
 	return 0;
 }
 
@@ -221,8 +236,11 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
 		asyw->image.kind = fb->nvbo->kind;
 
 		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
-		if (ret)
-			return ret;
+		if (ret) {
+			ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
+			if (ret)
+				return ret;
+		}
 
 		if (asyw->image.kind) {
 			asyw->image.layout = 0;
@@ -247,6 +265,17 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
 		asyw->set.image = wndw->func->image_set != NULL;
 	}
 
+	if (wndw->func->scale_set) {
+		asyw->scale.sx = asyw->state.src_x >> 16;
+		asyw->scale.sy = asyw->state.src_y >> 16;
+		asyw->scale.sw = asyw->state.src_w >> 16;
+		asyw->scale.sh = asyw->state.src_h >> 16;
+		asyw->scale.dw = asyw->state.crtc_w;
+		asyw->scale.dh = asyw->state.crtc_h;
+		if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
+			asyw->set.scale = true;
+	}
+
 	if (wndw->immd) {
 		asyw->point.x = asyw->state.crtc_x;
 		asyw->point.y = asyw->state.crtc_y;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 223cf3f37dae..745304d06af1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -70,15 +70,21 @@ struct nv50_wndw_func {
 	void (*xlut_clr)(struct nv50_wndw *);
 	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 	void (*image_clr)(struct nv50_wndw *);
+	void (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
 
 	void (*update)(struct nv50_wndw *, u32 *interlock);
 };
 
 extern const struct drm_plane_funcs nv50_wndw;
 
+void base507c_ntfy_reset(struct nouveau_bo *, u32);
+int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+
 struct nv50_wimm_func {
 	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
 
 	void (*update)(struct nv50_wndw *, u32 *interlock);
 };
+
+extern const struct nv50_wimm_func curs507a;
 #endif

From 890c85f3ee106e2f13cb510d3ee73f0214e1c620 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1136/1286] drm/nouveau/core: increase maximum number of copy
 engines to 9

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 9 ++++++---
 drivers/gpu/drm/nouveau/nvkm/core/subdev.c         | 3 +++
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c  | 6 ++++++
 3 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index f2f9b9e7ce2e..08c52e3afc03 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -38,7 +38,10 @@ enum nvkm_devidx {
 	NVKM_ENGINE_CE3,
 	NVKM_ENGINE_CE4,
 	NVKM_ENGINE_CE5,
-	NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5,
+	NVKM_ENGINE_CE6,
+	NVKM_ENGINE_CE7,
+	NVKM_ENGINE_CE8,
+	NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE8,
 
 	NVKM_ENGINE_CIPHER,
 	NVKM_ENGINE_DISP,
@@ -145,7 +148,7 @@ struct nvkm_device {
 	struct nvkm_volt *volt;
 
 	struct nvkm_engine *bsp;
-	struct nvkm_engine *ce[6];
+	struct nvkm_engine *ce[9];
 	struct nvkm_engine *cipher;
 	struct nvkm_disp *disp;
 	struct nvkm_dma *dma;
@@ -217,7 +220,7 @@ struct nvkm_device_chip {
 	int (*volt    )(struct nvkm_device *, int idx, struct nvkm_volt **);
 
 	int (*bsp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*ce[6]   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*ce[9]   )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*cipher  )(struct nvkm_device *, int idx, struct nvkm_engine **);
 	int (*disp    )(struct nvkm_device *, int idx, struct nvkm_disp **);
 	int (*dma     )(struct nvkm_device *, int idx, struct nvkm_dma **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index b96f9e2f237a..03f676c18aad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -61,6 +61,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
 	[NVKM_ENGINE_CE3     ] = "ce3",
 	[NVKM_ENGINE_CE4     ] = "ce4",
 	[NVKM_ENGINE_CE5     ] = "ce5",
+	[NVKM_ENGINE_CE6     ] = "ce6",
+	[NVKM_ENGINE_CE7     ] = "ce7",
+	[NVKM_ENGINE_CE8     ] = "ce8",
 	[NVKM_ENGINE_CIPHER  ] = "cipher",
 	[NVKM_ENGINE_DISP    ] = "disp",
 	[NVKM_ENGINE_DMAOBJ  ] = "dma",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 5c79c795acaa..b9b6bef3b805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2471,6 +2471,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
 	_(CE3    , device->ce[3]   ,  device->ce[3]);
 	_(CE4    , device->ce[4]   ,  device->ce[4]);
 	_(CE5    , device->ce[5]   ,  device->ce[5]);
+	_(CE6    , device->ce[6]   ,  device->ce[6]);
+	_(CE7    , device->ce[7]   ,  device->ce[7]);
+	_(CE8    , device->ce[8]   ,  device->ce[8]);
 	_(CIPHER , device->cipher  ,  device->cipher);
 	_(DISP   , device->disp    , &device->disp->engine);
 	_(DMAOBJ , device->dma     , &device->dma->engine);
@@ -2925,6 +2928,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		_(NVKM_ENGINE_CE3     ,    ce[3]);
 		_(NVKM_ENGINE_CE4     ,    ce[4]);
 		_(NVKM_ENGINE_CE5     ,    ce[5]);
+		_(NVKM_ENGINE_CE6     ,    ce[6]);
+		_(NVKM_ENGINE_CE7     ,    ce[7]);
+		_(NVKM_ENGINE_CE8     ,    ce[8]);
 		_(NVKM_ENGINE_CIPHER  ,   cipher);
 		_(NVKM_ENGINE_DISP    ,     disp);
 		_(NVKM_ENGINE_DMAOBJ  ,      dma);

From c1f856bb99499f82420d74886884d193e9d63db7 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1137/1286] drm/nouveau/core: recognise gv100

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/cl0080.h      | 1 +
 drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 1 +
 drivers/gpu/drm/nouveau/nouveau_abi16.c            | 1 +
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c  | 7 +++++++
 drivers/gpu/drm/nouveau/nvkm/engine/device/user.c  | 1 +
 5 files changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 49c1c90d2bde..4f5233107f5f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -31,6 +31,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_KEPLER                                           0x08
 #define NV_DEVICE_INFO_V0_MAXWELL                                          0x09
 #define NV_DEVICE_INFO_V0_PASCAL                                           0x0a
+#define NV_DEVICE_INFO_V0_VOLTA                                            0x0b
 	__u8  family;
 	__u8  pad06[2];
 	__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 08c52e3afc03..d83d834b7452 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -113,6 +113,7 @@ struct nvkm_device {
 		NV_E0    = 0xe0,
 		GM100    = 0x110,
 		GP100    = 0x130,
+		GV100    = 0x140,
 	} card_type;
 	u32 chipset;
 	u8  chiprev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index ea2472770b21..e2211bb2cf79 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -103,6 +103,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
 	case NV_DEVICE_INFO_V0_KEPLER:
 	case NV_DEVICE_INFO_V0_MAXWELL:
 	case NV_DEVICE_INFO_V0_PASCAL:
+	case NV_DEVICE_INFO_V0_VOLTA:
 		return NVIF_CLASS_SW_GF100;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b9b6bef3b805..e0e4d286f7e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2394,6 +2394,11 @@ nv13b_chipset = {
 	.sw = gf100_sw_new,
 };
 
+static const struct nvkm_device_chip
+nv140_chipset = {
+	.name = "GV100",
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
 		       struct nvkm_notify *notify)
@@ -2750,6 +2755,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 			case 0x110:
 			case 0x120: device->card_type = GM100; break;
 			case 0x130: device->card_type = GP100; break;
+			case 0x140: device->card_type = GV100; break;
 			default:
 				break;
 			}
@@ -2841,6 +2847,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 		case 0x137: device->chip = &nv137_chipset; break;
 		case 0x138: device->chip = &nv138_chipset; break;
 		case 0x13b: device->chip = &nv13b_chipset; break;
+		case 0x140: device->chip = &nv140_chipset; break;
 		default:
 			nvdev_error(device, "unknown chipset (%08x)\n", boot0);
 			goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 600bdb870462..dde6bbafa709 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -174,6 +174,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
 	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
 	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
 	case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
+	case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
 	default:
 		args->v0.family = 0;
 		break;

From 893855d8215f5ec6b0e1fac399960405c8237c53 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1138/1286] drm/nouveau/pci/gv100: initial support

The PCI interface appears to be compatible with GP100, so the GP100 code
is reused.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e0e4d286f7e6..3674db6a1a0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2397,6 +2397,7 @@ nv13b_chipset = {
 static const struct nvkm_device_chip
 nv140_chipset = {
 	.name = "GV100",
+	.pci = gp100_pci_new,
 };
 
 static int

From 75e482efd38a40497e06c217e6ae9f92940b218e Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1139/1286] drm/nouveau/bios/gv100: initial support

No real surprises here so far; the only change needed is reading the
shadow VBIOS image address from a different register on GV100.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c      | 1 +
 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 3674db6a1a0f..939452276a2d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2397,6 +2397,7 @@ nv13b_chipset = {
 static const struct nvkm_device_chip
 nv140_chipset = {
 	.name = "GV100",
+	.bios = nvkm_bios_new,
 	.pci = gp100_pci_new,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index 0f537c22804c..3634cd0630b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -78,7 +78,10 @@ pramin_init(struct nvkm_bios *bios, const char *name)
 	 * important as we don't want to be touching vram on an
 	 * uninitialised board
 	 */
-	addr = nvkm_rd32(device, 0x619f04);
+	if (device->card_type >= GV100)
+		addr = nvkm_rd32(device, 0x625f04);
+	else
+		addr = nvkm_rd32(device, 0x619f04);
 	if (!(addr & 0x00000008)) {
 		nvkm_debug(subdev, "... not enabled\n");
 		return ERR_PTR(-ENODEV);

From 6827c9a8683d8102479ba7add0cc7ab181143c82 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1140/1286] drm/nouveau/bios/pll: limits table 5.0

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/nvkm/subdev/bios/pll.c    | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index 2ca23a9157ab..e6e804cee2bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -193,7 +193,10 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
 		data += hdr;
 		while (cnt--) {
 			if (nvbios_rd08(bios, data + 0) == type) {
-				*reg = nvbios_rd32(bios, data + 3);
+				if (*ver < 0x50)
+					*reg = nvbios_rd32(bios, data + 3);
+				else
+					*reg = 0;
 				return data;
 			}
 			data += *len;
@@ -361,6 +364,20 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
 		info->min_p = nvbios_rd08(bios, data + 12);
 		info->max_p = nvbios_rd08(bios, data + 13);
 		break;
+	case 0x50:
+		info->refclk = nvbios_rd16(bios, data + 1) * 1000;
+		/* info->refclk_alt = nvbios_rd16(bios, data + 3) * 1000; */
+		info->vco1.min_freq = nvbios_rd16(bios, data + 5) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 7) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 9) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 11) * 1000;
+		info->vco1.min_m = nvbios_rd08(bios, data + 13);
+		info->vco1.max_m = nvbios_rd08(bios, data + 14);
+		info->vco1.min_n = nvbios_rd08(bios, data + 15);
+		info->vco1.max_n = nvbios_rd08(bios, data + 16);
+		info->min_p = nvbios_rd08(bios, data + 17);
+		info->max_p = nvbios_rd08(bios, data + 18);
+		break;
 	default:
 		nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver);
 		return -EINVAL;

From 8769dc989c53e5ed38460b7585d55f381f51e9d8 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1141/1286] drm/nouveau/devinit/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../drm/nouveau/include/nvkm/subdev/devinit.h |  1 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |  1 +
 .../drm/nouveau/nvkm/subdev/devinit/Kbuild    |  1 +
 .../drm/nouveau/nvkm/subdev/devinit/gm200.c   |  2 +-
 .../drm/nouveau/nvkm/subdev/devinit/gv100.c   | 79 +++++++++++++++++++
 .../drm/nouveau/nvkm/subdev/devinit/nv50.h    |  2 +
 6 files changed, 85 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 40558064d589..486e7635c29d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -30,4 +30,5 @@ int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 939452276a2d..4a2d413f12b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2398,6 +2398,7 @@ static const struct nvkm_device_chip
 nv140_chipset = {
 	.name = "GV100",
 	.bios = nvkm_bios_new,
+	.devinit = gv100_devinit_new,
 	.pci = gp100_pci_new,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index eac88e3dc6e5..50a436926484 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/devinit/mcp89.o
 nvkm-y += nvkm/subdev/devinit/gf100.o
 nvkm-y += nvkm/subdev/devinit/gm107.o
 nvkm-y += nvkm/subdev/devinit/gm200.o
+nvkm-y += nvkm/subdev/devinit/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 1730371933df..b80618e35491 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -107,7 +107,7 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
 	return pmu_exec(init, pmu.init_addr_pmu), 0;
 }
 
-static int
+int
 gm200_devinit_post(struct nvkm_devinit *base, bool post)
 {
 	struct nv50_devinit *init = nv50_devinit(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
new file mode 100644
index 000000000000..fbde6828bd38
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+gv100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvbios_pll info;
+	int head = type - PLL_VPLL0;
+	int N, fN, M, P;
+	int ret;
+
+	ret = nvbios_pll_parse(device->bios, type, &info);
+	if (ret)
+		return ret;
+
+	ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+	if (ret < 0)
+		return ret;
+
+	switch (info.type) {
+	case PLL_VPLL0:
+	case PLL_VPLL1:
+	case PLL_VPLL2:
+	case PLL_VPLL3:
+		nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
+		nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
+							    (N <<  8) |
+							    (M <<  0));
+		break;
+	default:
+		nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static const struct nvkm_devinit_func
+gv100_devinit = {
+	.preinit = gf100_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = gm200_devinit_post,
+	.pll_set = gv100_devinit_pll_set,
+	.disable = gm107_devinit_disable,
+};
+
+int
+gv100_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gv100_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 315ebaff1165..9b9f0dc1e192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -24,4 +24,6 @@ int  gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 void gf100_devinit_preinit(struct nvkm_devinit *);
 
 u64  gm107_devinit_disable(struct nvkm_devinit *);
+
+int gm200_devinit_post(struct nvkm_devinit *, bool);
 #endif

From a1c771a5cb86e2a45bb4516b40f6127112aaa464 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1142/1286] drm/nouveau/top/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c   | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 4a2d413f12b9..777ba68f4a0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2400,6 +2400,7 @@ nv140_chipset = {
 	.bios = nvkm_bios_new,
 	.devinit = gv100_devinit_new,
 	.pci = gp100_pci_new,
+	.top = gk104_top_new,
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index fea4957291da..4f1f3e890650 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -48,7 +48,8 @@ gk104_top_oneinit(struct nvkm_top *top)
 		case 0x00000001: /* DATA */
 			inst        = (data & 0x3c000000) >> 26;
 			info->addr  = (data & 0x00fff000);
-			info->fault = (data & 0x000000f8) >> 3;
+			if (data & 0x00000004)
+				info->fault = (data & 0x000003f8) >> 3;
 			break;
 		case 0x00000002: /* ENUM */
 			if (data & 0x00000020)

From 46fe1a813adf9abcf71d0a6641ef2de4ce443485 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1143/1286] drm/nouveau/ibus/gv100: initial support

Appears to be compatible with GM200.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 777ba68f4a0f..def4acdb860e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2399,6 +2399,7 @@ nv140_chipset = {
 	.name = "GV100",
 	.bios = nvkm_bios_new,
 	.devinit = gv100_devinit_new,
+	.ibus = gm200_ibus_new,
 	.pci = gp100_pci_new,
 	.top = gk104_top_new,
 };

From 8afbcca54976abd47d3cd77ffd99aa8103944483 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1144/1286] drm/nouveau/gpio/gv100: initial support

Appears to be compatible with GK104.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index def4acdb860e..5fbc0867cd2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2399,6 +2399,7 @@ nv140_chipset = {
 	.name = "GV100",
 	.bios = nvkm_bios_new,
 	.devinit = gv100_devinit_new,
+	.gpio = gk104_gpio_new,
 	.ibus = gm200_ibus_new,
 	.pci = gp100_pci_new,
 	.top = gk104_top_new,

From d2e3b57d81992442221f00938d137568ee895953 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1145/1286] drm/nouveau/i2c/gv100: initial support

Appears to be compatible with GM200.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 5fbc0867cd2c..92290de0e6bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2400,6 +2400,7 @@ nv140_chipset = {
 	.bios = nvkm_bios_new,
 	.devinit = gv100_devinit_new,
 	.gpio = gk104_gpio_new,
+	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.pci = gp100_pci_new,
 	.top = gk104_top_new,

From 292550499af0117c7137071d8a0aee93fab81f44 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1146/1286] drm/nouveau/fuse/gv100: initial support

Appears to be compatible with GM107.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 92290de0e6bf..a9a087a5b94b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2399,6 +2399,7 @@ nv140_chipset = {
 	.name = "GV100",
 	.bios = nvkm_bios_new,
 	.devinit = gv100_devinit_new,
+	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,

From 41af75bd3569eeb105635455cc7a99a930dc35ff Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1147/1286] drm/nouveau/mc/gv100: initial support

Appears to be compatible with GP100.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index a9a087a5b94b..d3306753d4f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2403,6 +2403,7 @@ nv140_chipset = {
 	.gpio = gk104_gpio_new,
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
+	.mc = gp100_mc_new,
 	.pci = gp100_pci_new,
 	.top = gk104_top_new,
 };

From 9506bd24072af64dae998ac5b62c16b3492fc8e8 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1148/1286] drm/nouveau/bus/gv100: initial support

Appears to be compatible with GF100.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index d3306753d4f8..9b2b3b8aa5d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2398,6 +2398,7 @@ static const struct nvkm_device_chip
 nv140_chipset = {
 	.name = "GV100",
 	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
 	.devinit = gv100_devinit_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,

From 936240c9bbb2f10fe11b8fbd4aca79f8f5b1b1da Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1149/1286] drm/nouveau/tmr/gv100: initial support

Appears to be compatible with GK20A.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 9b2b3b8aa5d9..87d50bd4866a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2406,6 +2406,7 @@ nv140_chipset = {
 	.ibus = gm200_ibus_new,
 	.mc = gp100_mc_new,
 	.pci = gp100_pci_new,
+	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
 };
 

From a4a0cfb6420152de2725e1e407fd86301aceccd1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1150/1286] drm/nouveau/imem/gv100: initial support

Instance memory handling is not expected to differ from previous generations,
so the existing nv50_instmem implementation is reused.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 87d50bd4866a..8528e8366842 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2404,6 +2404,7 @@ nv140_chipset = {
 	.gpio = gk104_gpio_new,
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
+	.imem = nv50_instmem_new,
 	.mc = gp100_mc_new,
 	.pci = gp100_pci_new,
 	.timer = gk20a_timer_new,

From 3582942c2820a4dfcd64585140bc6e1ad72c1130 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1151/1286] drm/nouveau/fb/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/include/nvkm/subdev/fb.h  |  1 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |  1 +
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild |  1 +
 .../gpu/drm/nouveau/nvkm/subdev/fb/gp100.c    |  2 +-
 .../gpu/drm/nouveau/nvkm/subdev/fb/gv100.c    | 46 +++++++++++++++++++
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h |  2 +
 6 files changed, 52 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 92be0e5269c6..96ccc624ee81 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -83,6 +83,7 @@ int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 8528e8366842..170a7c5224db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2400,6 +2400,7 @@ nv140_chipset = {
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gv100_devinit_new,
+	.fb = gv100_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
 	.i2c = gm200_i2c_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index b4f22cce5d43..969610951263 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -30,6 +30,7 @@ nvkm-y += nvkm/subdev/fb/gm20b.o
 nvkm-y += nvkm/subdev/fb/gp100.o
 nvkm-y += nvkm/subdev/fb/gp102.o
 nvkm-y += nvkm/subdev/fb/gp10b.o
+nvkm-y += nvkm/subdev/fb/gv100.o
 
 nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index d0a47b9a8cd8..dffe1f5e1071 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -26,7 +26,7 @@
 
 #include <core/memory.h>
 
-static void
+void
 gp100_fb_init_unkn(struct nvkm_fb *base)
 {
 	struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
new file mode 100644
index 000000000000..3c5e02e9794a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static int
+gv100_fb_init_page(struct nvkm_fb *fb)
+{
+	return (fb->page == 16) ? 0 : -EINVAL;
+}
+
+static const struct nvkm_fb_func
+gv100_fb = {
+	.dtor = gf100_fb_dtor,
+	.oneinit = gf100_fb_oneinit,
+	.init = gp100_fb_init,
+	.init_page = gv100_fb_init_page,
+	.init_unkn = gp100_fb_init_unkn,
+	.ram_new = gp100_ram_new,
+	.default_bigpage = 16,
+};
+
+int
+gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gv100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 414a423e0e55..2857f31466bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -68,4 +68,6 @@ int gf100_fb_oneinit(struct nvkm_fb *);
 int gf100_fb_init_page(struct nvkm_fb *);
 
 int gm200_fb_init_page(struct nvkm_fb *);
+
+void gp100_fb_init_unkn(struct nvkm_fb *);
 #endif

From 1bce57250ad226e410f9a1a55c0722f075b01652 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1152/1286] drm/nouveau/ltc/gv100: initial support

Appears to be compatible with GP102.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 170a7c5224db..8da7b44697c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2406,6 +2406,7 @@ nv140_chipset = {
 	.i2c = gm200_i2c_new,
 	.ibus = gm200_ibus_new,
 	.imem = nv50_instmem_new,
+	.ltc = gp102_ltc_new,
 	.mc = gp100_mc_new,
 	.pci = gp100_pci_new,
 	.timer = gk20a_timer_new,

From edf50395c7c5e8563843eb586aae57c7ac1214ed Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1153/1286] drm/nouveau/mmu/gv100: initial support

VEID support hacked in here, as it's the most convenient place for now.

Will be refined once it's better understood.
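
As a sketch of what gv100_vmm_join() below sets up in the instance block
(offsets and meanings are a reading of this patch, not of documentation):

	/*
	 *   0x0200/0x0204           PD base words left there by gp100_vmm_join()
	 *   0x021c                  cleared
	 *   0x0298/0x029c           64-bit VEID enable mask (only bit 0 set here)
	 *   0x02a0 + veid * 0x10    per-VEID copy of the PD base low word
	 *   0x02a4 + veid * 0x10    per-VEID copy of the PD base high word
	 *   0x02a8 + veid * 0x10    cleared
	 *
	 * VEIDs not enabled in the mask get 0x00000001 written to both PD
	 * base words instead.
	 */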

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/include/nvkm/subdev/mmu.h |  1 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |  1 +
 .../gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild    |  2 +
 .../gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c   | 43 +++++++++
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h |  3 +
 .../drm/nouveau/nvkm/subdev/mmu/vmmgv100.c    | 87 +++++++++++++++++++
 6 files changed, 137 insertions(+)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index baab93398e54..688595545e21 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -129,4 +129,5 @@ int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 8da7b44697c4..46dff27a234b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2408,6 +2408,7 @@ nv140_chipset = {
 	.imem = nv50_instmem_new,
 	.ltc = gp102_ltc_new,
 	.mc = gp100_mc_new,
+	.mmu = gv100_mmu_new,
 	.pci = gp100_pci_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 67ee983bb026..58a24e3a0598 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -12,6 +12,7 @@ nvkm-y += nvkm/subdev/mmu/gm200.o
 nvkm-y += nvkm/subdev/mmu/gm20b.o
 nvkm-y += nvkm/subdev/mmu/gp100.o
 nvkm-y += nvkm/subdev/mmu/gp10b.o
+nvkm-y += nvkm/subdev/mmu/gv100.o
 
 nvkm-y += nvkm/subdev/mmu/mem.o
 nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -31,6 +32,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgm200.o
 nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
 nvkm-y += nvkm/subdev/mmu/vmmgp100.o
 nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
+nvkm-y += nvkm/subdev/mmu/vmmgv100.o
 
 nvkm-y += nvkm/subdev/mmu/umem.o
 nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
new file mode 100644
index 000000000000..f666cb57f69e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gv100_mmu = {
+	.dma_bits = 47,
+	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+	.mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
+	.kind = gm200_mmu_kind,
+	.kind_sys = true,
+};
+
+int
+gv100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&gv100_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index da06e64d8a7d..1a3b0a3724ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -236,6 +236,9 @@ int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
 int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
 		  struct lock_class_key *, const char *,
 		  struct nvkm_vmm **);
+int gv100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+		  struct lock_class_key *, const char *,
+		  struct nvkm_vmm **);
 
 #define VMM_PRINT(l,v,p,f,a...) do {                                           \
 	struct nvkm_vmm *_vmm = (v);                                           \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
new file mode 100644
index 000000000000..2fa40c16e6d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+
+#include <nvif/ifc00d.h>
+#include <nvif/unpack.h>
+
+int
+gv100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+	u64 data[2], mask;
+	int ret = gp100_vmm_join(vmm, inst), i;
+	if (ret)
+		return ret;
+
+	nvkm_kmap(inst);
+	data[0] = nvkm_ro32(inst, 0x200);
+	data[1] = nvkm_ro32(inst, 0x204);
+	mask = BIT_ULL(0);
+
+	nvkm_wo32(inst, 0x21c, 0x00000000);
+
+	for (i = 0; i < 64; i++) {
+		if (mask & BIT_ULL(i)) {
+			nvkm_wo32(inst, 0x2a4 + (i * 0x10), data[1]);
+			nvkm_wo32(inst, 0x2a0 + (i * 0x10), data[0]);
+		} else {
+			nvkm_wo32(inst, 0x2a4 + (i * 0x10), 0x00000001);
+			nvkm_wo32(inst, 0x2a0 + (i * 0x10), 0x00000001);
+		}
+		nvkm_wo32(inst, 0x2a8 + (i * 0x10), 0x00000000);
+	}
+
+	nvkm_wo32(inst, 0x298, lower_32_bits(mask));
+	nvkm_wo32(inst, 0x29c, upper_32_bits(mask));
+	nvkm_done(inst);
+	return 0;
+}
+
+static const struct nvkm_vmm_func
+gv100_vmm = {
+	.join = gv100_vmm_join,
+	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gp100_vmm_valid,
+	.flush = gp100_vmm_flush,
+	.page = {
+		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+		{}
+	}
+};
+
+int
+gv100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+	      struct lock_class_key *key, const char *name,
+	      struct nvkm_vmm **pvmm)
+{
+	return nv04_vmm_new_(&gv100_vmm, mmu, 0, addr, size,
+			     argv, argc, key, name, pvmm);
+}

From 013b7b37739ca883b2dd5ef979e0e250ac3dafc1 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1154/1286] drm/nouveau/bar/gv100: initial support

Appears to be compatible with GM107.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 46dff27a234b..6aaa3d9cb88c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2397,6 +2397,7 @@ nv13b_chipset = {
 static const struct nvkm_device_chip
 nv140_chipset = {
 	.name = "GV100",
+	.bar = gm107_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gv100_devinit_new,

From 8b811951c604e417b4511e3d17a75bb8c84b8f08 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1155/1286] drm/nouveau/fault/gv100: initial support
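
This grows the subdev to two fault buffers and adds a notify handle (nrpfb)
that kicks buffer 0 processing.  For reference, the 32-byte fault-buffer
entry as decoded by gv100_fault_buffer_process() below (bit positions are a
reading of the masks/shifts in this patch, not of hardware documentation):

	/*
	 *   0x00  inst lo      0x04  inst hi
	 *   0x08  addr lo      0x0c  addr hi
	 *   0x10  time lo      0x14  time hi
	 *   0x18  info0: engine in bits 7:0
	 *   0x1c  info1: valid bit 31, gpc bits 28:24, hub bit 20,
	 *          access bits 19:16, client bits 14:8, reason bits 4:0
	 */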

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../drm/nouveau/include/nvkm/subdev/fault.h   |   6 +-
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |   1 +
 .../gpu/drm/nouveau/nvkm/subdev/fault/Kbuild  |   1 +
 .../gpu/drm/nouveau/nvkm/subdev/fault/gv100.c | 206 ++++++++++++++++++
 4 files changed, 213 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 8e9bc30fe65d..5a77498fe6a0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -1,15 +1,18 @@
 #ifndef __NVKM_FAULT_H__
 #define __NVKM_FAULT_H__
 #include <core/subdev.h>
+#include <core/notify.h>
 
 struct nvkm_fault {
 	const struct nvkm_fault_func *func;
 	struct nvkm_subdev subdev;
 
-	struct nvkm_fault_buffer *buffer[1];
+	struct nvkm_fault_buffer *buffer[2];
 	int buffer_nr;
 
 	struct nvkm_event event;
+
+	struct nvkm_notify nrpfb;
 };
 
 struct nvkm_fault_data {
@@ -26,4 +29,5 @@ struct nvkm_fault_data {
 };
 
 int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 6aaa3d9cb88c..7f0385dbed06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2401,6 +2401,7 @@ nv140_chipset = {
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
 	.devinit = gv100_devinit_new,
+	.fault = gv100_fault_new,
 	.fb = gv100_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 807ea402a162..45bb46fb0929 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -1,2 +1,3 @@
 nvkm-y += nvkm/subdev/fault/base.o
 nvkm-y += nvkm/subdev/fault/gp100.o
+nvkm-y += nvkm/subdev/fault/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
new file mode 100644
index 000000000000..73c7728b5969
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+static void
+gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	struct nvkm_memory *mem = buffer->mem;
+	const u32 foff = buffer->id * 0x14;
+	u32 get = nvkm_rd32(device, 0x100e2c + foff);
+	u32 put = nvkm_rd32(device, 0x100e30 + foff);
+	if (put == get)
+		return;
+
+	nvkm_kmap(mem);
+	while (get != put) {
+		const u32   base = get * buffer->fault->func->buffer.entry_size;
+		const u32 instlo = nvkm_ro32(mem, base + 0x00);
+		const u32 insthi = nvkm_ro32(mem, base + 0x04);
+		const u32 addrlo = nvkm_ro32(mem, base + 0x08);
+		const u32 addrhi = nvkm_ro32(mem, base + 0x0c);
+		const u32 timelo = nvkm_ro32(mem, base + 0x10);
+		const u32 timehi = nvkm_ro32(mem, base + 0x14);
+		const u32  info0 = nvkm_ro32(mem, base + 0x18);
+		const u32  info1 = nvkm_ro32(mem, base + 0x1c);
+		struct nvkm_fault_data info;
+
+		if (++get == buffer->entries)
+			get = 0;
+		nvkm_wr32(device, 0x100e2c + foff, get);
+
+		info.addr   = ((u64)addrhi << 32) | addrlo;
+		info.inst   = ((u64)insthi << 32) | instlo;
+		info.time   = ((u64)timehi << 32) | timelo;
+		info.engine = (info0 & 0x000000ff);
+		info.valid  = (info1 & 0x80000000) >> 31;
+		info.gpc    = (info1 & 0x1f000000) >> 24;
+		info.hub    = (info1 & 0x00100000) >> 20;
+		info.access = (info1 & 0x000f0000) >> 16;
+		info.client = (info1 & 0x00007f00) >> 8;
+		info.reason = (info1 & 0x0000001f);
+
+		nvkm_fifo_fault(device->fifo, &info);
+	}
+	nvkm_done(mem);
+}
+
+static void
+gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
+	const u32 foff = buffer->id * 0x14;
+
+	nvkm_mask(device, 0x100a34, intr, intr);
+	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
+}
+
+static void
+gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
+	const u32 foff = buffer->id * 0x14;
+
+	nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
+	nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->vma->addr));
+	nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->vma->addr));
+	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x100a2c, intr, intr);
+}
+
+static u32
+gv100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+{
+	struct nvkm_device *device = buffer->fault->subdev.device;
+	const u32 foff = buffer->id * 0x14;
+	nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);
+	return nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+}
+
+static int
+gv100_fault_ntfy_nrpfb(struct nvkm_notify *notify)
+{
+	struct nvkm_fault *fault = container_of(notify, typeof(*fault), nrpfb);
+	gv100_fault_buffer_process(fault->buffer[0]);
+	return NVKM_NOTIFY_KEEP;
+}
+
+static void
+gv100_fault_intr_fault(struct nvkm_fault *fault)
+{
+	struct nvkm_subdev *subdev = &fault->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fault_data info;
+	const u32 addrlo = nvkm_rd32(device, 0x100e4c);
+	const u32 addrhi = nvkm_rd32(device, 0x100e50);
+	const u32  info0 = nvkm_rd32(device, 0x100e54);
+	const u32 insthi = nvkm_rd32(device, 0x100e58);
+	const u32  info1 = nvkm_rd32(device, 0x100e5c);
+
+	info.addr = ((u64)addrhi << 32) | addrlo;
+	info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
+	info.time = 0;
+	info.engine = (info0 & 0x000000ff);
+	info.valid  = (info1 & 0x80000000) >> 31;
+	info.gpc    = (info1 & 0x1f000000) >> 24;
+	info.hub    = (info1 & 0x00100000) >> 20;
+	info.access = (info1 & 0x000f0000) >> 16;
+	info.client = (info1 & 0x00007f00) >> 8;
+	info.reason = (info1 & 0x0000001f);
+
+	nvkm_fifo_fault(device->fifo, &info);
+}
+
+static void
+gv100_fault_intr(struct nvkm_fault *fault)
+{
+	struct nvkm_subdev *subdev = &fault->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x100a20);
+
+	if (stat & 0x80000000) {
+		gv100_fault_intr_fault(fault);
+		nvkm_wr32(device, 0x100e60, 0x80000000);
+		stat &= ~0x80000000;
+	}
+
+	if (stat & 0x20000000) {
+		if (fault->buffer[0]) {
+			nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+			stat &= ~0x20000000;
+		}
+	}
+
+	if (stat) {
+		nvkm_debug(subdev, "intr %08x\n", stat);
+	}
+}
+
+static void
+gv100_fault_fini(struct nvkm_fault *fault)
+{
+	nvkm_notify_put(&fault->nrpfb);
+	nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
+}
+
+static void
+gv100_fault_init(struct nvkm_fault *fault)
+{
+	nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
+	nvkm_notify_get(&fault->nrpfb);
+}
+
+static const struct nvkm_fault_func
+gv100_fault = {
+	.init = gv100_fault_init,
+	.fini = gv100_fault_fini,
+	.intr = gv100_fault_intr,
+	.buffer.nr = 2,
+	.buffer.entry_size = 32,
+	.buffer.entries = gv100_fault_buffer_entries,
+	.buffer.init = gv100_fault_buffer_init,
+	.buffer.fini = gv100_fault_buffer_fini,
+};
+
+int
+gv100_fault_new(struct nvkm_device *device, int index,
+		struct nvkm_fault **pfault)
+{
+	struct nvkm_fault *fault;
+	int ret;
+
+	ret = nvkm_fault_new_(&gv100_fault, device, index, &fault);
+	*pfault = fault;
+	if (ret)
+		return ret;
+
+	return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
+				gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+				&fault->nrpfb);
+}

From ada0c562814ca466386d9bf1e61fd5ee46f2a72c Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1156/1286] drm/nouveau/pmu/gv100: initial support

Appears to be compatible with GP102.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7f0385dbed06..12e24ebc85b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2412,6 +2412,7 @@ nv140_chipset = {
 	.mc = gp100_mc_new,
 	.mmu = gv100_mmu_new,
 	.pci = gp100_pci_new,
+	.pmu = gp102_pmu_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
 };

From 24a7513c1026fb1b2d42df1c31ea2da56c1604e2 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1157/1286] drm/nouveau/therm/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 12e24ebc85b2..965c4332380f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2413,6 +2413,7 @@ nv140_chipset = {
 	.mmu = gv100_mmu_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
+	.therm = gp100_therm_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
 };

From 6fb566b913728fe2dadc8271a568583b8854af93 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1158/1286] drm/nouveau/dma/gv100: initial support
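
A sketch of the 24-byte GV100 DMA object as written by gv100_dmaobj_bind()
below (offsets/bits are a reading of this patch, not a class definition):

	/*
	 *   0x00  flags0: bits 1:0 target (1 vram, 2 pci, 3 pci nosnoop),
	 *          bit 2 rw, bit 6 page, bit 20 kind
	 *   0x04  start >> 8, low word      0x08  start >> 8, high word
	 *   0x0c  limit >> 8, low word      0x10  limit >> 8, high word
	 */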

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 .../gpu/drm/nouveau/include/nvkm/engine/dma.h |   1 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |   1 +
 .../gpu/drm/nouveau/nvkm/engine/dma/Kbuild    |   2 +
 .../gpu/drm/nouveau/nvkm/engine/dma/gv100.c   |  34 +++++
 .../gpu/drm/nouveau/nvkm/engine/dma/user.h    |   2 +
 .../drm/nouveau/nvkm/engine/dma/usergv100.c   | 119 ++++++++++++++++++
 6 files changed, 159 insertions(+)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 0f9c1c702ed6..f0c1b2c8c78c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -27,4 +27,5 @@ int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
 int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
 int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
 int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gv100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 965c4332380f..f2c3fe1cba2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2416,6 +2416,7 @@ nv140_chipset = {
 	.therm = gp100_therm_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
+	.dma = gv100_dma_new,
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
index c4a2ce9b0d71..e96d1f57f9f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
@@ -3,9 +3,11 @@ nvkm-y += nvkm/engine/dma/nv04.o
 nvkm-y += nvkm/engine/dma/nv50.o
 nvkm-y += nvkm/engine/dma/gf100.o
 nvkm-y += nvkm/engine/dma/gf119.o
+nvkm-y += nvkm/engine/dma/gv100.o
 
 nvkm-y += nvkm/engine/dma/user.o
 nvkm-y += nvkm/engine/dma/usernv04.o
 nvkm-y += nvkm/engine/dma/usernv50.o
 nvkm-y += nvkm/engine/dma/usergf100.o
 nvkm-y += nvkm/engine/dma/usergf119.o
+nvkm-y += nvkm/engine/dma/usergv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
new file mode 100644
index 000000000000..c65a4c2ea93d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+gv100_dma = {
+	.class_new = gv100_dmaobj_new,
+};
+
+int
+gv100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&gv100_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
index 4bbac8a21c71..9fe01fd75474 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -16,4 +16,6 @@ int gf100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
 		     struct nvkm_dmaobj **);
 int gf119_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
 		     struct nvkm_dmaobj **);
+int gv100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		     struct nvkm_dmaobj **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c
new file mode 100644
index 000000000000..39eba9fc82be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define gv100_dmaobj(p) container_of((p), struct gv100_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/cl0002.h>
+#include <nvif/unpack.h>
+
+struct gv100_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+};
+
+static int
+gv100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct gv100_dmaobj *dmaobj = gv100_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	u64 start = dmaobj->base.start >> 8;
+	u64 limit = dmaobj->base.limit >> 8;
+	int ret;
+
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(start));
+		nvkm_wo32(*pgpuobj, 0x08, upper_32_bits(start));
+		nvkm_wo32(*pgpuobj, 0x0c, lower_32_bits(limit));
+		nvkm_wo32(*pgpuobj, 0x10, upper_32_bits(limit));
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gv100_dmaobj_func = {
+	.bind = gv100_dmaobj_bind,
+};
+
+int
+gv100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct gf119_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct gv100_dmaobj *dmaobj;
+	u32 kind, page;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&gv100_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	ret  = -ENOSYS;
+	args = data;
+
+	nvif_ioctl(parent, "create gv100 dma size %d\n", size);
+	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+		nvif_ioctl(parent,
+			   "create gv100 dma vers %d page %d kind %02x\n",
+			   args->v0.version, args->v0.page, args->v0.kind);
+		kind = args->v0.kind != 0;
+		page = args->v0.page != 0;
+	} else
+	if (size == 0) {
+		kind = 0;
+		page = GF119_DMA_V0_PAGE_SP;
+	} else
+		return ret;
+
+	if (kind)
+		dmaobj->flags0 |= 0x00100000;
+	if (page)
+		dmaobj->flags0 |= 0x00000040;
+	dmaobj->flags0 |= 0x00000004; /* rw */
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VRAM       : dmaobj->flags0 |= 0x00000001; break;
+	case NV_MEM_TARGET_PCI        : dmaobj->flags0 |= 0x00000002; break;
+	case NV_MEM_TARGET_PCI_NOSNOOP: dmaobj->flags0 |= 0x00000003; break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}

From 290ffeafcc1a953aa287c8a7bf7f6d9af25b7e77 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH 1159/1286] drm/nouveau/disp/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/class.h  |   7 +
 drivers/gpu/drm/nouveau/include/nvif/clc37b.h |  11 +
 drivers/gpu/drm/nouveau/include/nvif/clc37e.h |  13 +
 .../drm/nouveau/include/nvkm/engine/disp.h    |   1 +
 drivers/gpu/drm/nouveau/nvif/disp.c           |   1 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |   1 +
 .../gpu/drm/nouveau/nvkm/engine/disp/Kbuild   |  13 +
 .../drm/nouveau/nvkm/engine/disp/changv100.c  |  34 ++
 .../drm/nouveau/nvkm/engine/disp/channv50.h   |  14 +
 .../drm/nouveau/nvkm/engine/disp/coregv100.c  | 204 +++++++++
 .../drm/nouveau/nvkm/engine/disp/cursgv100.c  |  81 ++++
 .../drm/nouveau/nvkm/engine/disp/dmacgv100.c  |  77 ++++
 .../gpu/drm/nouveau/nvkm/engine/disp/gv100.c  | 427 ++++++++++++++++++
 .../drm/nouveau/nvkm/engine/disp/hdmigv100.c  |  85 ++++
 .../gpu/drm/nouveau/nvkm/engine/disp/head.h   |   4 +
 .../drm/nouveau/nvkm/engine/disp/headgf119.c  |   2 +-
 .../drm/nouveau/nvkm/engine/disp/headgv100.c  | 105 +++++
 .../gpu/drm/nouveau/nvkm/engine/disp/ior.h    |  10 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.c   |   9 +-
 .../gpu/drm/nouveau/nvkm/engine/disp/nv50.h   |   9 +-
 .../drm/nouveau/nvkm/engine/disp/rootgv100.c  |  52 +++
 .../drm/nouveau/nvkm/engine/disp/rootnv50.h   |   1 +
 .../drm/nouveau/nvkm/engine/disp/sorgm200.c   |   6 +-
 .../drm/nouveau/nvkm/engine/disp/sorgv100.c   | 120 +++++
 .../drm/nouveau/nvkm/engine/disp/wimmgv100.c  |  82 ++++
 .../drm/nouveau/nvkm/engine/disp/wndwgv100.c  | 184 ++++++++
 26 files changed, 1544 insertions(+), 9 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/include/nvif/clc37b.h
 create mode 100644 drivers/gpu/drm/nouveau/include/nvif/clc37e.h
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/changv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index a7c5bf572788..8c9aa556be0e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -79,6 +79,7 @@
 #define GM200_DISP                                    /* cl5070.h */ 0x00009570
 #define GP100_DISP                                    /* cl5070.h */ 0x00009770
 #define GP102_DISP                                    /* cl5070.h */ 0x00009870
+#define GV100_DISP                                    /* cl5070.h */ 0x0000c370
 
 #define NV31_MPEG                                                    0x00003174
 #define G82_MPEG                                                     0x00008274
@@ -90,6 +91,7 @@
 #define GT214_DISP_CURSOR                             /* cl507a.h */ 0x0000857a
 #define GF110_DISP_CURSOR                             /* cl507a.h */ 0x0000907a
 #define GK104_DISP_CURSOR                             /* cl507a.h */ 0x0000917a
+#define GV100_DISP_CURSOR                             /* cl507a.h */ 0x0000c37a
 
 #define NV50_DISP_OVERLAY                             /* cl507b.h */ 0x0000507b
 #define G82_DISP_OVERLAY                              /* cl507b.h */ 0x0000827b
@@ -97,6 +99,8 @@
 #define GF110_DISP_OVERLAY                            /* cl507b.h */ 0x0000907b
 #define GK104_DISP_OVERLAY                            /* cl507b.h */ 0x0000917b
 
+#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c37b
+
 #define NV50_DISP_BASE_CHANNEL_DMA                    /* cl507c.h */ 0x0000507c
 #define G82_DISP_BASE_CHANNEL_DMA                     /* cl507c.h */ 0x0000827c
 #define GT200_DISP_BASE_CHANNEL_DMA                   /* cl507c.h */ 0x0000837c
@@ -117,6 +121,7 @@
 #define GM200_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000957d
 #define GP100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000977d
 #define GP102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000987d
+#define GV100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c37d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* cl507e.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* cl507e.h */ 0x0000827e
@@ -125,6 +130,8 @@
 #define GF110_DISP_OVERLAY_CONTROL_DMA                /* cl507e.h */ 0x0000907e
 #define GK104_DISP_OVERLAY_CONTROL_DMA                /* cl507e.h */ 0x0000917e
 
+#define GV100_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c37e
+
 #define NV50_TESLA                                                   0x00005097
 #define G82_TESLA                                                    0x00008297
 #define GT200_TESLA                                                  0x00008397
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc37b.h b/drivers/gpu/drm/nouveau/include/nvif/clc37b.h
new file mode 100644
index 000000000000..89b18189d43b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc37b.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NVIF_CLC37B_H__
+#define __NVIF_CLC37B_H__
+
+struct nvc37b_window_imm_channel_dma_v0 {
+	__u8  version;
+	__u8  index;
+	__u8  pad02[6];
+	__u64 pushbuf;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc37e.h b/drivers/gpu/drm/nouveau/include/nvif/clc37e.h
new file mode 100644
index 000000000000..899db9e915ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc37e.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NVIF_CLC37E_H__
+#define __NVIF_CLC37E_H__
+
+struct nvc37e_window_channel_dma_v0 {
+	__u8  version;
+	__u8  index;
+	__u8  pad02[6];
+	__u64 pushbuf;
+};
+
+#define NVC37E_WINDOW_CHANNEL_DMA_V0_NTFY_UEVENT                           0x00
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index e83193d3ccab..ef7dc0844d26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -35,4 +35,5 @@ int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 7006482e8e29..18c7d064f75c 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -34,6 +34,7 @@ int
 nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
 {
 	static const struct nvif_mclass disps[] = {
+		{ GV100_DISP, -1 },
 		{ GP102_DISP, -1 },
 		{ GP100_DISP, -1 },
 		{ GM200_DISP, -1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index f2c3fe1cba2c..43f6b7afdb52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2416,6 +2416,7 @@ nv140_chipset = {
 	.therm = gp100_therm_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
+	.disp = gv100_disp_new,
 	.dma = gv100_dma_new,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index b580581ef5b8..3d485dbf310a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -14,12 +14,14 @@ nvkm-y += nvkm/engine/disp/gm107.o
 nvkm-y += nvkm/engine/disp/gm200.o
 nvkm-y += nvkm/engine/disp/gp100.o
 nvkm-y += nvkm/engine/disp/gp102.o
+nvkm-y += nvkm/engine/disp/gv100.o
 nvkm-y += nvkm/engine/disp/vga.o
 
 nvkm-y += nvkm/engine/disp/head.o
 nvkm-y += nvkm/engine/disp/headnv04.o
 nvkm-y += nvkm/engine/disp/headnv50.o
 nvkm-y += nvkm/engine/disp/headgf119.o
+nvkm-y += nvkm/engine/disp/headgv100.o
 
 nvkm-y += nvkm/engine/disp/ior.o
 nvkm-y += nvkm/engine/disp/dacnv50.o
@@ -35,6 +37,7 @@ nvkm-y += nvkm/engine/disp/sorgf119.o
 nvkm-y += nvkm/engine/disp/sorgk104.o
 nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
+nvkm-y += nvkm/engine/disp/sorgv100.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/dp.o
@@ -47,6 +50,7 @@ nvkm-y += nvkm/engine/disp/hdmig84.o
 nvkm-y += nvkm/engine/disp/hdmigt215.o
 nvkm-y += nvkm/engine/disp/hdmigf119.o
 nvkm-y += nvkm/engine/disp/hdmigk104.o
+nvkm-y += nvkm/engine/disp/hdmigv100.o
 
 nvkm-y += nvkm/engine/disp/conn.o
 
@@ -63,13 +67,16 @@ nvkm-y += nvkm/engine/disp/rootgm107.o
 nvkm-y += nvkm/engine/disp/rootgm200.o
 nvkm-y += nvkm/engine/disp/rootgp100.o
 nvkm-y += nvkm/engine/disp/rootgp102.o
+nvkm-y += nvkm/engine/disp/rootgv100.o
 
 nvkm-y += nvkm/engine/disp/channv50.o
 nvkm-y += nvkm/engine/disp/changf119.o
+nvkm-y += nvkm/engine/disp/changv100.o
 
 nvkm-y += nvkm/engine/disp/dmacnv50.o
 nvkm-y += nvkm/engine/disp/dmacgf119.o
 nvkm-y += nvkm/engine/disp/dmacgp102.o
+nvkm-y += nvkm/engine/disp/dmacgv100.o
 
 nvkm-y += nvkm/engine/disp/basenv50.o
 nvkm-y += nvkm/engine/disp/baseg84.o
@@ -82,6 +89,7 @@ nvkm-y += nvkm/engine/disp/coreg94.o
 nvkm-y += nvkm/engine/disp/coregf119.o
 nvkm-y += nvkm/engine/disp/coregk104.o
 nvkm-y += nvkm/engine/disp/coregp102.o
+nvkm-y += nvkm/engine/disp/coregv100.o
 
 nvkm-y += nvkm/engine/disp/ovlynv50.o
 nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -90,12 +98,17 @@ nvkm-y += nvkm/engine/disp/ovlygf119.o
 nvkm-y += nvkm/engine/disp/ovlygk104.o
 nvkm-y += nvkm/engine/disp/ovlygp102.o
 
+nvkm-y += nvkm/engine/disp/wimmgv100.o
+
+nvkm-y += nvkm/engine/disp/wndwgv100.o
+
 nvkm-y += nvkm/engine/disp/piocnv50.o
 nvkm-y += nvkm/engine/disp/piocgf119.o
 
 nvkm-y += nvkm/engine/disp/cursnv50.o
 nvkm-y += nvkm/engine/disp/cursgf119.o
 nvkm-y += nvkm/engine/disp/cursgp102.o
+nvkm-y += nvkm/engine/disp/cursgv100.o
 
 nvkm-y += nvkm/engine/disp/oimmnv50.o
 nvkm-y += nvkm/engine/disp/oimmgf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changv100.c
new file mode 100644
index 000000000000..75247c9c7e10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changv100.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "channv50.h"
+
+const struct nvkm_event_func
+gv100_disp_chan_uevent = {
+	.ctor = nv50_disp_chan_uevent_ctor,
+};
+
+u64
+gv100_disp_chan_user(struct nv50_disp_chan *chan, u64 *psize)
+{
+	*psize = 0x1000;
+	return 0x690000 + ((chan->chid.user - 1) * 0x1000);
+}
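
The user-area mapping above gives every channel a fixed 4KiB register window starting at 0x690000 for chid.user 1. As a quick worked example (illustrative arithmetic only, assuming window channels occupy user chids 1..32 as the channel numbering elsewhere in this patch suggests):

	/* chid.user 1  -> 0x690000 + (1 - 1)  * 0x1000 = 0x690000
	 * chid.user 32 -> 0x690000 + (32 - 1) * 0x1000 = 0x6af000
	 * The 0x6902xx offsets in the window method list (wndwgv100.c)
	 * correspond to the first of these per-channel windows. */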
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 391b007a6824..adc9d76d09cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -57,6 +57,11 @@ void gf119_disp_core_fini(struct nv50_disp_chan *);
 
 extern const struct nv50_disp_chan_func gp102_disp_dmac_func;
 
+u64 gv100_disp_chan_user(struct nv50_disp_chan *, u64 *);
+int gv100_disp_dmac_init(struct nv50_disp_chan *);
+void gv100_disp_dmac_fini(struct nv50_disp_chan *);
+int gv100_disp_dmac_bind(struct nv50_disp_chan *, struct nvkm_object *, u32);
+
 int nv50_disp_curs_new_(const struct nv50_disp_chan_func *,
 			struct nv50_disp *, int ctrl, int user,
 			const struct nvkm_oclass *, void *argv, u32 argc,
@@ -132,6 +137,15 @@ int gp102_disp_core_new(const struct nvkm_oclass *, void *, u32,
 int gp102_disp_ovly_new(const struct nvkm_oclass *, void *, u32,
 			struct nv50_disp *, struct nvkm_object **);
 
+int gv100_disp_curs_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
+int gv100_disp_wimm_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
+int gv100_disp_core_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
+int gv100_disp_wndw_new(const struct nvkm_oclass *, void *, u32,
+			struct nv50_disp *, struct nvkm_object **);
+
 struct nv50_disp_mthd_list {
 	u32 mthd;
 	u32 addr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
new file mode 100644
index 000000000000..4592d0e69fec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "channv50.h"
+
+#include <subdev/timer.h>
+
+const struct nv50_disp_mthd_list
+gv100_disp_core_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0200, 0x680200 },
+		{ 0x0208, 0x680208 },
+		{ 0x020c, 0x68020c },
+		{ 0x0210, 0x680210 },
+		{ 0x0214, 0x680214 },
+		{ 0x0218, 0x680218 },
+		{ 0x021c, 0x68021c },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gv100_disp_core_mthd_sor = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0300, 0x680300 },
+		{ 0x0304, 0x680304 },
+		{ 0x0308, 0x680308 },
+		{ 0x030c, 0x68030c },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+gv100_disp_core_mthd_wndw = {
+	.mthd = 0x0080,
+	.addr = 0x000080,
+	.data = {
+		{ 0x1000, 0x681000 },
+		{ 0x1004, 0x681004 },
+		{ 0x1008, 0x681008 },
+		{ 0x100c, 0x68100c },
+		{ 0x1010, 0x681010 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+gv100_disp_core_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000400,
+	.data = {
+		{ 0x2000, 0x682000 },
+		{ 0x2004, 0x682004 },
+		{ 0x2008, 0x682008 },
+		{ 0x200c, 0x68200c },
+		{ 0x2014, 0x682014 },
+		{ 0x2018, 0x682018 },
+		{ 0x201c, 0x68201c },
+		{ 0x2020, 0x682020 },
+		{ 0x2028, 0x682028 },
+		{ 0x202c, 0x68202c },
+		{ 0x2030, 0x682030 },
+		{ 0x2038, 0x682038 },
+		{ 0x203c, 0x68203c },
+		{ 0x2048, 0x682048 },
+		{ 0x204c, 0x68204c },
+		{ 0x2050, 0x682050 },
+		{ 0x2054, 0x682054 },
+		{ 0x2058, 0x682058 },
+		{ 0x205c, 0x68205c },
+		{ 0x2060, 0x682060 },
+		{ 0x2064, 0x682064 },
+		{ 0x2068, 0x682068 },
+		{ 0x206c, 0x68206c },
+		{ 0x2070, 0x682070 },
+		{ 0x2074, 0x682074 },
+		{ 0x2078, 0x682078 },
+		{ 0x207c, 0x68207c },
+		{ 0x2080, 0x682080 },
+		{ 0x2088, 0x682088 },
+		{ 0x2090, 0x682090 },
+		{ 0x209c, 0x68209c },
+		{ 0x20a0, 0x6820a0 },
+		{ 0x20a4, 0x6820a4 },
+		{ 0x20a8, 0x6820a8 },
+		{ 0x20ac, 0x6820ac },
+		{ 0x218c, 0x68218c },
+		{ 0x2194, 0x682194 },
+		{ 0x2198, 0x682198 },
+		{ 0x219c, 0x68219c },
+		{ 0x21a0, 0x6821a0 },
+		{ 0x21a4, 0x6821a4 },
+		{ 0x2214, 0x682214 },
+		{ 0x2218, 0x682218 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gv100_disp_core_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x008000,
+	.data = {
+		{ "Global", 1, &gv100_disp_core_mthd_base },
+		{    "SOR", 4, &gv100_disp_core_mthd_sor  },
+		{ "WINDOW", 8, &gv100_disp_core_mthd_wndw },
+		{   "HEAD", 4, &gv100_disp_core_mthd_head },
+		{}
+	}
+};
+
+static int
+gv100_disp_core_idle(struct nv50_disp_chan *chan)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	nvkm_msec(device, 2000,
+		u32 stat = nvkm_rd32(device, 0x610630);
+		if ((stat & 0x001f0000) == 0x000b0000)
+			return 0;
+	);
+	return -EBUSY;
+}
+
+static u64
+gv100_disp_core_user(struct nv50_disp_chan *chan, u64 *psize)
+{
+	*psize = 0x10000;
+	return 0x680000;
+}
+
+static void
+gv100_disp_core_intr(struct nv50_disp_chan *chan, bool en)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u32 mask = 0x00000001;
+	const u32 data = en ? mask : 0;
+	nvkm_mask(device, 0x611dac, mask, data);
+}
+
+static void
+gv100_disp_core_fini(struct nv50_disp_chan *chan)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000000);
+	gv100_disp_core_idle(chan);
+	nvkm_mask(device, 0x6104e0, 0x00000002, 0x00000000);
+}
+
+static int
+gv100_disp_core_init(struct nv50_disp_chan *chan)
+{
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	nvkm_wr32(device, 0x610b24, lower_32_bits(chan->push));
+	nvkm_wr32(device, 0x610b20, upper_32_bits(chan->push));
+	nvkm_wr32(device, 0x610b28, 0x00000001);
+	nvkm_wr32(device, 0x610b2c, 0x00000040);
+
+	nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x680000, 0x00000000);
+	nvkm_wr32(device, 0x6104e0, 0x00000013);
+	return gv100_disp_core_idle(chan);
+}
+
+static const struct nv50_disp_chan_func
+gv100_disp_core = {
+	.init = gv100_disp_core_init,
+	.fini = gv100_disp_core_fini,
+	.intr = gv100_disp_core_intr,
+	.user = gv100_disp_core_user,
+	.bind = gv100_disp_dmac_bind,
+};
+
+int
+gv100_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_core_new_(&gv100_disp_core, &gv100_disp_core_mthd,
+				   disp, 0, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c
new file mode 100644
index 000000000000..a3e4f6900245
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "channv50.h"
+
+#include <subdev/timer.h>
+
+static int
+gv100_disp_curs_idle(struct nv50_disp_chan *chan)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u32 soff = (chan->chid.ctrl - 1) * 0x04;
+	nvkm_msec(device, 2000,
+		u32 stat = nvkm_rd32(device, 0x610664 + soff);
+		if ((stat & 0x00070000) == 0x00040000)
+			return 0;
+	);
+	return -EBUSY;
+}
+
+static void
+gv100_disp_curs_intr(struct nv50_disp_chan *chan, bool en)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u32 mask = 0x00010000 << chan->head;
+	const u32 data = en ? mask : 0;
+	nvkm_mask(device, 0x611dac, mask, data);
+}
+
+static void
+gv100_disp_curs_fini(struct nv50_disp_chan *chan)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u32 hoff = chan->chid.ctrl * 4;
+	nvkm_mask(device, 0x6104e0 + hoff, 0x00000010, 0x00000010);
+	gv100_disp_curs_idle(chan);
+	nvkm_mask(device, 0x6104e0 + hoff, 0x00000001, 0x00000000);
+}
+
+static int
+gv100_disp_curs_init(struct nv50_disp_chan *chan)
+{
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	nvkm_wr32(device, 0x6104e0 + chan->chid.ctrl * 4, 0x00000001);
+	return gv100_disp_curs_idle(chan);
+}
+
+static const struct nv50_disp_chan_func
+gv100_disp_curs = {
+	.init = gv100_disp_curs_init,
+	.fini = gv100_disp_curs_fini,
+	.intr = gv100_disp_curs_intr,
+	.user = gv100_disp_chan_user,
+};
+
+int
+gv100_disp_curs_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return nv50_disp_curs_new_(&gv100_disp_curs, disp, 73, 73,
+				   oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
new file mode 100644
index 000000000000..eac0e42da354
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgv100.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "channv50.h"
+
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+static int
+gv100_disp_dmac_idle(struct nv50_disp_chan *chan)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u32 soff = (chan->chid.ctrl - 1) * 0x04;
+	nvkm_msec(device, 2000,
+		u32 stat = nvkm_rd32(device, 0x610664 + soff);
+		if ((stat & 0x000f0000) == 0x00040000)
+			return 0;
+	);
+	return -EBUSY;
+}
+
+int
+gv100_disp_dmac_bind(struct nv50_disp_chan *chan,
+		     struct nvkm_object *object, u32 handle)
+{
+	return nvkm_ramht_insert(chan->disp->ramht, object,
+				 chan->chid.user, -9, handle,
+				 chan->chid.user << 25 | 0x00000040);
+}
+
+void
+gv100_disp_dmac_fini(struct nv50_disp_chan *chan)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u32 coff = chan->chid.ctrl * 0x04;
+	nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000000);
+	gv100_disp_dmac_idle(chan);
+	nvkm_mask(device, 0x6104e0 + coff, 0x00000002, 0x00000000);
+}
+
+int
+gv100_disp_dmac_init(struct nv50_disp_chan *chan)
+{
+	struct nvkm_subdev *subdev = &chan->disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 uoff = (chan->chid.ctrl - 1) * 0x1000;
+	const u32 poff = chan->chid.ctrl * 0x10;
+	const u32 coff = chan->chid.ctrl * 0x04;
+
+	nvkm_wr32(device, 0x610b24 + poff, lower_32_bits(chan->push));
+	nvkm_wr32(device, 0x610b20 + poff, upper_32_bits(chan->push));
+	nvkm_wr32(device, 0x610b28 + poff, 0x00000001);
+	nvkm_wr32(device, 0x610b2c + poff, 0x00000040);
+
+	nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x690000 + uoff, 0x00000000);
+	nvkm_wr32(device, 0x6104e0 + coff, 0x00000013);
+	return gv100_disp_dmac_idle(chan);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
new file mode 100644
index 000000000000..d0a7e3456da1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/gpuobj.h>
+#include <subdev/timer.h>
+
+static int
+gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = nvkm_rd32(device, 0x610064);
+	return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
+}
+
+static void
+gv100_disp_super(struct work_struct *work)
+{
+	struct nv50_disp *disp =
+		container_of(work, struct nv50_disp, supervisor);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_head *head;
+	u32 stat = nvkm_rd32(device, 0x6107a8);
+	u32 mask[4];
+
+	nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super), stat);
+	list_for_each_entry(head, &disp->base.head, head) {
+		mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
+		HEAD_DBG(head, "%08x", mask[head->id]);
+	}
+
+	if (disp->super & 0x00000001) {
+		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+		nv50_disp_super_1(disp);
+		list_for_each_entry(head, &disp->base.head, head) {
+			if (!(mask[head->id] & 0x00001000))
+				continue;
+			nv50_disp_super_1_0(disp, head);
+		}
+	} else
+	if (disp->super & 0x00000002) {
+		list_for_each_entry(head, &disp->base.head, head) {
+			if (!(mask[head->id] & 0x00001000))
+				continue;
+			nv50_disp_super_2_0(disp, head);
+		}
+		nvkm_outp_route(&disp->base);
+		list_for_each_entry(head, &disp->base.head, head) {
+			if (!(mask[head->id] & 0x00010000))
+				continue;
+			nv50_disp_super_2_1(disp, head);
+		}
+		list_for_each_entry(head, &disp->base.head, head) {
+			if (!(mask[head->id] & 0x00001000))
+				continue;
+			nv50_disp_super_2_2(disp, head);
+		}
+	} else
+	if (disp->super & 0x00000004) {
+		list_for_each_entry(head, &disp->base.head, head) {
+			if (!(mask[head->id] & 0x00001000))
+				continue;
+			nv50_disp_super_3_0(disp, head);
+		}
+	}
+
+	list_for_each_entry(head, &disp->base.head, head)
+		nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
+	nvkm_wr32(device, 0x6107a8, 0x80000000);
+}
+
+static void
+gv100_disp_exception(struct nv50_disp *disp, int chid)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
+	u32 type = (stat & 0x00007000) >> 12;
+	u32 mthd = (stat & 0x00000fff) << 2;
+	u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
+	u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
+
+	nvkm_error(subdev, "chid %d %08x [type %d mthd %04x] "
+			   "data %08x code %08x\n",
+		   chid, stat, type, mthd, data, code);
+
+	if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
+		switch (mthd) {
+		case 0x0200:
+			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+			break;
+		default:
+			break;
+		}
+	}
+
+	nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
+}
+
+static void
+gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x611c30);
+
+	if (stat & 0x00000007) {
+		disp->super = (stat & 0x00000007);
+		queue_work(disp->wq, &disp->supervisor);
+		nvkm_wr32(device, 0x611860, disp->super);
+		stat &= ~0x00000007;
+	}
+
+	/*TODO: I would guess this is VBIOS_RELEASE, however, NFI how to
+	 *      ACK it, nor does RM appear to bother.
+	 */
+	if (stat & 0x00000008)
+		stat &= ~0x00000008;
+
+	if (stat & 0x00000100) {
+		unsigned long wndws = nvkm_rd32(device, 0x611858);
+		unsigned long other = nvkm_rd32(device, 0x61185c);
+		int wndw;
+
+		nvkm_wr32(device, 0x611858, wndws);
+		nvkm_wr32(device, 0x61185c, other);
+
+		/* AWAKEN_OTHER_CORE. */
+		if (other & 0x00000001)
+			nv50_disp_chan_uevent_send(disp, 0);
+
+		/* AWAKEN_WIN_CH(n). */
+		for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
+			nv50_disp_chan_uevent_send(disp, 1 + wndw);
+		}
+	}
+
+	if (stat)
+		nvkm_warn(subdev, "ctrl %08x\n", stat);
+}
+
+static void
+gv100_disp_intr_exc_other(struct nv50_disp *disp)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x611854);
+	unsigned long mask;
+	int head;
+
+	if (stat & 0x00000001) {
+		nvkm_wr32(device, 0x611854, 0x00000001);
+		gv100_disp_exception(disp, 0);
+		stat &= ~0x00000001;
+	}
+
+	if ((mask = (stat & 0x00ff0000) >> 16)) {
+		for_each_set_bit(head, &mask, disp->wndw.nr) {
+			nvkm_wr32(device, 0x611854, 0x00010000 << head);
+			gv100_disp_exception(disp, 73 + head);
+			stat &= ~(0x00010000 << head);
+		}
+	}
+
+	if (stat) {
+		nvkm_warn(subdev, "exception %08x\n", stat);
+		nvkm_wr32(device, 0x611854, stat);
+	}
+}
+
+static void
+gv100_disp_intr_exc_winim(struct nv50_disp *disp)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	unsigned long stat = nvkm_rd32(device, 0x611850);
+	int wndw;
+
+	for_each_set_bit(wndw, &stat, disp->wndw.nr) {
+		nvkm_wr32(device, 0x611850, BIT(wndw));
+		gv100_disp_exception(disp, 33 + wndw);
+		stat &= ~BIT(wndw);
+	}
+
+	if (stat) {
+		nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
+		nvkm_wr32(device, 0x611850, stat);
+	}
+}
+
+static void
+gv100_disp_intr_exc_win(struct nv50_disp *disp)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	unsigned long stat = nvkm_rd32(device, 0x61184c);
+	int wndw;
+
+	for_each_set_bit(wndw, &stat, disp->wndw.nr) {
+		nvkm_wr32(device, 0x61184c, BIT(wndw));
+		gv100_disp_exception(disp, 1 + wndw);
+		stat &= ~BIT(wndw);
+	}
+
+	if (stat) {
+		nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
+		nvkm_wr32(device, 0x61184c, stat);
+	}
+}
+
+static void
+gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));
+
+	/* LAST_DATA, LOADV. */
+	if (stat & 0x00000003) {
+		nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
+		stat &= ~0x00000003;
+	}
+
+	if (stat & 0x00000004) {
+		nvkm_disp_vblank(&disp->base, head);
+		nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
+		stat &= ~0x00000004;
+	}
+
+	if (stat) {
+		nvkm_warn(subdev, "head %08x\n", stat);
+		nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
+	}
+}
+
+static void
+gv100_disp_intr(struct nv50_disp *disp)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x611ec0);
+	unsigned long mask;
+	int head;
+
+	if ((mask = (stat & 0x000000ff))) {
+		for_each_set_bit(head, &mask, 8) {
+			gv100_disp_intr_head_timing(disp, head);
+			stat &= ~BIT(head);
+		}
+	}
+
+	if (stat & 0x00000200) {
+		gv100_disp_intr_exc_win(disp);
+		stat &= ~0x00000200;
+	}
+
+	if (stat & 0x00000400) {
+		gv100_disp_intr_exc_winim(disp);
+		stat &= ~0x00000400;
+	}
+
+	if (stat & 0x00000800) {
+		gv100_disp_intr_exc_other(disp);
+		stat &= ~0x00000800;
+	}
+
+	if (stat & 0x00001000) {
+		gv100_disp_intr_ctrl_disp(disp);
+		stat &= ~0x00001000;
+	}
+
+	if (stat)
+		nvkm_warn(subdev, "intr %08x\n", stat);
+}
+
+static void
+gv100_disp_fini(struct nv50_disp *disp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x611db0, 0x00000000);
+}
+
+static int
+gv100_disp_init(struct nv50_disp *disp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_head *head;
+	int i, j;
+	u32 tmp;
+
+	/* Claim ownership of display. */
+	if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
+		nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* Lock pin capabilities. */
+	tmp = nvkm_rd32(device, 0x610068);
+	nvkm_wr32(device, 0x640008, tmp);
+
+	/* SOR capabilities. */
+	for (i = 0; i < disp->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
+		nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
+	}
+
+	/* Head capabilities. */
+	list_for_each_entry(head, &disp->base.head, head) {
+		const int id = head->id;
+
+		/* RG. */
+		tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
+		nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
+
+		/* POSTCOMP. */
+		for (j = 0; j < 6 * 4; j += 4) {
+			tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
+			nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
+		}
+	}
+
+	/* Window capabilities. */
+	for (i = 0; i < disp->wndw.nr; i++) {
+		nvkm_mask(device, 0x640004, 1 << i, 1 << i);
+		for (j = 0; j < 6 * 4; j += 4) {
+			tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
+			nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
+		}
+	}
+
+	/* IHUB capabilities. */
+	for (i = 0; i < 4; i++) {
+		tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
+		nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
+	}
+
+	nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
+
+	/* Setup instance memory. */
+	switch (nvkm_memory_target(disp->inst->memory)) {
+	case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
+	case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
+	case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
+	default:
+		break;
+	}
+	nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
+	nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
+
+	/* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
+	nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
+	nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
+
+	/* EXC_OTHER: CURSn, CORE. */
+	nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
+				    0x00000001); /* MSK. */
+	nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
+
+	/* EXC_WINIM. */
+	nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
+	nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
+
+	/* EXC_WIN. */
+	nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
+	nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
+
+	/* HEAD_TIMING(n): VBLANK. */
+	list_for_each_entry(head, &disp->base.head, head) {
+		const u32 hoff = head->id * 4;
+		nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
+		nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
+	}
+
+	/* OR. */
+	nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
+	nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
+	return 0;
+}
+
+static const struct nv50_disp_func
+gv100_disp = {
+	.init = gv100_disp_init,
+	.fini = gv100_disp_fini,
+	.intr = gv100_disp_intr,
+	.uevent = &gv100_disp_chan_uevent,
+	.super = gv100_disp_super,
+	.root = &gv100_disp_root_oclass,
+	.wndw = { .cnt = gv100_disp_wndw_cnt },
+	.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+	.sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new },
+	.ramht_size = 0x2000,
+};
+
+int
+gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+	return nv50_disp_new_(&gv100_disp, device, index, pdisp);
+}
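
Pulling together the channel indices used in the exception and interrupt paths above (core -> 0, EXC_WIN -> 1 + window, EXC_WINIM -> 33 + window, cursor exceptions -> 73 + head) with the nv50_disp.chan[] array grown to 81 entries later in this patch, the implied channel-ID layout can be summarised as follows; the enum is illustrative only and does not appear in the patch:

	enum { /* illustrative GV100 channel-ID layout, not from this patch */
		GV100_DISP_CHID_CORE = 0,	/* core channel */
		GV100_DISP_CHID_WNDW = 1,	/* + window index, up to 32 windows */
		GV100_DISP_CHID_WIMM = 33,	/* + window index, up to 32 windows */
		GV100_DISP_CHID_CURS = 73,	/* + head index, up to 8 heads (last: 80) */
	};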
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
new file mode 100644
index 000000000000..6e3c450eaace
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "hdmi.h"
+
+void
+gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
+		u8 rekey, u8 *avi, u8 avi_size, u8 *vendor, u8 vendor_size)
+{
+	struct nvkm_device *device = ior->disp->engine.subdev.device;
+	const u32 ctrl = 0x40000000 * enable |
+			 max_ac_packet << 16 |
+			 rekey;
+	const u32 hoff = head * 0x800;
+	const u32 hdmi = head * 0x400;
+	struct packed_hdmi_infoframe avi_infoframe;
+	struct packed_hdmi_infoframe vendor_infoframe;
+
+	pack_hdmi_infoframe(&avi_infoframe, avi, avi_size);
+	pack_hdmi_infoframe(&vendor_infoframe, vendor, vendor_size);
+
+	if (!(ctrl & 0x40000000)) {
+		nvkm_mask(device, 0x6165c0 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x6f0100 + hdmi, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000000);
+		return;
+	}
+
+	/* AVI InfoFrame (AVI). */
+	nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000000);
+	if (avi_size) {
+		nvkm_wr32(device, 0x6f0008 + hdmi, avi_infoframe.header);
+		nvkm_wr32(device, 0x6f000c + hdmi, avi_infoframe.subpack0_low);
+		nvkm_wr32(device, 0x6f0010 + hdmi, avi_infoframe.subpack0_high);
+		nvkm_wr32(device, 0x6f0014 + hdmi, avi_infoframe.subpack1_low);
+		nvkm_wr32(device, 0x6f0018 + hdmi, avi_infoframe.subpack1_high);
+		nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000001);
+	}
+
+	/* Vendor-specific InfoFrame (VSI). */
+	nvkm_mask(device, 0x6f0100 + hdmi, 0x00010001, 0x00000000);
+	if (vendor_size) {
+		nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
+		nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
+		nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
+		nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000);
+		nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
+		nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
+		nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
+		nvkm_wr32(device, 0x6f0120 + hdmi, 0x00000000);
+		nvkm_wr32(device, 0x6f0124 + hdmi, 0x00000000);
+		nvkm_mask(device, 0x6f0100 + hdmi, 0x00000001, 0x00000001);
+	}
+
+
+	/* General Control (GCP). */
+	nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x6f00cc + hdmi, 0x00000010);
+	nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
+
+	/* Audio Clock Regeneration (ACR). */
+	nvkm_wr32(device, 0x6f0080 + hdmi, 0x82000000);
+
+	/* NV_PDISP_SF_HDMI_CTRL. */
+	nvkm_mask(device, 0x6165c0 + hoff, 0x401f007f, ctrl);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
index 4a5d7892ff54..7d55faf52fcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
@@ -58,4 +58,8 @@ int nv50_head_new(struct nvkm_disp *, int id);
 
 int gf119_head_cnt(struct nvkm_disp *, unsigned long *);
 int gf119_head_new(struct nvkm_disp *, int id);
+void gf119_head_rgclk(struct nvkm_head *, int);
+
+int gv100_head_cnt(struct nvkm_disp *, unsigned long *);
+int gv100_head_new(struct nvkm_disp *, int id);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
index bcbdaaf8ba20..e86298b35902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
@@ -39,7 +39,7 @@ gf119_head_vblank_get(struct nvkm_head *head)
 	nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000001);
 }
 
-static void
+void
 gf119_head_rgclk(struct nvkm_head *head, int div)
 {
 	struct nvkm_device *device = head->disp->engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c
new file mode 100644
index 000000000000..1a061b42ae5c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+
+static void
+gv100_head_vblank_put(struct nvkm_head *head)
+{
+	struct nvkm_device *device = head->disp->engine.subdev.device;
+	nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000000);
+}
+
+static void
+gv100_head_vblank_get(struct nvkm_head *head)
+{
+	struct nvkm_device *device = head->disp->engine.subdev.device;
+	nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000004);
+}
+
+static void
+gv100_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
+{
+	struct nvkm_device *device = head->disp->engine.subdev.device;
+	const u32 hoff = head->id * 0x800;
+	/* vline read locks hline. */
+	*vline = nvkm_rd32(device, 0x616330 + hoff) & 0x0000ffff;
+	*hline = nvkm_rd32(device, 0x616334 + hoff) & 0x0000ffff;
+}
+
+static void
+gv100_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
+{
+	struct nvkm_device *device = head->disp->engine.subdev.device;
+	const u32 hoff = (state == &head->arm) * 0x8000 + head->id * 0x400;
+	u32 data;
+
+	data = nvkm_rd32(device, 0x682064 + hoff);
+	state->vtotal = (data & 0xffff0000) >> 16;
+	state->htotal = (data & 0x0000ffff);
+	data = nvkm_rd32(device, 0x682068 + hoff);
+	state->vsynce = (data & 0xffff0000) >> 16;
+	state->hsynce = (data & 0x0000ffff);
+	data = nvkm_rd32(device, 0x68206c + hoff);
+	state->vblanke = (data & 0xffff0000) >> 16;
+	state->hblanke = (data & 0x0000ffff);
+	data = nvkm_rd32(device, 0x682070 + hoff);
+	state->vblanks = (data & 0xffff0000) >> 16;
+	state->hblanks = (data & 0x0000ffff);
+	state->hz = nvkm_rd32(device, 0x68200c + hoff);
+
+	data = nvkm_rd32(device, 0x682004 + hoff);
+	switch ((data & 0x000000f0) >> 4) {
+	case 5: state->or.depth = 30; break;
+	case 4: state->or.depth = 24; break;
+	case 1: state->or.depth = 18; break;
+	default:
+		state->or.depth = 18;
+		WARN_ON(1);
+		break;
+	}
+}
+
+static const struct nvkm_head_func
+gv100_head = {
+	.state = gv100_head_state,
+	.rgpos = gv100_head_rgpos,
+	.rgclk = gf119_head_rgclk,
+	.vblank_get = gv100_head_vblank_get,
+	.vblank_put = gv100_head_vblank_put,
+};
+
+int
+gv100_head_new(struct nvkm_disp *disp, int id)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	if (!(nvkm_rd32(device, 0x610060) & (0x00000001 << id)))
+		return 0;
+	return nvkm_head_new_(&gv100_head, disp, id);
+}
+
+int
+gv100_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = nvkm_rd32(device, 0x610060) & 0x000000ff;
+	return nvkm_rd32(device, 0x610074) & 0x0000000f;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 9d43ab23f4d0..e0b4e0c5704e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -30,7 +30,7 @@ struct nvkm_ior {
 			UNKNOWN
 		} proto:3;
 		unsigned link:2;
-		unsigned head:4;
+		unsigned head:8;
 	} arm, asy;
 
 	/* Armed DP state. */
@@ -133,10 +133,15 @@ void gf119_sor_dp_watermark(struct nvkm_ior *, int, u8);
 
 void gm107_sor_dp_pattern(struct nvkm_ior *, int);
 
+void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *);
+int gm200_sor_route_get(struct nvkm_outp *, int *);
+void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int);
+
 void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gk104_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
+void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 
 void gt215_hda_hpd(struct nvkm_ior *, int, bool);
 void gt215_hda_eld(struct nvkm_ior *, u8 *, u8);
@@ -178,4 +183,7 @@ int gf119_sor_new(struct nvkm_disp *, int);
 int gk104_sor_new(struct nvkm_disp *, int);
 int gm107_sor_new(struct nvkm_disp *, int);
 int gm200_sor_new(struct nvkm_disp *, int);
+
+int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
+int gv100_sor_new(struct nvkm_disp *, int);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 4a37c44fcbed..f89c7b977aa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -88,6 +88,12 @@ nv50_disp_oneinit_(struct nvkm_disp *base)
 	struct nvkm_device *device = subdev->device;
 	int ret, i;
 
+	if (func->wndw.cnt) {
+		disp->wndw.nr = func->wndw.cnt(&disp->base, &disp->wndw.mask);
+		nvkm_debug(subdev, "Window(s): %d (%08lx)\n",
+			   disp->wndw.nr, disp->wndw.mask);
+	}
+
 	disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask);
 	nvkm_debug(subdev, "  Head(s): %d (%02lx)\n",
 		   disp->head.nr, disp->head.mask);
@@ -133,7 +139,8 @@ nv50_disp_oneinit_(struct nvkm_disp *base)
 	if (ret)
 		return ret;
 
-	return nvkm_ramht_new(device, 0x1000, 0, disp->inst, &disp->ramht);
+	return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
+			      0x1000, 0, disp->inst, &disp->ramht);
 }
 
 static const struct nvkm_disp_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 77aa2c8cfcd6..8580382ab248 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -18,7 +18,7 @@ struct nv50_disp {
 	struct {
 		unsigned long mask;
 		int nr;
-	} head, dac;
+	} wndw, head, dac;
 
 	struct {
 		unsigned long mask;
@@ -35,7 +35,7 @@ struct nv50_disp {
 	struct nvkm_gpuobj *inst;
 	struct nvkm_ramht *ramht;
 
-	struct nv50_disp_chan *chan[21];
+	struct nv50_disp_chan *chan[81];
 };
 
 void nv50_disp_super_1(struct nv50_disp *);
@@ -62,7 +62,9 @@ struct nv50_disp_func {
 	struct {
 		int (*cnt)(struct nvkm_disp *, unsigned long *mask);
 		int (*new)(struct nvkm_disp *, int id);
-	} head, dac, sor, pior;
+	} wndw, head, dac, sor, pior;
+
+	u16 ramht_size;
 };
 
 int nv50_disp_init(struct nv50_disp *);
@@ -86,4 +88,5 @@ int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
 void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
 
 extern const struct nvkm_event_func gf119_disp_chan_uevent;
+extern const struct nvkm_event_func gv100_disp_chan_uevent;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
new file mode 100644
index 000000000000..9c658d632d37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gv100_disp_root = {
+	.user = {
+		{{0,0,GV100_DISP_CURSOR                }, gv100_disp_curs_new },
+		{{0,0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+		{{0,0,GV100_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
+		{{0,0,GV100_DISP_WINDOW_CHANNEL_DMA    }, gv100_disp_wndw_new },
+		{}
+	},
+};
+
+static int
+gv100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gv100_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gv100_disp_root_oclass = {
+	.base.oclass = GV100_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gv100_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 9983a424d30d..6ca4f9184b51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -36,4 +36,5 @@ extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
 extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
+extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 8bc019b6ffab..d892bdf04034 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -23,7 +23,7 @@
  */
 #include "ior.h"
 
-static void
+void
 gm200_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
 {
 	struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -45,7 +45,7 @@ gm200_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
 	nvkm_wr32(device, 0x61c13c + loff, data[3] | (pc << shift));
 }
 
-static void
+void
 gm200_sor_route_set(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
 	struct nvkm_device *device = outp->disp->engine.subdev.device;
@@ -62,7 +62,7 @@ gm200_sor_route_set(struct nvkm_outp *outp, struct nvkm_ior *ior)
 		nvkm_mask(device, 0x612388 + moff, 0x0000001f, link << 4 | sor);
 }
 
-static int
+int
 gm200_sor_route_get(struct nvkm_outp *outp, int *link)
 {
 	struct nvkm_device *device = outp->disp->engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
new file mode 100644
index 000000000000..040db8a338de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static void
+gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
+{
+	struct nvkm_device *device = sor->disp->engine.subdev.device;
+	const u32 hoff = head * 0x800;
+	nvkm_mask(device, 0x616550 + hoff, 0x0c00003f, 0x08000000 | watermark);
+}
+
+static void
+gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
+{
+	struct nvkm_device *device = sor->disp->engine.subdev.device;
+	const u32 hoff = head * 0x800;
+	nvkm_mask(device, 0x616568 + hoff, 0x0000ffff, h);
+	nvkm_mask(device, 0x61656c + hoff, 0x00ffffff, v);
+}
+
+static void
+gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+	struct nvkm_device *device = sor->disp->engine.subdev.device;
+	const u32 hoff = 0x800 * head;
+	const u32 data = 0x80000000 | (0x00000001 * enable);
+	const u32 mask = 0x8000000d;
+	nvkm_mask(device, 0x616560 + hoff, mask, data);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x616560 + hoff) & 0x80000000))
+			break;
+	);
+}
+
+static void
+gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
+{
+	struct nvkm_device *device = sor->disp->engine.subdev.device;
+	const u32 coff = (state == &sor->arm) * 0x8000 + sor->id * 0x20;
+	u32 ctrl = nvkm_rd32(device, 0x680300 + coff);
+
+	state->proto_evo = (ctrl & 0x00000f00) >> 8;
+	switch (state->proto_evo) {
+	case 0: state->proto = LVDS; state->link = 1; break;
+	case 1: state->proto = TMDS; state->link = 1; break;
+	case 2: state->proto = TMDS; state->link = 2; break;
+	case 5: state->proto = TMDS; state->link = 3; break;
+	case 8: state->proto =   DP; state->link = 1; break;
+	case 9: state->proto =   DP; state->link = 2; break;
+	default:
+		state->proto = UNKNOWN;
+		break;
+	}
+
+	state->head = ctrl & 0x000000ff;
+}
+
+static const struct nvkm_ior_func
+gv100_sor = {
+	.route = {
+		.get = gm200_sor_route_get,
+		.set = gm200_sor_route_set,
+	},
+	.state = gv100_sor_state,
+	.power = nv50_sor_power,
+	.clock = gf119_sor_clock,
+	.hdmi = {
+		.ctrl = gv100_hdmi_ctrl,
+	},
+	.dp = {
+		.lanes = { 0, 1, 2, 3 },
+		.links = gf119_sor_dp_links,
+		.power = g94_sor_dp_power,
+		.pattern = gm107_sor_dp_pattern,
+		.drive = gm200_sor_dp_drive,
+		.audio = gv100_sor_dp_audio,
+		.audio_sym = gv100_sor_dp_audio_sym,
+		.watermark = gv100_sor_dp_watermark,
+	},
+	.hda = {
+		.hpd = gf119_hda_hpd,
+		.eld = gf119_hda_eld,
+	},
+};
+
+int
+gv100_sor_new(struct nvkm_disp *disp, int id)
+{
+	return nvkm_ior_new_(&gv100_sor, disp, SOR, id);
+}
+
+int
+gv100_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	*pmask = (nvkm_rd32(device, 0x610060) & 0x0000ff00) >> 8;
+	return (nvkm_rd32(device, 0x610074) & 0x00000f00) >> 8;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c
new file mode 100644
index 000000000000..89d783368b4f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+
+#include <nvif/clc37b.h>
+#include <nvif/unpack.h>
+
+static void
+gv100_disp_wimm_intr(struct nv50_disp_chan *chan, bool en)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u32 mask = 0x00000001 << chan->head;
+	const u32 data = en ? mask : 0;
+	nvkm_mask(device, 0x611da8, mask, data);
+}
+
+const struct nv50_disp_chan_func
+gv100_disp_wimm = {
+	.init = gv100_disp_dmac_init,
+	.fini = gv100_disp_dmac_fini,
+	.intr = gv100_disp_wimm_intr,
+	.user = gv100_disp_chan_user,
+};
+
+static int
+gv100_disp_wimm_new_(const struct nv50_disp_chan_func *func,
+		     const struct nv50_disp_chan_mthd *mthd,
+		     struct nv50_disp *disp, int chid,
+		     const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		     struct nvkm_object **pobject)
+{
+	union {
+		struct nvc37b_window_imm_channel_dma_v0 v0;
+	} *args = argv;
+	struct nvkm_object *parent = oclass->parent;
+	int wndw, ret = -ENOSYS;
+	u64 push;
+
+	nvif_ioctl(parent, "create window imm channel dma size %d\n", argc);
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		nvif_ioctl(parent, "create window imm channel dma vers %d "
+				   "pushbuf %016llx index %d\n",
+			   args->v0.version, args->v0.pushbuf, args->v0.index);
+		if (!(disp->wndw.mask & BIT(args->v0.index)))
+			return -EINVAL;
+		push = args->v0.pushbuf;
+		wndw = args->v0.index;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, disp, chid + wndw,
+				   wndw, push, oclass, pobject);
+}
+
+int
+gv100_disp_wimm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return gv100_disp_wimm_new_(&gv100_disp_wimm, NULL, disp, 33,
+				    oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
new file mode 100644
index 000000000000..98911805aabf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+
+#include <nvif/clc37e.h>
+#include <nvif/unpack.h>
+
+static const struct nv50_disp_mthd_list
+gv100_disp_wndw_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0200, 0x690200 },
+		{ 0x020c, 0x69020c },
+		{ 0x0210, 0x690210 },
+		{ 0x0214, 0x690214 },
+		{ 0x0218, 0x690218 },
+		{ 0x021c, 0x69021c },
+		{ 0x0220, 0x690220 },
+		{ 0x0224, 0x690224 },
+		{ 0x0228, 0x690228 },
+		{ 0x022c, 0x69022c },
+		{ 0x0230, 0x690230 },
+		{ 0x0234, 0x690234 },
+		{ 0x0238, 0x690238 },
+		{ 0x0240, 0x690240 },
+		{ 0x0244, 0x690244 },
+		{ 0x0248, 0x690248 },
+		{ 0x024c, 0x69024c },
+		{ 0x0250, 0x690250 },
+		{ 0x0254, 0x690254 },
+		{ 0x0260, 0x690260 },
+		{ 0x0264, 0x690264 },
+		{ 0x0268, 0x690268 },
+		{ 0x026c, 0x69026c },
+		{ 0x0270, 0x690270 },
+		{ 0x0274, 0x690274 },
+		{ 0x0280, 0x690280 },
+		{ 0x0284, 0x690284 },
+		{ 0x0288, 0x690288 },
+		{ 0x028c, 0x69028c },
+		{ 0x0290, 0x690290 },
+		{ 0x0298, 0x690298 },
+		{ 0x029c, 0x69029c },
+		{ 0x02a0, 0x6902a0 },
+		{ 0x02a4, 0x6902a4 },
+		{ 0x02a8, 0x6902a8 },
+		{ 0x02ac, 0x6902ac },
+		{ 0x02b0, 0x6902b0 },
+		{ 0x02b4, 0x6902b4 },
+		{ 0x02b8, 0x6902b8 },
+		{ 0x02bc, 0x6902bc },
+		{ 0x02c0, 0x6902c0 },
+		{ 0x02c4, 0x6902c4 },
+		{ 0x02c8, 0x6902c8 },
+		{ 0x02cc, 0x6902cc },
+		{ 0x02d0, 0x6902d0 },
+		{ 0x02d4, 0x6902d4 },
+		{ 0x02d8, 0x6902d8 },
+		{ 0x02dc, 0x6902dc },
+		{ 0x02e0, 0x6902e0 },
+		{ 0x02e4, 0x6902e4 },
+		{ 0x02e8, 0x6902e8 },
+		{ 0x02ec, 0x6902ec },
+		{ 0x02f0, 0x6902f0 },
+		{ 0x02f4, 0x6902f4 },
+		{ 0x02f8, 0x6902f8 },
+		{ 0x02fc, 0x6902fc },
+		{ 0x0300, 0x690300 },
+		{ 0x0304, 0x690304 },
+		{ 0x0308, 0x690308 },
+		{ 0x0310, 0x690310 },
+		{ 0x0314, 0x690314 },
+		{ 0x0318, 0x690318 },
+		{ 0x031c, 0x69031c },
+		{ 0x0320, 0x690320 },
+		{ 0x0324, 0x690324 },
+		{ 0x0328, 0x690328 },
+		{ 0x032c, 0x69032c },
+		{ 0x033c, 0x69033c },
+		{ 0x0340, 0x690340 },
+		{ 0x0344, 0x690344 },
+		{ 0x0348, 0x690348 },
+		{ 0x034c, 0x69034c },
+		{ 0x0350, 0x690350 },
+		{ 0x0354, 0x690354 },
+		{ 0x0358, 0x690358 },
+		{ 0x0364, 0x690364 },
+		{ 0x0368, 0x690368 },
+		{ 0x036c, 0x69036c },
+		{ 0x0370, 0x690370 },
+		{ 0x0374, 0x690374 },
+		{ 0x0380, 0x690380 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+gv100_disp_wndw_mthd = {
+	.name = "Base",
+	.addr = 0x001000,
+	.prev = 0x000800,
+	.data = {
+		{ "Global", 1, &gv100_disp_wndw_mthd_base },
+		{}
+	}
+};
+
+static void
+gv100_disp_wndw_intr(struct nv50_disp_chan *chan, bool en)
+{
+	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
+	const u32 mask = 0x00000001 << chan->head;
+	const u32 data = en ? mask : 0;
+	nvkm_mask(device, 0x611da4, mask, data);
+}
+
+const struct nv50_disp_chan_func
+gv100_disp_wndw = {
+	.init = gv100_disp_dmac_init,
+	.fini = gv100_disp_dmac_fini,
+	.intr = gv100_disp_wndw_intr,
+	.user = gv100_disp_chan_user,
+	.bind = gv100_disp_dmac_bind,
+};
+
+static int
+gv100_disp_wndw_new_(const struct nv50_disp_chan_func *func,
+		     const struct nv50_disp_chan_mthd *mthd,
+		     struct nv50_disp *disp, int chid,
+		     const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		     struct nvkm_object **pobject)
+{
+	union {
+		struct nvc37e_window_channel_dma_v0 v0;
+	} *args = argv;
+	struct nvkm_object *parent = oclass->parent;
+	int wndw, ret = -ENOSYS;
+	u64 push;
+
+	nvif_ioctl(parent, "create window channel dma size %d\n", argc);
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+		nvif_ioctl(parent, "create window channel dma vers %d "
+				   "pushbuf %016llx index %d\n",
+			   args->v0.version, args->v0.pushbuf, args->v0.index);
+		if (!(disp->wndw.mask & BIT(args->v0.index)))
+			return -EINVAL;
+		push = args->v0.pushbuf;
+		wndw = args->v0.index;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, disp, chid + wndw,
+				   wndw, push, oclass, pobject);
+}
+
+int
+gv100_disp_wndw_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+	return gv100_disp_wndw_new_(&gv100_disp_wndw, &gv100_disp_wndw_mthd,
+				    disp, 1, oclass, argv, argc, pobject);
+}

From facaed62b4cba3a6334fc1798fa8f51ea6a1962d Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:48 +1000
Subject: [PATCH 1160/1286] drm/nouveau/kms/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/dispnv50/Kbuild     |   8 +
 drivers/gpu/drm/nouveau/dispnv50/atom.h     |   8 +-
 drivers/gpu/drm/nouveau/dispnv50/core.c     |   1 +
 drivers/gpu/drm/nouveau/dispnv50/core.h     |   3 +
 drivers/gpu/drm/nouveau/dispnv50/corec37d.c | 110 ++++++++
 drivers/gpu/drm/nouveau/dispnv50/curs.c     |   1 +
 drivers/gpu/drm/nouveau/dispnv50/curs.h     |   1 +
 drivers/gpu/drm/nouveau/dispnv50/cursc37a.c |  50 ++++
 drivers/gpu/drm/nouveau/dispnv50/disp.c     |   6 +
 drivers/gpu/drm/nouveau/dispnv50/disp.h     |   4 +
 drivers/gpu/drm/nouveau/dispnv50/head.c     |  13 +-
 drivers/gpu/drm/nouveau/dispnv50/head.h     |   4 +
 drivers/gpu/drm/nouveau/dispnv50/head917d.c |   2 +-
 drivers/gpu/drm/nouveau/dispnv50/headc37d.c | 212 +++++++++++++++
 drivers/gpu/drm/nouveau/dispnv50/sorc37d.c  |  39 +++
 drivers/gpu/drm/nouveau/dispnv50/wimm.c     |  47 ++++
 drivers/gpu/drm/nouveau/dispnv50/wimm.h     |   8 +
 drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c |  86 ++++++
 drivers/gpu/drm/nouveau/dispnv50/wndw.c     |  38 ++-
 drivers/gpu/drm/nouveau/dispnv50/wndw.h     |   6 +
 drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c | 278 ++++++++++++++++++++
 21 files changed, 918 insertions(+), 7 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/corec37d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/headc37d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/wimm.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/wimm.h
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
 create mode 100644 drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c

diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index ebd18cb9feda..849b0f45afb8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -6,6 +6,7 @@ nouveau-y += dispnv50/core507d.o
 nouveau-y += dispnv50/core827d.o
 nouveau-y += dispnv50/core907d.o
 nouveau-y += dispnv50/core917d.o
+nouveau-y += dispnv50/corec37d.o
 
 nouveau-y += dispnv50/dac507d.o
 nouveau-y += dispnv50/dac907d.o
@@ -14,14 +15,20 @@ nouveau-y += dispnv50/pior507d.o
 
 nouveau-y += dispnv50/sor507d.o
 nouveau-y += dispnv50/sor907d.o
+nouveau-y += dispnv50/sorc37d.o
 
 nouveau-y += dispnv50/head.o
 nouveau-y += dispnv50/head507d.o
 nouveau-y += dispnv50/head827d.o
 nouveau-y += dispnv50/head907d.o
 nouveau-y += dispnv50/head917d.o
+nouveau-y += dispnv50/headc37d.o
+
+nouveau-y += dispnv50/wimm.o
+nouveau-y += dispnv50/wimmc37b.o
 
 nouveau-y += dispnv50/wndw.o
+nouveau-y += dispnv50/wndwc37e.o
 
 nouveau-y += dispnv50/base.o
 nouveau-y += dispnv50/base507c.o
@@ -32,6 +39,7 @@ nouveau-y += dispnv50/base917c.o
 nouveau-y += dispnv50/curs.o
 nouveau-y += dispnv50/curs507a.o
 nouveau-y += dispnv50/curs907a.o
+nouveau-y += dispnv50/cursc37a.o
 
 nouveau-y += dispnv50/oimm.o
 nouveau-y += dispnv50/oimm507b.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index d8337e7996e8..908feb1fc60f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -54,6 +54,9 @@ struct nv50_head_atom {
 		u64 offset:40;
 		u8 buffer:1;
 		u8 mode:4;
+		u8 size:2;
+		u8 range:2;
+		u8 output_mode:2;
 	} olut;
 
 	struct {
@@ -77,7 +80,7 @@ struct nv50_head_atom {
 		u32 handle;
 		u64 offset:40;
 		u8  layout:2;
-		u8  format:1;
+		u8  format:8;
 	} curs;
 
 	struct {
@@ -166,6 +169,9 @@ struct nv50_wndw_atom {
 			u8  buffer:1;
 			u8  enable:2;
 			u8  mode:4;
+			u8  size:2;
+			u8  range:2;
+			u8  output_mode:2;
 		} i;
 	} xlut;
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index f87cbaa4f8ec..f3c49adb1bdb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
 		int version;
 		int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
 	} cores[] = {
+		{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
 		{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
 		{ GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
 		{ GM200_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index c490d7d497b2..8470df9dd13d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -44,4 +44,7 @@ extern const struct nv50_outp_func dac907d;
 extern const struct nv50_outp_func sor907d;
 
 int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
+
+int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+extern const struct nv50_outp_func sorc37d;
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
new file mode 100644
index 000000000000..b5c17c948918
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nouveau_bo.h>
+
+static void
+corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 9))) {
+		if (ntfy) {
+			evo_mthd(push, 0x020c, 1);
+			evo_data(push, 0x00001000 | NV50_DISP_CORE_NTFY);
+		}
+
+		evo_mthd(push, 0x0218, 2);
+		evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS]);
+		evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
+		evo_mthd(push, 0x0200, 1);
+		evo_data(push, 0x00000001);
+
+		if (ntfy) {
+			evo_mthd(push, 0x020c, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, &core->chan);
+	}
+}
+
+int
+corec37d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
+			struct nvif_device *device)
+{
+	u32 data;
+	s64 time = nvif_msec(device, 2000ULL,
+		data = nouveau_bo_rd32(bo, offset / 4 + 0);
+		if ((data & 0xc0000000) == 0x80000000)
+			break;
+		usleep_range(1, 2);
+	);
+	return time < 0 ? time : 0;
+}
+
+void
+corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
+{
+	nouveau_bo_wr32(bo, offset / 4 + 0, 0x00000000);
+	nouveau_bo_wr32(bo, offset / 4 + 1, 0x00000000);
+	nouveau_bo_wr32(bo, offset / 4 + 2, 0x00000000);
+	nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
+}
+
+void
+corec37d_init(struct nv50_core *core)
+{
+	const u32 windows = 8; /*XXX*/
+	u32 *push, i;
+	if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+		evo_mthd(push, 0x0208, 1);
+		evo_data(push, core->chan.sync.handle);
+		for (i = 0; i < windows; i++) {
+			evo_mthd(push, 0x1000 + (i * 0x080), 3);
+			evo_data(push, i >> 1);
+			evo_data(push, 0x00000017);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x1010 + (i * 0x080), 1);
+			evo_data(push, 0x00127fff);
+		}
+		evo_mthd(push, 0x0200, 1);
+		evo_data(push, 0x00000001);
+		evo_kick(push, &core->chan);
+	}
+}
+
+static const struct nv50_core_func
+corec37d = {
+	.init = corec37d_init,
+	.ntfy_init = corec37d_ntfy_init,
+	.ntfy_wait_done = corec37d_ntfy_wait_done,
+	.update = corec37d_update,
+	.head = &headc37d,
+	.sor = &sorc37d,
+};
+
+int
+corec37d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+	return core507d_new_(&corec37d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index fb842ed2592f..f592087338c4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
 		int version;
 		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 	} curses[] = {
+		{ GV100_DISP_CURSOR, 0, cursc37a_new },
 		{ GK104_DISP_CURSOR, 0, curs907a_new },
 		{ GF110_DISP_CURSOR, 0, curs907a_new },
 		{ GT214_DISP_CURSOR, 0, curs507a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.h b/drivers/gpu/drm/nouveau/dispnv50/curs.h
index 8edac4507ec8..23aff5fd6747 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.h
@@ -8,6 +8,7 @@ int curs507a_new_(const struct nv50_wimm_func *, struct nouveau_drm *,
 		  struct nv50_wndw **);
 
 int curs907a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
+int cursc37a_new(struct nouveau_drm *, int, s32, struct nv50_wndw **);
 
 int nv50_curs_new(struct nouveau_drm *, int head, struct nv50_wndw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
new file mode 100644
index 000000000000..23fb29d41efe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "curs.h"
+#include "atom.h"
+
+static void
+cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+	nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
+}
+
+static void
+cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
+						 asyw->point.x);
+}
+
+static const struct nv50_wimm_func
+cursc37a = {
+	.point = cursc37a_point,
+	.update = cursc37a_update,
+};
+
+int
+cursc37a_new(struct nouveau_drm *drm, int head, s32 oclass,
+	     struct nv50_wndw **pwndw)
+{
+	return curs507a_new_(&cursc37a, drm, head, oclass,
+			     0x00000001 << head, pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6c860e8b1b16..b83465ae7c1b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -154,6 +154,9 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	if (ret)
 		return ret;
 
+	if (!syncbuf)
+		return 0;
+
 	ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
@@ -2170,6 +2173,9 @@ nv50_display_create(struct drm_device *dev)
 		goto out;
 
 	/* create crtc objects to represent the hw heads */
+	if (disp->disp->object.oclass >= GV100_DISP)
+		crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
+	else
 	if (disp->disp->object.oclass >= GF110_DISP)
 		crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
 	else
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index a89b83f95187..e48c5eb35b49 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -36,11 +36,15 @@ struct nv50_disp_interlock {
 		NV50_DISP_INTERLOCK_CURS,
 		NV50_DISP_INTERLOCK_BASE,
 		NV50_DISP_INTERLOCK_OVLY,
+		NV50_DISP_INTERLOCK_WNDW,
+		NV50_DISP_INTERLOCK_WIMM,
 		NV50_DISP_INTERLOCK__SIZE
 	} type;
 	u32 data;
 };
 
+void corec37d_ntfy_init(struct nouveau_bo *, u32);
+
 struct nv50_chan {
 	struct nvif_object user;
 	struct nvif_device *device;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index ca83006510b7..4f57e5379796 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -475,7 +475,16 @@ nv50_head_create(struct drm_device *dev, int index)
 
 	head->func = disp->core->func->head;
 	head->base.index = index;
-	ret = nv50_base_new(drm, head->base.index, &wndw);
+
+	if (disp->disp->object.oclass < GV100_DISP) {
+		ret = nv50_ovly_new(drm, head->base.index, &wndw);
+		ret = nv50_base_new(drm, head->base.index, &wndw);
+	} else {
+		ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_OVERLAY,
+				    head->base.index * 2 + 1, &wndw);
+		ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_PRIMARY,
+				    head->base.index * 2 + 0, &wndw);
+	}
 	if (ret == 0)
 		ret = nv50_curs_new(drm, head->base.index, &curs);
 	if (ret) {
@@ -495,8 +504,6 @@ nv50_head_create(struct drm_device *dev, int index)
 			goto out;
 	}
 
-	/* allocate overlay resources */
-	ret = nv50_ovly_new(drm, head->base.index, &wndw);
 out:
 	if (ret)
 		nv50_head_destroy(crtc);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 8f2c3ffa4e61..37b3248c6dae 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -71,4 +71,8 @@ void head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
 void head907d_or(struct nv50_head *, struct nv50_head_atom *);
 
 extern const struct nv50_head_func head917d;
+int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
+			 struct nv50_head_atom *);
+
+extern const struct nv50_head_func headc37d;
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 4c019a4417ea..303df8459ca8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -63,7 +63,7 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
 	}
 }
 
-static int
+int
 head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
 		     struct nv50_head_atom *asyh)
 {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
new file mode 100644
index 000000000000..989c14083066
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+static void
+headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		/*XXX: This is a dirty hack until OR depth handling is
+		 *     improved later for deep colour etc.
+		 */
+		switch (asyh->or.depth) {
+		case 6: asyh->or.depth = 5; break;
+		case 5: asyh->or.depth = 4; break;
+		case 2: asyh->or.depth = 1; break;
+		case 0:	asyh->or.depth = 4; break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+
+		evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
+		evo_data(push, 0x00000001 |
+			       asyh->or.depth << 4 |
+			       asyh->or.nvsync << 3 |
+			       asyh->or.nhsync << 2);
+		evo_kick(push, core);
+	}
+}
+
+static void
+headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
+		evo_data(push, 0x80000000 |
+			       asyh->procamp.sat.sin << 16 |
+			       asyh->procamp.sat.cos << 4);
+		evo_kick(push, core);
+	}
+}
+
+static void
+headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x2018 + (head->base.index * 0x0400), 1);
+		evo_data(push, asyh->dither.mode << 8 |
+			       asyh->dither.bits << 4 |
+			       asyh->dither.enable);
+		evo_kick(push, core);
+	}
+}
+
+static void
+headc37d_curs_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		evo_mthd(push, 0x209c + head->base.index * 0x400, 1);
+		evo_data(push, 0x000000cf);
+		evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+static void
+headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 7))) {
+		evo_mthd(push, 0x209c + head->base.index * 0x400, 2);
+		evo_data(push, 0x80000000 |
+			       asyh->curs.layout << 8 |
+			       asyh->curs.format << 0);
+		evo_data(push, 0x000072ff);
+		evo_mthd(push, 0x2088 + head->base.index * 0x400, 1);
+		evo_data(push, asyh->curs.handle);
+		evo_mthd(push, 0x2090 + head->base.index * 0x400, 1);
+		evo_data(push, asyh->curs.offset >> 8);
+		evo_kick(push, core);
+	}
+}
+
+static int
+headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
+		     struct nv50_head_atom *asyh)
+{
+	asyh->curs.format = asyw->image.format;
+	return 0;
+}
+
+static void
+headc37d_olut_clr(struct nv50_head *head)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 2))) {
+		evo_mthd(push, 0x20ac + (head->base.index * 0x400), 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, core);
+	}
+}
+
+static void
+headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		evo_mthd(push, 0x20a4 + (head->base.index * 0x400), 3);
+		evo_data(push, asyh->olut.output_mode << 8 |
+			       asyh->olut.range << 4 |
+			       asyh->olut.size);
+		evo_data(push, asyh->olut.offset >> 8);
+		evo_data(push, asyh->olut.handle);
+		evo_kick(push, core);
+	}
+}
+
+static void
+headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	asyh->olut.mode = 2;
+	asyh->olut.size = 0;
+	asyh->olut.range = 0;
+	asyh->olut.output_mode = 1;
+}
+
+static void
+headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	struct nv50_head_mode *m = &asyh->mode;
+	u32 *push;
+	if ((push = evo_wait(core, 12))) {
+		evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
+		evo_data(push, (m->v.active  << 16) | m->h.active );
+		evo_data(push, (m->v.synce   << 16) | m->h.synce  );
+		evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
+		evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
+		evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+		evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+		evo_data(push, m->clock * 1000);
+		evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
+		evo_data(push, m->clock * 1000);
+		/*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+		evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
+		evo_data(push, 0x00000124);
+		evo_kick(push, core);
+	}
+}
+
+static void
+headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+	u32 *push;
+	if ((push = evo_wait(core, 4))) {
+		evo_mthd(push, 0x204c + (head->base.index * 0x400), 1);
+		evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
+		evo_mthd(push, 0x2058 + (head->base.index * 0x400), 1);
+		evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
+		evo_kick(push, core);
+	}
+}
+
+const struct nv50_head_func
+headc37d = {
+	.view = headc37d_view,
+	.mode = headc37d_mode,
+	.olut = headc37d_olut,
+	.olut_set = headc37d_olut_set,
+	.olut_clr = headc37d_olut_clr,
+	.curs_layout = head917d_curs_layout,
+	.curs_format = headc37d_curs_format,
+	.curs_set = headc37d_curs_set,
+	.curs_clr = headc37d_curs_clr,
+	.dither = headc37d_dither,
+	.procamp = headc37d_procamp,
+	.or = headc37d_or,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
new file mode 100644
index 000000000000..dff059241c5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+
+static void
+sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
+	     struct nv50_head_atom *asyh)
+{
+	u32 *push;
+	if ((push = evo_wait(&core->chan, 2))) {
+		evo_mthd(push, 0x0300 + (or * 0x20), 1);
+		evo_data(push, ctrl);
+		evo_kick(push, &core->chan);
+	}
+}
+
+const struct nv50_outp_func
+sorc37d = {
+	.ctrl = sorc37d_ctrl,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
new file mode 100644
index 000000000000..fc36e0696407
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wimm.h"
+
+#include <nvif/class.h>
+
+int
+nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
+{
+	struct {
+		s32 oclass;
+		int version;
+		int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
+	} wimms[] = {
+		{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid;
+
+	cid = nvif_mclass(&disp->disp->object, wimms);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported window immediate class\n");
+		return cid;
+	}
+
+	return wimms[cid].init(drm, wimms[cid].oclass, wndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.h b/drivers/gpu/drm/nouveau/dispnv50/wimm.h
new file mode 100644
index 000000000000..363052309be9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.h
@@ -0,0 +1,8 @@
+#ifndef __NV50_KMS_WIMM_H__
+#define __NV50_KMS_WIMM_H__
+#include "wndw.h"
+
+int nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *);
+
+int wimmc37b_init(struct nouveau_drm *, s32, struct nv50_wndw *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
new file mode 100644
index 000000000000..9103b8494279
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wimm.h"
+#include "atom.h"
+#include "wndw.h"
+
+#include <nvif/clc37b.h>
+
+static void
+wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wimm, 2))) {
+		evo_mthd(push, 0x0200, 1);
+		if (interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)
+			evo_data(push, 0x00000003);
+		else
+			evo_data(push, 0x00000001);
+		evo_kick(push, &wndw->wimm);
+	}
+}
+
+static void
+wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wimm, 2))) {
+		evo_mthd(push, 0x0208, 1);
+		evo_data(push, asyw->point.y << 16 | asyw->point.x);
+		evo_kick(push, &wndw->wimm);
+	}
+}
+
+static const struct nv50_wimm_func
+wimmc37b = {
+	.point = wimmc37b_point,
+	.update = wimmc37b_update,
+};
+
+static int
+wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+	       s32 oclass, struct nv50_wndw *wndw)
+{
+	struct nvc37b_window_imm_channel_dma_v0 args = {
+		.pushbuf = 0xb0007b00 | wndw->id,
+		.index = wndw->id,
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int ret;
+
+	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+			       &oclass, 0, &args, sizeof(args), 0,
+			       &wndw->wimm);
+	if (ret) {
+		NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	wndw->immd = func;
+	return 0;
+}
+
+int
+wimmc37b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
+{
+	return wimmc37b_init_(&wimmc37b, drm, oclass, wndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index c7c08fae383f..224963b533a6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -20,6 +20,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include "wndw.h"
+#include "wimm.h"
 
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
@@ -148,11 +149,15 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
 
 	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
 	if (asyw->set.point) {
+		if (asyw->set.point = false, asyw->set.mask)
+			interlock[wndw->interlock.type] |= wndw->interlock.data;
+		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
+
 		wndw->immd->point(wndw, asyw);
 		wndw->immd->update(wndw, interlock);
+	} else {
+		interlock[wndw->interlock.type] |= wndw->interlock.data;
 	}
-
-	interlock[wndw->interlock.type] |= wndw->interlock.data;
 }
 
 void
@@ -605,3 +610,32 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 	wndw->notify.func = nv50_wndw_notify;
 	return 0;
 }
+
+int
+nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+	      struct nv50_wndw **pwndw)
+{
+	struct {
+		s32 oclass;
+		int version;
+		int (*new)(struct nouveau_drm *, enum drm_plane_type,
+			   int, s32, struct nv50_wndw **);
+	} wndws[] = {
+		{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
+		{}
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	int cid, ret;
+
+	cid = nvif_mclass(&disp->disp->object, wndws);
+	if (cid < 0) {
+		NV_ERROR(drm, "No supported window class\n");
+		return cid;
+	}
+
+	ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
+	if (ret)
+		return ret;
+
+	return nv50_wimm_init(drm, *pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 745304d06af1..b0b6428034b0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -87,4 +87,10 @@ struct nv50_wimm_func {
 };
 
 extern const struct nv50_wimm_func curs507a;
+
+int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+		 struct nv50_wndw **);
+
+int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
+		  struct nv50_wndw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
new file mode 100644
index 000000000000..44afb0f069a5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <nouveau_bo.h>
+
+#include <nvif/clc37e.h>
+
+static void
+wndwc37e_ilut_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x02b8, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 4))) {
+		evo_mthd(push, 0x02b0, 3);
+		evo_data(push, asyw->xlut.i.output_mode << 8 |
+			       asyw->xlut.i.range << 4 |
+			       asyw->xlut.i.size);
+		evo_data(push, asyw->xlut.i.offset >> 8);
+		evo_data(push, asyw->xlut.handle);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	asyw->xlut.i.mode = 2;
+	asyw->xlut.i.size = 0;
+	asyw->xlut.i.range = 0;
+	asyw->xlut.i.output_mode = 1;
+}
+
+static void
+wndwc37e_image_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 4))) {
+		evo_mthd(push, 0x0308, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x0240, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+
+	if (!(push = evo_wait(&wndw->wndw, 25)))
+		return;
+
+	evo_mthd(push, 0x0308, 1);
+	evo_data(push, asyw->image.mode << 4 | asyw->image.interval);
+	evo_mthd(push, 0x0224, 4);
+	evo_data(push, asyw->image.h << 16 | asyw->image.w);
+	evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
+	evo_data(push, asyw->image.colorspace << 8 | asyw->image.format);
+	evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
+	evo_mthd(push, 0x0240, 1);
+	evo_data(push, asyw->image.handle[0]);
+	evo_mthd(push, 0x0260, 1);
+	evo_data(push, asyw->image.offset[0] >> 8);
+	evo_mthd(push, 0x0290, 1);
+	evo_data(push, (asyw->state.src_y >> 16) << 16 |
+		       (asyw->state.src_x >> 16));
+	evo_mthd(push, 0x0298, 1);
+	evo_data(push, (asyw->state.src_h >> 16) << 16 |
+		       (asyw->state.src_w >> 16));
+	evo_mthd(push, 0x02a4, 1);
+	evo_data(push, asyw->state.crtc_h << 16 |
+		       asyw->state.crtc_w);
+
+	/*XXX: Composition-related stuff.  Need to implement properly. */
+	evo_mthd(push, 0x02ec, 1);
+	evo_data(push, (2 - (wndw->id & 1)) << 4);
+	evo_mthd(push, 0x02f4, 5);
+	evo_data(push, 0x00000011);
+	evo_data(push, 0xffff0000);
+	evo_data(push, 0xffff0000);
+	evo_data(push, 0xffff0000);
+	evo_data(push, 0xffff0000);
+	evo_kick(push, &wndw->wndw);
+}
+
+static void
+wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x021c, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 3))) {
+		evo_mthd(push, 0x021c, 2);
+		evo_data(push, asyw->ntfy.handle);
+		evo_data(push, asyw->ntfy.offset | asyw->ntfy.awaken);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc37e_sema_clr(struct nv50_wndw *wndw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 2))) {
+		evo_mthd(push, 0x0218, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 5))) {
+		evo_mthd(push, 0x020c, 4);
+		evo_data(push, asyw->sema.offset);
+		evo_data(push, asyw->sema.acquire);
+		evo_data(push, asyw->sema.release);
+		evo_data(push, asyw->sema.handle);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
+{
+	u32 *push;
+	if ((push = evo_wait(&wndw->wndw, 5))) {
+		evo_mthd(push, 0x0370, 2);
+		evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
+			       interlock[NV50_DISP_INTERLOCK_CORE]);
+		evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
+		evo_mthd(push, 0x0200, 1);
+		if (interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)
+			evo_data(push, 0x00001001);
+		else
+			evo_data(push, 0x00000001);
+		evo_kick(push, &wndw->wndw);
+	}
+}
+
+static void
+wndwc37e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+		 struct nv50_head_atom *asyh)
+{
+}
+
+static int
+wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+		 struct nv50_head_atom *asyh)
+{
+	return drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+						   DRM_PLANE_HELPER_NO_SCALING,
+						   DRM_PLANE_HELPER_NO_SCALING,
+						   true, true);
+}
+
+static const u32
+wndwc37e_format[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_ARGB2101010,
+	0
+};
+
+static const struct nv50_wndw_func
+wndwc37e = {
+	.acquire = wndwc37e_acquire,
+	.release = wndwc37e_release,
+	.sema_set = wndwc37e_sema_set,
+	.sema_clr = wndwc37e_sema_clr,
+	.ntfy_set = wndwc37e_ntfy_set,
+	.ntfy_clr = wndwc37e_ntfy_clr,
+	.ntfy_reset = corec37d_ntfy_init,
+	.ntfy_wait_begun = base507c_ntfy_wait_begun,
+	.ilut = wndwc37e_ilut,
+	.xlut_set = wndwc37e_ilut_set,
+	.xlut_clr = wndwc37e_ilut_clr,
+	.image_set = wndwc37e_image_set,
+	.image_clr = wndwc37e_image_clr,
+	.update = wndwc37e_update,
+};
+
+static int
+wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
+	      enum drm_plane_type type, int index, s32 oclass, u32 heads,
+	      struct nv50_wndw **pwndw)
+{
+	struct nvc37e_window_channel_dma_v0 args = {
+		.pushbuf = 0xb0007e00 | index,
+		.index = index,
+	};
+	struct nv50_disp *disp = nv50_disp(drm->dev);
+	struct nv50_wndw *wndw;
+	int ret;
+
+	ret = nv50_wndw_new_(func, drm->dev, type, "wndw", index,
+			     wndwc37e_format, heads, NV50_DISP_INTERLOCK_WNDW,
+			     BIT(index), &wndw);
+	if (*pwndw = wndw, ret)
+		return ret;
+
+	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+			       &oclass, 0, &args, sizeof(args),
+			       disp->sync->bo.offset, &wndw->wndw);
+	if (ret) {
+		NV_ERROR(drm, "qndw%04x allocation failed: %d\n", oclass, ret);
+		return ret;
+	}
+
+	wndw->ntfy = NV50_DISP_WNDW_NTFY(wndw->id);
+	wndw->sema = NV50_DISP_WNDW_SEM0(wndw->id);
+	wndw->data = 0x00000000;
+	return 0;
+}
+
+int
+wndwc37e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+	     s32 oclass, struct nv50_wndw **pwndw)
+{
+	return wndwc37e_new_(&wndwc37e, drm, type, index, oclass,
+			     BIT(index >> 1), pwndw);
+}

From 37e1c45a58b5c1f699d583483f612462418dd2ee Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:48 +1000
Subject: [PATCH 1161/1286] drm/nouveau/fifo/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/class.h  |   3 +
 drivers/gpu/drm/nouveau/include/nvif/device.h |   3 +
 drivers/gpu/drm/nouveau/include/nvif/user.h   |  19 ++
 .../drm/nouveau/include/nvkm/engine/fifo.h    |   1 +
 drivers/gpu/drm/nouveau/nouveau_chan.c        |   3 +-
 drivers/gpu/drm/nouveau/nouveau_dma.c         |   5 +
 drivers/gpu/drm/nouveau/nouveau_drm.c         |   8 +
 drivers/gpu/drm/nouveau/nvif/Kbuild           |   4 +
 drivers/gpu/drm/nouveau/nvif/device.c         |   2 +
 drivers/gpu/drm/nouveau/nvif/user.c           |  64 ++++
 drivers/gpu/drm/nouveau/nvif/userc361.c       |  33 ++
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |   1 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/Kbuild   |   4 +
 .../drm/nouveau/nvkm/engine/fifo/changk104.h  |  14 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  |  10 +
 .../gpu/drm/nouveau/nvkm/engine/fifo/gk104.h  |   6 +
 .../nouveau/nvkm/engine/fifo/gpfifogk104.c    |  28 +-
 .../nouveau/nvkm/engine/fifo/gpfifogv100.c    | 225 +++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/fifo/gv100.c  | 306 ++++++++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/fifo/user.h   |   6 +
 .../drm/nouveau/nvkm/engine/fifo/usergv100.c  |  45 +++
 21 files changed, 779 insertions(+), 11 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/include/nvif/user.h
 create mode 100644 drivers/gpu/drm/nouveau/nvif/user.c
 create mode 100644 drivers/gpu/drm/nouveau/nvif/userc361.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 8c9aa556be0e..a2fdbc637788 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -52,6 +52,8 @@
 
 #define NV04_DISP                                     /* cl0046.h */ 0x00000046
 
+#define VOLTA_USERMODE_A                                             0x0000c361
+
 #define NV03_CHANNEL_DMA                              /* cl506b.h */ 0x0000006b
 #define NV10_CHANNEL_DMA                              /* cl506b.h */ 0x0000006e
 #define NV17_CHANNEL_DMA                              /* cl506b.h */ 0x0000176e
@@ -66,6 +68,7 @@
 #define KEPLER_CHANNEL_GPFIFO_B                       /* cla06f.h */ 0x0000a16f
 #define MAXWELL_CHANNEL_GPFIFO_A                      /* cla06f.h */ 0x0000b06f
 #define PASCAL_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000c06f
+#define VOLTA_CHANNEL_GPFIFO_A                        /* cla06f.h */ 0x0000c36f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
 #define G82_DISP                                      /* cl5070.h */ 0x00008270
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 76fe21e395de..ef839bd1d37e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -4,6 +4,7 @@
 
 #include <nvif/object.h>
 #include <nvif/cl0080.h>
+#include <nvif/user.h>
 
 struct nvif_device {
 	struct nvif_object object;
@@ -13,6 +14,8 @@ struct nvif_device {
 		u64 engines;
 	} *runlist;
 	int runlists;
+
+	struct nvif_user user;
 };
 
 int  nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/user.h b/drivers/gpu/drm/nouveau/include/nvif/user.h
new file mode 100644
index 000000000000..03c11826b693
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/user.h
@@ -0,0 +1,19 @@
+#ifndef __NVIF_USER_H__
+#define __NVIF_USER_H__
+#include <nvif/object.h>
+struct nvif_device;
+
+struct nvif_user {
+	const struct nvif_user_func *func;
+	struct nvif_object object;
+};
+
+struct nvif_user_func {
+	void (*doorbell)(struct nvif_user *, u32 token);
+};
+
+int nvif_user_init(struct nvif_device *);
+void nvif_user_fini(struct nvif_device *);
+
+extern const struct nvif_user_func nvif_userc361;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 0d96edee1e6a..7e39fbed2519 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -73,4 +73,5 @@ int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 97900e9cfe3f..92d3115f96b5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -220,7 +220,8 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 		    u64 runlist, struct nouveau_channel **pchan)
 {
 	struct nouveau_cli *cli = (void *)device->object.client;
-	static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
+	static const u16 oclasses[] = { VOLTA_CHANNEL_GPFIFO_A,
+					PASCAL_CHANNEL_GPFIFO_A,
 					MAXWELL_CHANNEL_GPFIFO_A,
 					KEPLER_CHANNEL_GPFIFO_B,
 					KEPLER_CHANNEL_GPFIFO_A,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index e0664d28802b..945afd34138e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -28,6 +28,8 @@
 #include "nouveau_dma.h"
 #include "nouveau_vmm.h"
 
+#include <nvif/user.h>
+
 void
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
 {
@@ -82,6 +84,7 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 void
 nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
 {
+	struct nvif_user *user = &chan->drm->client.device.user;
 	struct nouveau_bo *pb = chan->push.buffer;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
 
@@ -97,6 +100,8 @@ nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
 	nouveau_bo_rd32(pb, 0);
 
 	nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
+	if (user->func && user->func->doorbell)
+		user->func->doorbell(user, chan->chid);
 	chan->dma.ib_free--;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8e506c5d5a73..775443c9af94 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -39,6 +39,7 @@
 
 #include <nvif/driver.h>
 #include <nvif/fifo.h>
+#include <nvif/user.h>
 
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
@@ -310,6 +311,12 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	if (ret)
 		return;
 
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
+		ret = nvif_user_init(device);
+		if (ret)
+			return;
+	}
+
 	/* initialise synchronisation routines */
 	/*XXX: this is crap, but the fence/channel stuff is a little
 	 *     backwards in some places.  this will be fixed.
@@ -341,6 +348,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		case KEPLER_CHANNEL_GPFIFO_B:
 		case MAXWELL_CHANNEL_GPFIFO_A:
 		case PASCAL_CHANNEL_GPFIFO_A:
+		case VOLTA_CHANNEL_GPFIFO_A:
 			ret = nvc0_fence_create(drm);
 			break;
 		default:
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index 3db12504140f..42e8c85caa33 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -8,3 +8,7 @@ nvif-y += nvif/mem.o
 nvif-y += nvif/mmu.o
 nvif-y += nvif/notify.o
 nvif-y += nvif/vmm.o
+
+# Usermode classes
+nvif-y += nvif/user.o
+nvif-y += nvif/userc361.o
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index ca5eb3dde70a..1ec101ba3b42 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -37,6 +37,7 @@ nvif_device_time(struct nvif_device *device)
 void
 nvif_device_fini(struct nvif_device *device)
 {
+	nvif_user_fini(device);
 	kfree(device->runlist);
 	device->runlist = NULL;
 	nvif_object_fini(&device->object);
@@ -49,6 +50,7 @@ nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
 	int ret = nvif_object_init(parent, handle, oclass, data, size,
 				   &device->object);
 	device->runlist = NULL;
+	device->user.func = NULL;
 	if (ret == 0) {
 		device->info.version = 0;
 		ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c
new file mode 100644
index 000000000000..10da3cdca647
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/user.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/user.h>
+#include <nvif/device.h>
+
+#include <nvif/class.h>
+
+void
+nvif_user_fini(struct nvif_device *device)
+{
+	if (device->user.func) {
+		nvif_object_fini(&device->user.object);
+		device->user.func = NULL;
+	}
+}
+
+int
+nvif_user_init(struct nvif_device *device)
+{
+	struct {
+		s32 oclass;
+		int version;
+		const struct nvif_user_func *func;
+	} users[] = {
+		{ VOLTA_USERMODE_A, -1, &nvif_userc361 },
+		{}
+	};
+	int cid, ret;
+
+	if (device->user.func)
+		return 0;
+
+	cid = nvif_mclass(&device->object, users);
+	if (cid < 0)
+		return cid;
+
+	ret = nvif_object_init(&device->object, 0, users[cid].oclass, NULL, 0,
+			       &device->user.object);
+	if (ret)
+		return ret;
+
+	nvif_object_map(&device->user.object, NULL, 0);
+	device->user.func = users[cid].func;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/userc361.c b/drivers/gpu/drm/nouveau/nvif/userc361.c
new file mode 100644
index 000000000000..19f9958e7e01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/userc361.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/user.h>
+
+static void
+nvif_userc361_doorbell(struct nvif_user *user, u32 token)
+{
+	nvif_wr32(&user->object, 0x90, token);
+}
+
+const struct nvif_user_func
+nvif_userc361 = {
+	.doorbell = nvif_userc361_doorbell,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 43f6b7afdb52..95a56d4ba339 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2418,6 +2418,7 @@ nv140_chipset = {
 	.top = gk104_top_new,
 	.disp = gv100_disp_new,
 	.dma = gv100_dma_new,
+	.fifo = gv100_fifo_new,
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index b888ea64df21..f00408577a6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -15,6 +15,7 @@ nvkm-y += nvkm/engine/fifo/gm200.o
 nvkm-y += nvkm/engine/fifo/gm20b.o
 nvkm-y += nvkm/engine/fifo/gp100.o
 nvkm-y += nvkm/engine/fifo/gp10b.o
+nvkm-y += nvkm/engine/fifo/gv100.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
@@ -31,3 +32,6 @@ nvkm-y += nvkm/engine/fifo/gpfifonv50.o
 nvkm-y += nvkm/engine/fifo/gpfifog84.o
 nvkm-y += nvkm/engine/fifo/gpfifogf100.o
 nvkm-y += nvkm/engine/fifo/gpfifogk104.o
+nvkm-y += nvkm/engine/fifo/gpfifogv100.o
+
+nvkm-y += nvkm/engine/fifo/usergv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 391e864c2a4a..8e28ba6b2307 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -20,6 +20,20 @@ struct gk104_fifo_chan {
 	} engn[NVKM_SUBDEV_NR];
 };
 
+extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func;
+
 int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
 			  void *data, u32 size, struct nvkm_object **);
+void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *);
+void gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *);
+void gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *);
+int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
+				  struct nvkm_object *);
+void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
+				   struct nvkm_engine *);
+int gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *);
+int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
+
+int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+			  void *data, u32 size, struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 767e0ab44cb8..a99046414a18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -103,6 +103,10 @@ gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 	if (oclass->engn == &fifo->func->chan) {
 		const struct gk104_fifo_chan_user *user = oclass->engn;
 		return user->ctor(fifo, oclass, argv, argc, pobject);
+	} else
+	if (oclass->engn == &fifo->func->user) {
+		const struct gk104_fifo_user_user *user = oclass->engn;
+		return user->ctor(oclass, argv, argc, pobject);
 	}
 	WARN_ON(1);
 	return -EINVAL;
@@ -115,6 +119,12 @@ gk104_fifo_class_get(struct nvkm_fifo *base, int index,
 	struct gk104_fifo *fifo = gk104_fifo(base);
 	int c = 0;
 
+	if (fifo->func->user.ctor && c++ == index) {
+		oclass->base =  fifo->func->user.user;
+		oclass->engn = &fifo->func->user;
+		return 0;
+	}
+
 	if (fifo->func->chan.ctor && c++ == index) {
 		oclass->base =  fifo->func->chan.user;
 		oclass->engn = &fifo->func->chan;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 1d182d8d2fce..d295b81e18d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -63,6 +63,12 @@ struct gk104_fifo_func {
 			     struct nvkm_memory *, u32 offset);
 	} *runlist;
 
+	struct gk104_fifo_user_user {
+		struct nvkm_sclass user;
+		int (*ctor)(const struct nvkm_oclass *, void *, u32,
+			    struct nvkm_object **);
+	} user;
+
 	struct gk104_fifo_chan_user {
 		struct nvkm_sclass user;
 		int (*ctor)(struct gk104_fifo *, const struct nvkm_oclass *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 60e7d72d6e46..118b37aea318 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -34,8 +34,8 @@
 #include <nvif/cla06f.h>
 #include <nvif/unpack.h>
 
-static int
-gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
+int
+gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *chan)
 {
 	struct gk104_fifo *fifo = chan->fifo;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
@@ -44,7 +44,6 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
 	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
 	int ret = 0;
 
-	mutex_lock(&subdev->mutex);
 	if (cgrp)
 		nvkm_wr32(device, 0x002634, cgrp->id | 0x01000000);
 	else
@@ -59,7 +58,16 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
 		nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
 		ret = -ETIMEDOUT;
 	}
-	mutex_unlock(&subdev->mutex);
+	return ret;
+}
+
+int
+gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
+{
+	int ret;
+	mutex_lock(&chan->base.fifo->engine.subdev.mutex);
+	ret = gk104_fifo_gpfifo_kick_locked(chan);
+	mutex_unlock(&chan->base.fifo->engine.subdev.mutex);
 	return ret;
 }
 
@@ -138,7 +146,7 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 	return 0;
 }
 
-static void
+void
 gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
@@ -147,7 +155,7 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
 	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }
 
-static int
+int
 gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine,
 			      struct nvkm_object *object)
@@ -172,7 +180,7 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 			       chan->engn[engn].vma, NULL, 0);
 }
 
-static void
+void
 gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
@@ -190,7 +198,7 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
 	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
 }
 
-static void
+void
 gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
@@ -210,7 +218,7 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 	}
 }
 
-static void *
+void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
@@ -218,7 +226,7 @@ gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 	return chan;
 }
 
-static const struct nvkm_fifo_chan_func
+const struct nvkm_fifo_chan_func
 gk104_fifo_gpfifo_func = {
 	.dtor = gk104_fifo_gpfifo_dtor,
 	.init = gk104_fifo_gpfifo_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
new file mode 100644
index 000000000000..9598853ced56
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "changk104.h"
+#include "cgrp.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+
+#include <nvif/cla06f.h>
+#include <nvif/unpack.h>
+
+static int
+gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
+{
+	struct nvkm_subdev *subdev = &chan->base.fifo->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 mask = ce ? 0x00020000 : 0x00010000;
+	const u32 data = valid ? mask : 0x00000000;
+	int ret;
+
+	/* Block runlist to prevent the channel from being rescheduled. */
+	mutex_lock(&subdev->mutex);
+	nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
+
+	/* Preempt the channel. */
+	ret = gk104_fifo_gpfifo_kick_locked(chan);
+	if (ret == 0) {
+		/* Update engine context validity. */
+		nvkm_kmap(chan->base.inst);
+		nvkm_mo32(chan->base.inst, 0x0ac, mask, data);
+		nvkm_done(chan->base.inst);
+	}
+
+	/* Resume runlist. */
+	nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
+	mutex_unlock(&subdev->mutex);
+	return ret;
+}
+
+static int
+gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret;
+
+	if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
+	    engine->subdev.index <= NVKM_ENGINE_CE_LAST)
+		return gk104_fifo_gpfifo_kick(chan);
+
+	ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
+	if (ret && suspend)
+		return ret;
+
+	nvkm_kmap(inst);
+	nvkm_wo32(inst, 0x0210, 0x00000000);
+	nvkm_wo32(inst, 0x0214, 0x00000000);
+	nvkm_done(inst);
+	return ret;
+}
+
+static int
+gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	u64 addr;
+
+	if (engine->subdev.index >= NVKM_ENGINE_CE0 &&
+	    engine->subdev.index <= NVKM_ENGINE_CE_LAST)
+		return 0;
+
+	addr = chan->engn[engine->subdev.index].vma->addr;
+	nvkm_kmap(inst);
+	nvkm_wo32(inst, 0x210, lower_32_bits(addr) | 0x00000004);
+	nvkm_wo32(inst, 0x214, upper_32_bits(addr));
+	nvkm_done(inst);
+
+	return gv100_fifo_gpfifo_engine_valid(chan, false, true);
+}
+
+const struct nvkm_fifo_chan_func
+gv100_fifo_gpfifo_func = {
+	.dtor = gk104_fifo_gpfifo_dtor,
+	.init = gk104_fifo_gpfifo_init,
+	.fini = gk104_fifo_gpfifo_fini,
+	.ntfy = gf100_fifo_chan_ntfy,
+	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+	.engine_init = gv100_fifo_gpfifo_engine_init,
+	.engine_fini = gv100_fifo_gpfifo_engine_fini,
+};
+
+static int
+gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
+		       u64 vmm, u64 ioffset, u64 ilength,
+		       const struct nvkm_oclass *oclass,
+		       struct nvkm_object **pobject)
+{
+	struct gk104_fifo_chan *chan;
+	int runlist = ffs(*runlists) -1, ret, i;
+	unsigned long engm;
+	u64 subdevs = 0;
+	u64 usermem;
+
+	if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
+		return -EINVAL;
+	*runlists = BIT_ULL(runlist);
+
+	engm = fifo->runlist[runlist].engm;
+	for_each_set_bit(i, &engm, fifo->engine_nr) {
+		if (fifo->engine[i].engine)
+			subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
+	}
+
+	/* Allocate the channel. */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	chan->runl = runlist;
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gv100_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, vmm, 0, subdevs,
+				  1, fifo->user.bar->addr, 0x200,
+				  oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	*chid = chan->base.chid;
+
+	/* Hack to support GPUs where even individual channels should be
+	 * part of a channel group.
+	 */
+	if (fifo->func->cgrp_force) {
+		if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
+			return -ENOMEM;
+		chan->cgrp->id = chan->base.chid;
+		INIT_LIST_HEAD(&chan->cgrp->head);
+		INIT_LIST_HEAD(&chan->cgrp->chan);
+		chan->cgrp->chan_nr = 0;
+	}
+
+	/* Clear channel control registers. */
+	usermem = chan->base.chid * 0x200;
+	ilength = order_base_2(ilength / 8);
+
+	nvkm_kmap(fifo->user.mem);
+	for (i = 0; i < 0x200; i += 4)
+		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x00c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x010, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x030, 0x7ffff902);
+	nvkm_wo32(chan->base.inst, 0x048, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x04c, upper_32_bits(ioffset) |
+					  (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x0e4, 0x00000020);
+	nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
+	nvkm_wo32(chan->base.inst, 0x0f4, 0x00001100);
+	nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
+	nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
+	nvkm_wo32(chan->base.inst, 0x220, 0x020a1000);
+	nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+	nvkm_done(chan->base.inst);
+	return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+}
+
+int
+gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct kepler_channel_gpfifo_a_v0 v0;
+	} *args = data;
+	int ret = -ENOSYS;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+				   "ioffset %016llx ilength %08x "
+				   "runlist %016llx\n",
+			   args->v0.version, args->v0.vmm, args->v0.ioffset,
+			   args->v0.ilength, args->v0.runlist);
+		return gv100_fifo_gpfifo_new_(fifo,
+					      &args->v0.runlist,
+					      &args->v0.chid,
+					       args->v0.vmm,
+					       args->v0.ioffset,
+					       args->v0.ilength,
+					      oclass, pobject);
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
new file mode 100644
index 000000000000..4e1d159c0ae7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+#include "user.h"
+
+#include <core/gpuobj.h>
+
+#include <nvif/class.h>
+
+static void
+gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
+			struct nvkm_memory *memory, u32 offset)
+{
+	struct nvkm_memory *usermem = chan->fifo->user.mem;
+	const u64 user = nvkm_memory_addr(usermem) + (chan->base.chid * 0x200);
+	const u64 inst = chan->base.inst->addr;
+
+	nvkm_wo32(memory, offset + 0x0, lower_32_bits(user));
+	nvkm_wo32(memory, offset + 0x4, upper_32_bits(user));
+	nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->base.chid);
+	nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
+}
+
+static void
+gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
+			struct nvkm_memory *memory, u32 offset)
+{
+	nvkm_wo32(memory, offset + 0x0, (128 << 24) | (3 << 16) | 0x00000001);
+	nvkm_wo32(memory, offset + 0x4, cgrp->chan_nr);
+	nvkm_wo32(memory, offset + 0x8, cgrp->id);
+	nvkm_wo32(memory, offset + 0xc, 0x00000000);
+}
+
+const struct gk104_fifo_runlist_func
+gv100_fifo_runlist = {
+	.size = 16,
+	.cgrp = gv100_fifo_runlist_cgrp,
+	.chan = gv100_fifo_runlist_chan,
+};
+
+static const struct nvkm_enum
+gv100_fifo_fault_gpcclient[] = {
+	{ 0x00, "T1_0" },
+	{ 0x01, "T1_1" },
+	{ 0x02, "T1_2" },
+	{ 0x03, "T1_3" },
+	{ 0x04, "T1_4" },
+	{ 0x05, "T1_5" },
+	{ 0x06, "T1_6" },
+	{ 0x07, "T1_7" },
+	{ 0x08, "PE_0" },
+	{ 0x09, "PE_1" },
+	{ 0x0a, "PE_2" },
+	{ 0x0b, "PE_3" },
+	{ 0x0c, "PE_4" },
+	{ 0x0d, "PE_5" },
+	{ 0x0e, "PE_6" },
+	{ 0x0f, "PE_7" },
+	{ 0x10, "RAST" },
+	{ 0x11, "GCC" },
+	{ 0x12, "GPCCS" },
+	{ 0x13, "PROP_0" },
+	{ 0x14, "PROP_1" },
+	{ 0x15, "PROP_2" },
+	{ 0x16, "PROP_3" },
+	{ 0x17, "GPM" },
+	{ 0x18, "LTP_UTLB_0" },
+	{ 0x19, "LTP_UTLB_1" },
+	{ 0x1a, "LTP_UTLB_2" },
+	{ 0x1b, "LTP_UTLB_3" },
+	{ 0x1c, "LTP_UTLB_4" },
+	{ 0x1d, "LTP_UTLB_5" },
+	{ 0x1e, "LTP_UTLB_6" },
+	{ 0x1f, "LTP_UTLB_7" },
+	{ 0x20, "RGG_UTLB" },
+	{ 0x21, "T1_8" },
+	{ 0x22, "T1_9" },
+	{ 0x23, "T1_10" },
+	{ 0x24, "T1_11" },
+	{ 0x25, "T1_12" },
+	{ 0x26, "T1_13" },
+	{ 0x27, "T1_14" },
+	{ 0x28, "T1_15" },
+	{ 0x29, "TPCCS_0" },
+	{ 0x2a, "TPCCS_1" },
+	{ 0x2b, "TPCCS_2" },
+	{ 0x2c, "TPCCS_3" },
+	{ 0x2d, "TPCCS_4" },
+	{ 0x2e, "TPCCS_5" },
+	{ 0x2f, "TPCCS_6" },
+	{ 0x30, "TPCCS_7" },
+	{ 0x31, "PE_8" },
+	{ 0x32, "PE_9" },
+	{ 0x33, "TPCCS_8" },
+	{ 0x34, "TPCCS_9" },
+	{ 0x35, "T1_16" },
+	{ 0x36, "T1_17" },
+	{ 0x37, "T1_18" },
+	{ 0x38, "T1_19" },
+	{ 0x39, "PE_10" },
+	{ 0x3a, "PE_11" },
+	{ 0x3b, "TPCCS_10" },
+	{ 0x3c, "TPCCS_11" },
+	{ 0x3d, "T1_20" },
+	{ 0x3e, "T1_21" },
+	{ 0x3f, "T1_22" },
+	{ 0x40, "T1_23" },
+	{ 0x41, "PE_12" },
+	{ 0x42, "PE_13" },
+	{ 0x43, "TPCCS_12" },
+	{ 0x44, "TPCCS_13" },
+	{ 0x45, "T1_24" },
+	{ 0x46, "T1_25" },
+	{ 0x47, "T1_26" },
+	{ 0x48, "T1_27" },
+	{ 0x49, "PE_14" },
+	{ 0x4a, "PE_15" },
+	{ 0x4b, "TPCCS_14" },
+	{ 0x4c, "TPCCS_15" },
+	{ 0x4d, "T1_28" },
+	{ 0x4e, "T1_29" },
+	{ 0x4f, "T1_30" },
+	{ 0x50, "T1_31" },
+	{ 0x51, "PE_16" },
+	{ 0x52, "PE_17" },
+	{ 0x53, "TPCCS_16" },
+	{ 0x54, "TPCCS_17" },
+	{ 0x55, "T1_32" },
+	{ 0x56, "T1_33" },
+	{ 0x57, "T1_34" },
+	{ 0x58, "T1_35" },
+	{ 0x59, "PE_18" },
+	{ 0x5a, "PE_19" },
+	{ 0x5b, "TPCCS_18" },
+	{ 0x5c, "TPCCS_19" },
+	{ 0x5d, "T1_36" },
+	{ 0x5e, "T1_37" },
+	{ 0x5f, "T1_38" },
+	{ 0x60, "T1_39" },
+	{}
+};
+
+static const struct nvkm_enum
+gv100_fifo_fault_hubclient[] = {
+	{ 0x00, "VIP" },
+	{ 0x01, "CE0" },
+	{ 0x02, "CE1" },
+	{ 0x03, "DNISO" },
+	{ 0x04, "FE" },
+	{ 0x05, "FECS" },
+	{ 0x06, "HOST" },
+	{ 0x07, "HOST_CPU" },
+	{ 0x08, "HOST_CPU_NB" },
+	{ 0x09, "ISO" },
+	{ 0x0a, "MMU" },
+	{ 0x0b, "NVDEC" },
+	{ 0x0d, "NVENC1" },
+	{ 0x0e, "NISO" },
+	{ 0x0f, "P2P" },
+	{ 0x10, "PD" },
+	{ 0x11, "PERF" },
+	{ 0x12, "PMU" },
+	{ 0x13, "RASTERTWOD" },
+	{ 0x14, "SCC" },
+	{ 0x15, "SCC_NB" },
+	{ 0x16, "SEC" },
+	{ 0x17, "SSYNC" },
+	{ 0x18, "CE2" },
+	{ 0x19, "XV" },
+	{ 0x1a, "MMU_NB" },
+	{ 0x1b, "NVENC0" },
+	{ 0x1c, "DFALCON" },
+	{ 0x1d, "SKED" },
+	{ 0x1e, "AFALCON" },
+	{ 0x1f, "DONT_CARE" },
+	{ 0x20, "HSCE0" },
+	{ 0x21, "HSCE1" },
+	{ 0x22, "HSCE2" },
+	{ 0x23, "HSCE3" },
+	{ 0x24, "HSCE4" },
+	{ 0x25, "HSCE5" },
+	{ 0x26, "HSCE6" },
+	{ 0x27, "HSCE7" },
+	{ 0x28, "HSCE8" },
+	{ 0x29, "HSCE9" },
+	{ 0x2a, "HSHUB" },
+	{ 0x2b, "PTP_X0" },
+	{ 0x2c, "PTP_X1" },
+	{ 0x2d, "PTP_X2" },
+	{ 0x2e, "PTP_X3" },
+	{ 0x2f, "PTP_X4" },
+	{ 0x30, "PTP_X5" },
+	{ 0x31, "PTP_X6" },
+	{ 0x32, "PTP_X7" },
+	{ 0x33, "NVENC2" },
+	{ 0x34, "VPR_SCRUBBER0" },
+	{ 0x35, "VPR_SCRUBBER1" },
+	{ 0x36, "DWBIF" },
+	{ 0x37, "FBFALCON" },
+	{ 0x38, "CE_SHIM" },
+	{ 0x39, "GSP" },
+	{}
+};
+
+static const struct nvkm_enum
+gv100_fifo_fault_reason[] = {
+	{ 0x00, "PDE" },
+	{ 0x01, "PDE_SIZE" },
+	{ 0x02, "PTE" },
+	{ 0x03, "VA_LIMIT_VIOLATION" },
+	{ 0x04, "UNBOUND_INST_BLOCK" },
+	{ 0x05, "PRIV_VIOLATION" },
+	{ 0x06, "RO_VIOLATION" },
+	{ 0x07, "WO_VIOLATION" },
+	{ 0x08, "PITCH_MASK_VIOLATION" },
+	{ 0x09, "WORK_CREATION" },
+	{ 0x0a, "UNSUPPORTED_APERTURE" },
+	{ 0x0b, "COMPRESSION_FAILURE" },
+	{ 0x0c, "UNSUPPORTED_KIND" },
+	{ 0x0d, "REGION_VIOLATION" },
+	{ 0x0e, "POISONED" },
+	{ 0x0f, "ATOMIC_VIOLATION" },
+	{}
+};
+
+static const struct nvkm_enum
+gv100_fifo_fault_engine[] = {
+	{ 0x01, "DISPLAY" },
+	{ 0x03, "PTP" },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x06, "PWR_PMU" },
+	{ 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+	{ 0x09, "PERF" },
+	{ 0x1f, "PHYSICAL" },
+	{ 0x20, "HOST0" },
+	{ 0x21, "HOST1" },
+	{ 0x22, "HOST2" },
+	{ 0x23, "HOST3" },
+	{ 0x24, "HOST4" },
+	{ 0x25, "HOST5" },
+	{ 0x26, "HOST6" },
+	{ 0x27, "HOST7" },
+	{ 0x28, "HOST8" },
+	{ 0x29, "HOST9" },
+	{ 0x2a, "HOST10" },
+	{ 0x2b, "HOST11" },
+	{ 0x2c, "HOST12" },
+	{ 0x2d, "HOST13" },
+	{}
+};
+
+static const struct nvkm_enum
+gv100_fifo_fault_access[] = {
+	{ 0x0, "VIRT_READ" },
+	{ 0x1, "VIRT_WRITE" },
+	{ 0x2, "VIRT_ATOMIC" },
+	{ 0x3, "VIRT_PREFETCH" },
+	{ 0x4, "VIRT_ATOMIC_WEAK" },
+	{ 0x8, "PHYS_READ" },
+	{ 0x9, "PHYS_WRITE" },
+	{ 0xa, "PHYS_ATOMIC" },
+	{ 0xb, "PHYS_PREFETCH" },
+	{}
+};
+
+static const struct gk104_fifo_func
+gv100_fifo = {
+	.init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+	.fault.access = gv100_fifo_fault_access,
+	.fault.engine = gv100_fifo_fault_engine,
+	.fault.reason = gv100_fifo_fault_reason,
+	.fault.hubclient = gv100_fifo_fault_hubclient,
+	.fault.gpcclient = gv100_fifo_fault_gpcclient,
+	.runlist = &gv100_fifo_runlist,
+	.user = {{-1,-1,VOLTA_USERMODE_A      }, gv100_fifo_user_new   },
+	.chan = {{ 0, 0,VOLTA_CHANNEL_GPFIFO_A}, gv100_fifo_gpfifo_new },
+	.cgrp_force = true,
+};
+
+int
+gv100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gv100_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
new file mode 100644
index 000000000000..ed840921ebe8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
@@ -0,0 +1,6 @@
+#ifndef __NVKM_FIFO_USER_H__
+#define __NVKM_FIFO_USER_H__
+#include "priv.h"
+int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+			struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c
new file mode 100644
index 000000000000..3dc3b8b312de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "user.h"
+
+static int
+gv100_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+		    enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+	struct nvkm_device *device = object->engine->subdev.device;
+	*addr = 0x810000 + device->func->resource_addr(device, 0);
+	*size = 0x010000;
+	*type = NVKM_OBJECT_MAP_IO;
+	return 0;
+}
+
+static const struct nvkm_object_func
+gv100_fifo_user = {
+	.map = gv100_fifo_user_map,
+};
+
+int
+gv100_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		    struct nvkm_object **pobject)
+{
+	return nvkm_object_new_(&gv100_fifo_user, oclass, argv, argc, pobject);
+}
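
Taken together, the pieces above give clients a work-submission doorbell: gv100_fifo_user_map() exposes 64KiB of BAR0 at offset 0x810000, nvif_user_init() maps it as device->user.object, and the c361 user rings the doorbell by writing a token at offset 0x90. A hedged sketch of a submit-side helper (the token value itself comes from channel setup and is assumed here):

/* Hypothetical helper: ring the doorbell with a channel's token. */
static void
example_kick(struct nvif_device *device, u32 token)
{
	if (device->user.func && device->user.func->doorbell)
		device->user.func->doorbell(&device->user, token);
}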

From 6e1f34e33c17f633ebbd383cab429c820ec0c7b0 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:48 +1000
Subject: [PATCH 1162/1286] drm/nouveau/ce/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/class.h  |  1 +
 .../gpu/drm/nouveau/include/nvkm/engine/ce.h  |  1 +
 drivers/gpu/drm/nouveau/nouveau_bo.c          |  2 +
 drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild |  1 +
 .../gpu/drm/nouveau/nvkm/engine/ce/gv100.c    | 40 +++++++++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |  9 +++++
 6 files changed, 54 insertions(+)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index a2fdbc637788..8688342aca61 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -180,6 +180,7 @@
 #define MAXWELL_DMA_COPY_A                                           0x0000b0b5
 #define PASCAL_DMA_COPY_A                                            0x0000c0b5
 #define PASCAL_DMA_COPY_B                                            0x0000c1b5
+#define VOLTA_DMA_COPY_A                                             0x0000c3b5
 
 #define FERMI_DECOMPRESS                                             0x000090b8
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 553245994450..fc295e1faa19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -10,4 +10,5 @@ int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ab61c038f42c..7214022dfb91 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1141,6 +1141,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
+		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 255d81ccf916..80d784441904 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -5,3 +5,4 @@ nvkm-y += nvkm/engine/ce/gm107.o
 nvkm-y += nvkm/engine/ce/gm200.o
 nvkm-y += nvkm/engine/ce/gp100.o
 nvkm-y += nvkm/engine/ce/gp102.o
+nvkm-y += nvkm/engine/ce/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
new file mode 100644
index 000000000000..fcda3de45857
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+gv100_ce = {
+	.intr = gp100_ce_intr,
+	.sclass = {
+		{ -1, -1, VOLTA_DMA_COPY_A },
+		{}
+	}
+};
+
+int
+gv100_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	return nvkm_engine_new_(&gv100_ce, device, index, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 95a56d4ba339..5fa30613da1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2417,6 +2417,15 @@ nv140_chipset = {
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
 	.disp = gv100_disp_new,
+	.ce[0] = gv100_ce_new,
+	.ce[1] = gv100_ce_new,
+	.ce[2] = gv100_ce_new,
+	.ce[3] = gv100_ce_new,
+	.ce[4] = gv100_ce_new,
+	.ce[5] = gv100_ce_new,
+	.ce[6] = gv100_ce_new,
+	.ce[7] = gv100_ce_new,
+	.ce[8] = gv100_ce_new,
 	.dma = gv100_dma_new,
 	.fifo = gv100_fifo_new,
 };

From d521097f58bdfdc9966b8d10754074c8524133dd Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Tue, 8 May 2018 20:39:48 +1000
Subject: [PATCH 1163/1286] drm/nouveau/gr/gv100: initial support

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/include/nvif/class.h  |   3 +
 .../gpu/drm/nouveau/include/nvkm/engine/gr.h  |   1 +
 .../gpu/drm/nouveau/nvkm/engine/device/base.c |   4 +
 drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild |   2 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c |  10 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h |   5 +
 .../gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c | 215 ++++++++++++++++++
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.c    |  10 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gf100.h    |   6 +-
 .../gpu/drm/nouveau/nvkm/engine/gr/gv100.c    | 120 ++++++++++
 .../gpu/drm/nouveau/nvkm/falcon/msgqueue.c    |   1 +
 .../drm/nouveau/nvkm/subdev/secboot/gp108.c   |  21 ++
 12 files changed, 395 insertions(+), 3 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
 create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c

diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 8688342aca61..6db56bd7d67e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -155,6 +155,8 @@
 #define PASCAL_A                                      /* cl9097.h */ 0x0000c097
 #define PASCAL_B                                      /* cl9097.h */ 0x0000c197
 
+#define VOLTA_A                                       /* cl9097.h */ 0x0000c397
+
 #define NV74_BSP                                                     0x000074b0
 
 #define GT212_MSVLD                                                  0x000085b1
@@ -194,6 +196,7 @@
 #define MAXWELL_COMPUTE_B                                            0x0000b1c0
 #define PASCAL_COMPUTE_A                                             0x0000c0c0
 #define PASCAL_COMPUTE_B                                             0x0000c1c0
+#define VOLTA_COMPUTE_A                                              0x0000c3c0
 
 #define NV74_CIPHER                                                  0x000074c1
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 33b2f2e543ee..ba1518ff8b66 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -48,4 +48,5 @@ int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 5fa30613da1e..e294013426ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2413,6 +2413,7 @@ nv140_chipset = {
 	.mmu = gv100_mmu_new,
 	.pci = gp100_pci_new,
 	.pmu = gp102_pmu_new,
+	.secboot = gp108_secboot_new,
 	.therm = gp100_therm_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,
@@ -2428,6 +2429,9 @@ nv140_chipset = {
 	.ce[8] = gv100_ce_new,
 	.dma = gv100_dma_new,
 	.fifo = gv100_fifo_new,
+	.gr = gv100_gr_new,
+	.nvdec = gp102_nvdec_new,
+	.sec2 = gp102_sec2_new,
 };
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 42342b4a9abe..93e3733f54e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -36,6 +36,7 @@ nvkm-y += nvkm/engine/gr/gp102.o
 nvkm-y += nvkm/engine/gr/gp104.o
 nvkm-y += nvkm/engine/gr/gp107.o
 nvkm-y += nvkm/engine/gr/gp10b.o
+nvkm-y += nvkm/engine/gr/gv100.o
 
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -57,3 +58,4 @@ nvkm-y += nvkm/engine/gr/ctxgp100.o
 nvkm-y += nvkm/engine/gr/ctxgp102.o
 nvkm-y += nvkm/engine/gr/ctxgp104.o
 nvkm-y += nvkm/engine/gr/ctxgp107.o
+nvkm-y += nvkm/engine/gr/ctxgv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 1ed63ed1a283..f0f5a518e52a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1396,10 +1396,14 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
+	if (grctx->r400088) grctx->r400088(gr, false);
 	if (gr->fuc_bundle)
 		gf100_gr_icmd(gr, gr->fuc_bundle);
 	else
 		gf100_gr_icmd(gr, grctx->icmd);
+	if (grctx->sw_veid_bundle_init)
+		gf100_gr_icmd(gr, grctx->sw_veid_bundle_init);
+	if (grctx->r400088) grctx->r400088(gr, true);
 
 	nvkm_wr32(device, 0x404154, idle_timeout);
 
@@ -1448,6 +1452,9 @@ gf100_grctx_generate(struct gf100_gr *gr)
 			break;
 	);
 
+	if (grctx->unkn88c)
+		grctx->unkn88c(gr, true);
+
 	/* Reset FECS. */
 	nvkm_wr32(device, 0x409614, 0x00000070);
 	nvkm_usec(device, 10, NVKM_DELAY);
@@ -1455,6 +1462,9 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	nvkm_usec(device, 10, NVKM_DELAY);
 	nvkm_rd32(device, 0x409614);
 
+	if (grctx->unkn88c)
+		grctx->unkn88c(gr, false);
+
 	/* NV_PGRAPH_FE_PWR_MODE_AUTO. */
 	nvkm_wr32(device, 0x404170, 0x00000010);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 9ce3d0075573..33e932bd73b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -21,6 +21,7 @@ void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int)
 #define mmio_wr32(a,b,c) mmio_refn((a), (b), (c),  0, -1)
 
 struct gf100_grctx_func {
+	void (*unkn88c)(struct gf100_gr *, bool on);
 	/* main context generation function */
 	void  (*main)(struct gf100_gr *, struct gf100_grctx *);
 	/* context-specific modify-on-first-load list generation function */
@@ -35,6 +36,7 @@ struct gf100_grctx_func {
 	/* indirect context data, generated with icmds/mthds */
 	const struct gf100_gr_pack *icmd;
 	const struct gf100_gr_pack *mthd;
+	const struct gf100_gr_pack *sw_veid_bundle_init;
 	/* bundle circular buffer */
 	void (*bundle)(struct gf100_grctx *);
 	u32 bundle_size;
@@ -66,6 +68,7 @@ struct gf100_grctx_func {
 	void (*tpc_mask)(struct gf100_gr *);
 	void (*smid_config)(struct gf100_gr *);
 	/* misc other things */
+	void (*r400088)(struct gf100_gr *, bool);
 	void (*r419cb8)(struct gf100_gr *);
 	void (*r418800)(struct gf100_gr *);
 	void (*r419eb0)(struct gf100_gr *);
@@ -148,6 +151,8 @@ extern const struct gf100_grctx_func gp104_grctx;
 
 extern const struct gf100_grctx_func gp107_grctx;
 
+extern const struct gf100_grctx_func gv100_grctx;
+
 /* context init value lists */
 
 extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
new file mode 100644
index 000000000000..0990765ef191
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+static const struct gf100_gr_init
+gv100_grctx_init_sw_veid_bundle_init_0[] = {
+	{ 0x00001000, 64, 0x00100000, 0x00000008 },
+	{ 0x00000941, 64, 0x00100000, 0x00000000 },
+	{ 0x0000097e, 64, 0x00100000, 0x00000000 },
+	{ 0x0000097f, 64, 0x00100000, 0x00000100 },
+	{ 0x0000035c, 64, 0x00100000, 0x00000000 },
+	{ 0x0000035d, 64, 0x00100000, 0x00000000 },
+	{ 0x00000a08, 64, 0x00100000, 0x00000000 },
+	{ 0x00000a09, 64, 0x00100000, 0x00000000 },
+	{ 0x00000a0a, 64, 0x00100000, 0x00000000 },
+	{ 0x00000352, 64, 0x00100000, 0x00000000 },
+	{ 0x00000353, 64, 0x00100000, 0x00000000 },
+	{ 0x00000358, 64, 0x00100000, 0x00000000 },
+	{ 0x00000359, 64, 0x00100000, 0x00000000 },
+	{ 0x00000370, 64, 0x00100000, 0x00000000 },
+	{ 0x00000371, 64, 0x00100000, 0x00000000 },
+	{ 0x00000372, 64, 0x00100000, 0x000fffff },
+	{ 0x00000366, 64, 0x00100000, 0x00000000 },
+	{ 0x00000367, 64, 0x00100000, 0x00000000 },
+	{ 0x00000368, 64, 0x00100000, 0x00000fff },
+	{ 0x00000623, 64, 0x00100000, 0x00000000 },
+	{ 0x00000624, 64, 0x00100000, 0x00000000 },
+	{ 0x0001e100,  1, 0x00000001, 0x02000001 },
+	{}
+};
+
+static const struct gf100_gr_pack
+gv100_grctx_pack_sw_veid_bundle_init[] = {
+	{ gv100_grctx_init_sw_veid_bundle_init_0 },
+	{}
+};
+
+static void
+gv100_grctx_generate_attrib(struct gf100_grctx *info)
+{
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32 attrib = grctx->attrib_nr;
+	const u32   gfxp = grctx->gfxp_nr;
+	const int s = 12;
+	const int max_batches = 0xffff;
+	u32 size = grctx->alpha_nr_max * gr->tpc_total;
+	u32 ao = 0;
+	u32 bo = ao + size;
+	int gpc, ppc, b, n = 0;
+
+	size += grctx->gfxp_nr * gr->tpc_total;
+	size = ((size * 0x20) + 128) & ~127;
+	b = mmio_vram(info, size, (1 << s), false);
+
+	mmio_refn(info, 0x418810, 0x80000000, s, b);
+	mmio_refn(info, 0x419848, 0x10000000, s, b);
+	mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+	mmio_refn(info, 0x419e00, 0x00000000, s, b);
+	mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7);
+	mmio_wr32(info, 0x405830, attrib);
+	mmio_wr32(info, 0x40585c, alpha);
+	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+			const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 gs =   gfxp * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 u = 0x418ea0 + (n * 0x04);
+			const u32 o = PPC_UNIT(gpc, ppc, 0);
+			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+				continue;
+			mmio_wr32(info, o + 0xc0, gs);
+			mmio_wr32(info, o + 0xf4, bo);
+			mmio_wr32(info, o + 0xf0, bs);
+			bo += gs;
+			mmio_wr32(info, o + 0xe4, as);
+			mmio_wr32(info, o + 0xf8, ao);
+			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+			mmio_wr32(info, u, bs);
+		}
+	}
+
+	mmio_wr32(info, 0x4181e4, 0x00000100);
+	mmio_wr32(info, 0x41befc, 0x00000100);
+}
+
+static void
+gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 data;
+	int i, j;
+
+	/* Pack tile map into register format. */
+	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+				     gr->screen_tile_row_offset);
+	for (i = 0; i < 11; i++) {
+		for (data = 0, j = 0; j < 6; j++)
+			data |= (gr->tile[i * 6 + j] & 0x1f) << (j * 5);
+		nvkm_wr32(device, 0x418b08 + (i * 4), data);
+		nvkm_wr32(device, 0x41bf00 + (i * 4), data);
+		nvkm_wr32(device, 0x40780c + (i * 4), data);
+	}
+
+	/* GPC_BROADCAST.TP_BROADCAST */
+	nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
+				     gr->screen_tile_row_offset);
+	for (i = 0, j = 1; i < 5; i++, j += 4) {
+		u8 v19 = (1 << (j + 0)) % gr->tpc_total;
+		u8 v20 = (1 << (j + 1)) % gr->tpc_total;
+		u8 v21 = (1 << (j + 2)) % gr->tpc_total;
+		u8 v22 = (1 << (j + 3)) % gr->tpc_total;
+		nvkm_wr32(device, 0x41bfb0 + (i * 4), (v22 << 24) |
+						      (v21 << 16) |
+						      (v20 <<  8) |
+						       v19);
+	}
+
+	/* UNK78xx */
+	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+				     gr->screen_tile_row_offset);
+}
+
+static void
+gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x400088, 0x00060000, on ? 0x00060000 : 0x00000000);
+}
+
+static void
+gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm);
+	nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
+}
+
+static void
+gv100_grctx_generate_unkn(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x41be08, 0x00000004, 0x00000004);
+	nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
+}
+
+static void
+gv100_grctx_unkn88c(struct gf100_gr *gr, bool on)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 mask = 0x00000010, data = on ? mask : 0x00000000;
+	nvkm_mask(device, 0x40988c, mask, data);
+	nvkm_rd32(device, 0x40988c);
+	nvkm_mask(device, 0x41a88c, mask, data);
+	nvkm_rd32(device, 0x41a88c);
+	nvkm_mask(device, 0x408a14, mask, data);
+	nvkm_rd32(device, 0x408a14);
+}
+
+const struct gf100_grctx_func
+gv100_grctx = {
+	.unkn88c = gv100_grctx_unkn88c,
+	.main = gf100_grctx_generate_main,
+	.unkn = gv100_grctx_generate_unkn,
+	.sw_veid_bundle_init = gv100_grctx_pack_sw_veid_bundle_init,
+	.bundle = gm107_grctx_generate_bundle,
+	.bundle_size = 0x3000,
+	.bundle_min_gpm_fifo_depth = 0x180,
+	.bundle_token_limit = 0x1680,
+	.pagepool = gp100_grctx_generate_pagepool,
+	.pagepool_size = 0x20000,
+	.attrib = gv100_grctx_generate_attrib,
+	.attrib_nr_max = 0x6c0,
+	.attrib_nr = 0x480,
+	.alpha_nr_max = 0xc00,
+	.alpha_nr = 0x800,
+	.gfxp_nr = 0xd10,
+	.sm_id = gv100_grctx_generate_sm_id,
+	.rop_mapping = gv100_grctx_generate_rop_mapping,
+	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
+	.r406500 = gm200_grctx_generate_r406500,
+	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+	.smid_config = gp100_grctx_generate_smid_config,
+	.r400088 = gv100_grctx_generate_r400088,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 86ae5c706aa0..8dd4bd71b4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -987,7 +987,7 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 	nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 }
 
-static const struct nvkm_enum gf100_mp_warp_error[] = {
+const struct nvkm_enum gf100_mp_warp_error[] = {
 	{ 0x01, "STACK_ERROR" },
 	{ 0x02, "API_STACK_ERROR" },
 	{ 0x03, "RET_EMPTY_STACK_ERROR" },
@@ -1012,7 +1012,7 @@ static const struct nvkm_enum gf100_mp_warp_error[] = {
 	{}
 };
 
-static const struct nvkm_bitfield gf100_mp_global_error[] = {
+const struct nvkm_bitfield gf100_mp_global_error[] = {
 	{ 0x00000001, "SM_TO_SM_FAULT" },
 	{ 0x00000002, "L1_ERROR" },
 	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
@@ -2113,6 +2113,9 @@ gf100_gr_init(struct gf100_gr *gr)
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, rop;
 
+	if (gr->func->init_419bd8)
+		gr->func->init_419bd8(gr);
+
 	gr->func->init_gpc_mmu(gr);
 
 	if (gr->fuc_sw_nonctx)
@@ -2213,6 +2216,9 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	gf100_gr_zbc_init(gr);
 
+	if (gr->func->init_4188a4)
+		gr->func->init_4188a4(gr);
+
 	return gf100_gr_init_ctxctl(gr);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index edf6edabf6df..dc46cf0131db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -149,6 +149,7 @@ struct gf100_gr_func {
 	void (*oneinit_tiles)(struct gf100_gr *);
 	void (*oneinit_sm_id)(struct gf100_gr *);
 	int (*init)(struct gf100_gr *);
+	void (*init_419bd8)(struct gf100_gr *);
 	void (*init_gpc_mmu)(struct gf100_gr *);
 	void (*init_r405a14)(struct gf100_gr *);
 	void (*init_bios)(struct gf100_gr *);
@@ -170,6 +171,7 @@ struct gf100_gr_func {
 	void (*init_504430)(struct gf100_gr *, int gpc, int tpc);
 	void (*init_shader_exceptions)(struct gf100_gr *, int gpc, int tpc);
 	void (*init_400054)(struct gf100_gr *);
+	void (*init_4188a4)(struct gf100_gr *);
 	void (*trap_mp)(struct gf100_gr *, int gpc, int tpc);
 	void (*set_hww_esr_report_mask)(struct gf100_gr *);
 	const struct gf100_gr_pack *mmio;
@@ -266,7 +268,7 @@ extern const struct nvkm_object_func gf100_fermi;
 struct gf100_gr_init {
 	u32 addr;
 	u8  count;
-	u8  pitch;
+	u32 pitch;
 	u32 data;
 };
 
@@ -337,6 +339,8 @@ extern const struct gf100_gr_init gf100_gr_init_fe_1[];
 extern const struct gf100_gr_init gf100_gr_init_pe_1[];
 void gf100_gr_init_gpc_mmu(struct gf100_gr *);
 void gf100_gr_trap_mp(struct gf100_gr *, int, int);
+extern const struct nvkm_bitfield gf100_mp_global_error[];
+extern const struct nvkm_enum gf100_mp_warp_error[];
 
 extern const struct gf100_gr_init gf104_gr_init_ds_0[];
 extern const struct gf100_gr_init gf104_gr_init_tex_0[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
new file mode 100644
index 000000000000..19173ea19096
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static void
+gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730));
+	u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734));
+	const struct nvkm_enum *warp;
+	char glob[128];
+
+	nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
+	warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
+
+	nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+			   "global %08x [%s] warp %04x [%s]\n",
+		   gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730), 0x00000000);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734), gerr);
+}
+
+static void
+gv100_gr_init_4188a4(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000);
+}
+
+static void
+gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int sm;
+	for (sm = 0; sm < 0x100; sm += 0x80) {
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x728 + sm), 0x0085eb64);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x610), 0x00000001);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x72c + sm), 0x00000004);
+	}
+}
+
+static void
+gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000);
+}
+
+static void
+gv100_gr_init_419bd8(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x419bd8, 0x00000700, 0x00000000);
+}
+
+static const struct gf100_gr_func
+gv100_gr = {
+	.oneinit_tiles = gm200_gr_oneinit_tiles,
+	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
+	.init = gf100_gr_init,
+	.init_419bd8 = gv100_gr_init_419bd8,
+	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
+	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
+	.init_zcull = gf117_gr_init_zcull,
+	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
+	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
+	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
+	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
+	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+	.init_504430 = gv100_gr_init_504430,
+	.init_shader_exceptions = gv100_gr_init_shader_exceptions,
+	.init_4188a4 = gv100_gr_init_4188a4,
+	.trap_mp = gv100_gr_trap_mp,
+	.rops = gm200_gr_rops,
+	.gpc_nr = 6,
+	.tpc_nr = 5,
+	.ppc_nr = 3,
+	.grctx = &gv100_grctx,
+	.zbc = &gp102_gr_zbc,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, VOLTA_A, &gf100_fermi },
+		{ -1, -1, VOLTA_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gm200_gr_new_(&gv100_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 58a59b7db2e5..771e16a16267 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -506,6 +506,7 @@ nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
 		break;
 	case 0x0148cdec:
 	case 0x015ccf3e:
+	case 0x0167d263:
 		ret = msgqueue_0148cdec_new(falcon, sb, queue);
 		break;
 	default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
index e8c27ec700de..737a8d50a1f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
@@ -65,3 +65,24 @@ MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+
+MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");

From 54b202f1d83074074562f645e9d1ba4e7b6f1cca Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:10 +0200
Subject: [PATCH 1164/1286] drm/nouveau: fix mode_valid's return type

The method struct drm_connector_helper_funcs::mode_valid is defined
as returning an 'enum drm_mode_status', but the driver's implementation
of this method returns an 'int'.

Fix this by using 'enum drm_mode_status' in the driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_connector.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 18e3239f7658..7b557c354307 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1005,7 +1005,7 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
 		return 112000;
 }
 
-static int
+static enum drm_mode_status
 nouveau_connector_mode_valid(struct drm_connector *connector,
 			     struct drm_display_mode *mode)
 {
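
For illustration only (not part of the patch), a minimal hook with the corrected return type; the 340 MHz pixel-clock limit is an assumed example value, not a nouveau limit:

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

/* Hypothetical connector hook: specific drm_mode_status codes are
 * returned instead of a bare int.
 */
static enum drm_mode_status
example_mode_valid(struct drm_connector *connector,
		   struct drm_display_mode *mode)
{
	if (mode->clock > 340000)	/* assumed limit, in kHz */
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}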

From f43cda5c76922777f4fe5026ee5984364ae5a918 Mon Sep 17 00:00:00 2001
From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Date: Tue, 24 Apr 2018 15:15:38 +0200
Subject: [PATCH 1165/1286] drm/nouveau: fix nouveau_dsm_get_client_id()'s
 return type

The method struct vga_switcheroo_handler::get_client_id() is defined
as returning an 'enum vga_switcheroo_client_id' but the implementation
in this driver, nouveau_dsm_get_client_id(), returns an 'int'.

Fix this by returning 'enum vga_switcheroo_client_id' in this driver too.

Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_acpi.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 5ffcb6683776..ffb195850314 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -193,7 +193,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
 	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
 }
 
-static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
+static enum vga_switcheroo_client_id nouveau_dsm_get_client_id(struct pci_dev *pdev)
 {
 	/* easy option one - intel vendor ID means Integrated */
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL)

From 7a22c737faef99d6f75d7049c1a2f6f0fdefb1ec Mon Sep 17 00:00:00 2001
From: Ilia Mirkin <imirkin@alum.mit.edu>
Date: Sun, 22 Apr 2018 17:47:12 -0400
Subject: [PATCH 1166/1286] drm/nouveau: fix temp/pwm visibility, skip hwmon
 when no sensors exist

An NV34 GPU was exposing temp and pwm entries in hwmon, which would error
out when read. These should not have been visible, and on a board with no
sensors the whole hwmon object should not have been registered in the
first place.
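
As a reminder of the hwmon convention this relies on (illustrative sketch
only, the struct and field below are hypothetical): an .is_visible callback
returns the attribute's file mode to expose it and 0 to hide it, so a
missing sensor must make the callback bail out with 0:

  static umode_t example_temp_is_visible(const void *data, u32 attr,
                                         int channel)
  {
          const struct example_sensor *sensor = data;

          /* no sensor present: hide the attribute instead of exposing
           * a file that errors out on read */
          if (!sensor || !sensor->has_temp)
                  return 0;

          return 0444;
  }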

Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_hwmon.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 7c965648df80..44178b4c3599 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -327,7 +327,7 @@ nouveau_temp_is_visible(const void *data, u32 attr, int channel)
 	struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
 	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
-	if (therm && therm->attr_get && nvkm_therm_temp_get(therm) < 0)
+	if (!therm || !therm->attr_get || nvkm_therm_temp_get(therm) < 0)
 		return 0;
 
 	switch (attr) {
@@ -351,8 +351,8 @@ nouveau_pwm_is_visible(const void *data, u32 attr, int channel)
 	struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
 	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
-	if (therm && therm->attr_get && therm->fan_get &&
-				therm->fan_get(therm) < 0)
+	if (!therm || !therm->attr_get || !therm->fan_get ||
+	    therm->fan_get(therm) < 0)
 		return 0;
 
 	switch (attr) {
@@ -707,13 +707,20 @@ nouveau_hwmon_init(struct drm_device *dev)
 {
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
 	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+	struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
 	const struct attribute_group *special_groups[N_ATTR_GROUPS];
 	struct nouveau_hwmon *hwmon;
 	struct device *hwmon_dev;
 	int ret = 0;
 	int i = 0;
 
+	if (!iccsense && !therm && !volt) {
+		NV_DEBUG(drm, "Skipping hwmon registration\n");
+		return 0;
+	}
+
 	hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
 	if (!hwmon)
 		return -ENOMEM;
@@ -749,6 +756,9 @@ nouveau_hwmon_fini(struct drm_device *dev)
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 	struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);
 
+	if (!hwmon)
+		return;
+
 	if (hwmon->hwmon)
 		hwmon_device_unregister(hwmon->hwmon);
 

From dd3b89be3eafd1c9977e350e81c5556230319101 Mon Sep 17 00:00:00 2001
From: Arushi Singhal <arushisinghal19971997@gmail.com>
Date: Tue, 8 May 2018 23:13:09 +1000
Subject: [PATCH 1167/1286] drm/nouveau/clk: Use
 list_for_each_entry_from_reverse

Using "list_for_each_entry_from_reverse" to iterate over the list is
clearer to read than an open-coded "for" loop.
This patch replaces the "for" loop with "list_for_each_entry_from_reverse"
and renames the "start" variable to "cstate", which simplifies the code;
"cstate" is also the name more commonly used in the other functions.

Changes in v2:
Remove the "start" variable instead of "cstate"; v1 removed "cstate", but
"cstate" is the more common name, so it is kept in preference to "start".

Signed-off-by: Arushi Singhal <arushisinghal19971997@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 81c3567d4e67..ba6a868d4c95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -109,18 +109,17 @@ nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
 
 static struct nvkm_cstate *
 nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
-		      struct nvkm_cstate *start)
+		      struct nvkm_cstate *cstate)
 {
 	struct nvkm_device *device = clk->subdev.device;
 	struct nvkm_volt *volt = device->volt;
-	struct nvkm_cstate *cstate;
 	int max_volt;
 
-	if (!pstate || !start)
+	if (!pstate || !cstate)
 		return NULL;
 
 	if (!volt)
-		return start;
+		return cstate;
 
 	max_volt = volt->max_uv;
 	if (volt->max0_id != 0xff)
@@ -133,8 +132,7 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
 		max_volt = min(max_volt,
 			       nvkm_volt_map(volt, volt->max2_id, clk->temp));
 
-	for (cstate = start; &cstate->head != &pstate->list;
-	     cstate = list_prev_entry(cstate, head)) {
+	list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
 		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
 			break;
 	}

From 6c46d01f25bcf74608d09645c27c35c3f3940ebe Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Wed, 16 May 2018 12:07:32 +1000
Subject: [PATCH 1168/1286] drm/nouveau/gr/gf100-: insert some WFIs during gr
 init

Insert wait-for-gr-idle calls in the places where RM appears to do so; this
seems to prevent some random mmio timeouts on Quadro GV100.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 4 ++++
 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c    | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index f0f5a518e52a..e813a3f8ea93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1385,6 +1385,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 		gf100_gr_mmio(gr, gr->fuc_sw_ctx);
 	}
 
+	gf100_gr_wait_idle(gr);
+
 	idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
 
 	grctx->pagepool(info);
@@ -1396,6 +1398,8 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 
 	gf100_grctx_generate_floorsweep(gr);
 
+	gf100_gr_wait_idle(gr);
+
 	if (grctx->r400088) grctx->r400088(gr, false);
 	if (gr->fuc_bundle)
 		gf100_gr_icmd(gr, gr->fuc_bundle);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 8dd4bd71b4fc..70d3d41e616c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2123,6 +2123,8 @@ gf100_gr_init(struct gf100_gr *gr)
 	else
 		gf100_gr_mmio(gr, gr->func->mmio);
 
+	gf100_gr_wait_idle(gr);
+
 	if (gr->func->init_r405a14)
 		gr->func->init_r405a14(gr);
 

From 2f8a6da866eff746a9f8c7745790f3765baeb589 Mon Sep 17 00:00:00 2001
From: Emil Goode <emil.fsw@goode.io>
Date: Wed, 16 May 2018 12:22:04 +0200
Subject: [PATCH 1169/1286] gpu: host1x: Fix compiler errors by converting to
 dma_addr_t
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The compiler is complaining with the following errors:

drivers/gpu/host1x/cdma.c:94:48: error:
	passing argument 3 of ‘dma_alloc_wc’ from incompatible pointer type
	[-Werror=incompatible-pointer-types]

drivers/gpu/host1x/cdma.c:113:48: error:
	passing argument 3 of ‘dma_alloc_wc’ from incompatible pointer type
	[-Werror=incompatible-pointer-types]

The expected pointer type of the third argument to dma_alloc_wc() is
dma_addr_t but phys_addr_t is passed.

Change the phys member of struct push_buffer to be dma_addr_t so that we
pass the correct type to dma_alloc_wc().
Also check pb->mapped for non-NULL in the destroy function as that is the
right way of checking if dma_alloc_wc() was successful.
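
A minimal sketch of the expected dma_alloc_wc()/dma_free_wc() usage (the
function below is purely illustrative):

  static int example_alloc(struct device *dev, size_t size)
  {
          void *cpu;
          dma_addr_t dma;         /* third argument must be a dma_addr_t * */

          cpu = dma_alloc_wc(dev, size, &dma, GFP_KERNEL);
          if (!cpu)               /* the CPU pointer signals success/failure */
                  return -ENOMEM;

          dma_free_wc(dev, size, cpu, dma);
          return 0;
  }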

Signed-off-by: Emil Goode <emil.fsw@goode.io>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/host1x/cdma.c | 2 +-
 drivers/gpu/host1x/cdma.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index cf6caa90bf89..69bb77372ed9 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -51,7 +51,7 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb)
 	struct host1x_cdma *cdma = pb_to_cdma(pb);
 	struct host1x *host1x = cdma_to_host1x(cdma);
 
-	if (!pb->phys)
+	if (!pb->mapped)
 		return;
 
 	if (host1x->domain) {
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
index 286d49386be9..446ee1a84969 100644
--- a/drivers/gpu/host1x/cdma.h
+++ b/drivers/gpu/host1x/cdma.h
@@ -44,7 +44,7 @@ struct host1x_job;
 struct push_buffer {
 	void *mapped;			/* mapped pushbuffer memory */
 	dma_addr_t dma;			/* device address of pushbuffer */
-	phys_addr_t phys;		/* physical address of pushbuffer */
+	dma_addr_t phys;		/* physical address of pushbuffer */
 	u32 fence;			/* index we've written */
 	u32 pos;			/* index to write to */
 	u32 size;

From d066b246d482f69553e58d52f746377ce3966b66 Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@armlinux.org.uk>
Date: Tue, 20 Feb 2018 10:22:22 +0100
Subject: [PATCH 1170/1286] drm/etnaviv: correct timeout calculation

The old way of calculating the timeout clamped the jiffy conversion and
thus caused the timeouts to become negative after some time. It also didn't
work with userspace that actually fills the upper 32 bits of the 64-bit
timestamp value.

Note that clock_gettime() is 32-bit on 32-bit architectures. With the
64-bit timespec math used in this commit, once that 32-bit clock wraps the
specified timeout lies in the past and a timeout in the future can no
longer be requested. As the Linux implementation of CLOCK_MONOTONIC is
reasonable and starts at 0, the first such wrap will occur only after
approximately 68 years of system uptime.
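
For context, a rough sketch of the userspace side, assuming an interface
that takes an absolute CLOCK_MONOTONIC deadline (as the etnaviv waits do);
the helper below is hypothetical:

  #include <time.h>

  /* build an absolute CLOCK_MONOTONIC deadline 'sec' seconds from now */
  static struct timespec deadline_in(time_t sec)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          ts.tv_sec += sec;
          return ts;
  }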

Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_drv.h | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index ddb17ee565e9..17a43da98fb9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -26,6 +26,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/list.h>
+#include <linux/time64.h>
 #include <linux/types.h>
 #include <linux/sizes.h>
 
@@ -132,19 +133,27 @@ static inline bool fence_after_eq(u32 a, u32 b)
 	return (s32)(a - b) >= 0;
 }
 
+/*
+ * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
+ * We need to calculate the timeout in terms of number of jiffies
+ * between the specified timeout and the current CLOCK_MONOTONIC time.
+ */
 static inline unsigned long etnaviv_timeout_to_jiffies(
 	const struct timespec *timeout)
 {
-	unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
-	unsigned long start_jiffies = jiffies;
-	unsigned long remaining_jiffies;
+	struct timespec64 ts, to;
 
-	if (time_after(start_jiffies, timeout_jiffies))
-		remaining_jiffies = 0;
-	else
-		remaining_jiffies = timeout_jiffies - start_jiffies;
+	to = timespec_to_timespec64(*timeout);
 
-	return remaining_jiffies;
+	ktime_get_ts64(&ts);
+
+	/* timeouts before "now" have already expired */
+	if (timespec64_compare(&to, &ts) <= 0)
+		return 0;
+
+	ts = timespec64_sub(to, ts);
+
+	return timespec64_to_jiffies(&ts);
 }
 
 #endif /* __ETNAVIV_DRV_H__ */

From ccae45928fc43d78d6ba7d0c6965b142c922a446 Mon Sep 17 00:00:00 2001
From: Lucas Stach <l.stach@pengutronix.de>
Date: Fri, 9 Mar 2018 12:53:34 +0100
Subject: [PATCH 1171/1286] drm/etnaviv: remove cycling through MMU address
 space

This was useful on MMUv1 GPUs, which don't generate proper faults, back
when the GPU write caches weren't fully understood and not properly
handled by the kernel driver. As this has been fixed for quite some time,
cycling through the MMU address space now only needlessly spreads out the
MMU mappings.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 16 +---------------
 drivers/gpu/drm/etnaviv/etnaviv_mmu.h |  1 -
 2 files changed, 1 insertion(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 49e049713a52..e8e8c4fe3242 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -162,22 +162,10 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		bool found;
 
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-						  size, 0, 0,
-						  mmu->last_iova, U64_MAX,
-						  mode);
+						  size, 0, 0, 0, U64_MAX, mode);
 		if (ret != -ENOSPC)
 			break;
 
-		/*
-		 * If we did not search from the start of the MMU region,
-		 * try again in case there are free slots.
-		 */
-		if (mmu->last_iova) {
-			mmu->last_iova = 0;
-			mmu->need_flush = true;
-			continue;
-		}
-
 		/* Try to retire some entries */
 		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
 
@@ -274,7 +262,6 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 	if (ret < 0)
 		goto unlock;
 
-	mmu->last_iova = node->start + etnaviv_obj->base.size;
 	mapping->iova = node->start;
 	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
 				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
@@ -381,7 +368,6 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
 			mutex_unlock(&mmu->lock);
 			return ret;
 		}
-		mmu->last_iova = vram_node->start + size;
 		gpu->mmu->need_flush = true;
 		mutex_unlock(&mmu->lock);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index ab603f5166b1..a339ec5798ff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -59,7 +59,6 @@ struct etnaviv_iommu {
 	struct mutex lock;
 	struct list_head mappings;
 	struct drm_mm mm;
-	u32 last_iova;
 	bool need_flush;
 };
 

From a98b1e7808a8a9faf7aa3a6318a1f3400f0ee628 Mon Sep 17 00:00:00 2001
From: Lucas Stach <l.stach@pengutronix.de>
Date: Thu, 19 Apr 2018 15:55:40 +0200
Subject: [PATCH 1172/1286] drm/etnaviv: remove register logging

I'm not aware of any case where tracing GPU register manipulation at the
kernel level would have been useful. It only adds more indirection and
increases the code size.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>
---
 drivers/gpu/drm/etnaviv/Kconfig       |  8 -----
 drivers/gpu/drm/etnaviv/etnaviv_drv.c | 51 ---------------------------
 drivers/gpu/drm/etnaviv/etnaviv_drv.h |  5 ---
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c |  4 ++-
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h |  4 +--
 5 files changed, 5 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index e5bfeca361bd..041a77e400d4 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -22,11 +22,3 @@ config DRM_ETNAVIV_THERMAL
 	help
 	  Compile in support for thermal throttling.
 	  Say Y unless you want to risk burning your SoC.
-
-config DRM_ETNAVIV_REGISTER_LOGGING
-	bool "enable ETNAVIV register logging"
-	depends on DRM_ETNAVIV
-	help
-	  Compile in support for logging register reads/writes in a format
-	  that can be parsed by envytools demsm tool.  If enabled, register
-	  logging can be switched on via etnaviv.reglog=y module param.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index ab50090d066c..0aa543d75953 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -25,57 +25,6 @@
 #include "etnaviv_mmu.h"
 #include "etnaviv_perfmon.h"
 
-#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
-static bool reglog;
-MODULE_PARM_DESC(reglog, "Enable register read/write logging");
-module_param(reglog, bool, 0600);
-#else
-#define reglog 0
-#endif
-
-void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
-		const char *dbgname)
-{
-	struct resource *res;
-	void __iomem *ptr;
-
-	if (name)
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-	else
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	ptr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(ptr)) {
-		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
-			PTR_ERR(ptr));
-		return ptr;
-	}
-
-	if (reglog)
-		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
-			   dbgname, ptr, (size_t)resource_size(res));
-
-	return ptr;
-}
-
-void etnaviv_writel(u32 data, void __iomem *addr)
-{
-	if (reglog)
-		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
-
-	writel(data, addr);
-}
-
-u32 etnaviv_readl(const void __iomem *addr)
-{
-	u32 val = readl(addr);
-
-	if (reglog)
-		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
-
-	return val;
-}
-
 /*
  * DRM operations:
  */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 17a43da98fb9..763cf5bf8eae 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -102,11 +102,6 @@ void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
 	struct seq_file *m);
 #endif
 
-void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
-		const char *dbgname);
-void etnaviv_writel(u32 data, void __iomem *addr);
-u32 etnaviv_readl(const void __iomem *addr);
-
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 8a88799bf79b..08c587547f19 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1735,6 +1735,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct etnaviv_gpu *gpu;
+	struct resource *res;
 	int err;
 
 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1746,7 +1747,8 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	mutex_init(&gpu->fence_idr_lock);
 
 	/* Map registers: */
-	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	gpu->mmio = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(gpu->mmio))
 		return PTR_ERR(gpu->mmio);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 3c3005501846..6052093d00b2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -161,12 +161,12 @@ struct etnaviv_gpu {
 
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
 {
-	etnaviv_writel(data, gpu->mmio + reg);
+	writel(data, gpu->mmio + reg);
 }
 
 static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
 {
-	return etnaviv_readl(gpu->mmio + reg);
+	return readl(gpu->mmio + reg);
 }
 
 static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)

From 1af998b27c6c63d43c491783144ad0310d13a747 Mon Sep 17 00:00:00 2001
From: Lucas Stach <l.stach@pengutronix.de>
Date: Tue, 17 Apr 2018 12:00:46 +0200
Subject: [PATCH 1173/1286] drm/etnaviv: switch MMU page tables to writecombine
 memory

We are likely to write multiple page entries at once and already ensure
proper write buffer flushing before GPU submit, so this improves CPU
time usage in the submit path without any downsides.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c    | 34 +++++-----
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 74 ++++++++++------------
 2 files changed, 49 insertions(+), 59 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 4b9b11ca6f03..4ada19054443 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -47,11 +47,10 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
 	u32 *p;
 	int i;
 
-	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
-						etnaviv_domain->base.dev,
-						SZ_4K,
-						&etnaviv_domain->base.bad_page_dma,
-						GFP_KERNEL);
+	etnaviv_domain->base.bad_page_cpu =
+			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+				     &etnaviv_domain->base.bad_page_dma,
+				     GFP_KERNEL);
 	if (!etnaviv_domain->base.bad_page_cpu)
 		return -ENOMEM;
 
@@ -59,14 +58,14 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->pgtable_cpu =
-			dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
-					   &etnaviv_domain->pgtable_dma,
-					   GFP_KERNEL);
+	etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+						   PT_SIZE,
+						   &etnaviv_domain->pgtable_dma,
+						   GFP_KERNEL);
 	if (!etnaviv_domain->pgtable_cpu) {
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->base.bad_page_cpu,
-				  etnaviv_domain->base.bad_page_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->base.bad_page_cpu,
+			    etnaviv_domain->base.bad_page_dma);
 		return -ENOMEM;
 	}
 
@@ -81,13 +80,12 @@ static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
 	struct etnaviv_iommuv1_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
 
-	dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
-			  etnaviv_domain->pgtable_cpu,
-			  etnaviv_domain->pgtable_dma);
+	dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
+		    etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->base.bad_page_cpu,
-			  etnaviv_domain->base.bad_page_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->base.bad_page_cpu,
+		    etnaviv_domain->base.bad_page_dma);
 
 	kfree(etnaviv_domain);
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 9752dbd5d28b..47785d61cd95 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -104,11 +104,10 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	int ret, i, j;
 
 	/* allocate scratch page */
-	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
-						etnaviv_domain->base.dev,
-						SZ_4K,
-						&etnaviv_domain->base.bad_page_dma,
-						GFP_KERNEL);
+	etnaviv_domain->base.bad_page_cpu =
+			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+				     &etnaviv_domain->base.bad_page_dma,
+				     GFP_KERNEL);
 	if (!etnaviv_domain->base.bad_page_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
@@ -117,19 +116,17 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
-						     SZ_4K,
-						     &etnaviv_domain->pta_dma,
-						     GFP_KERNEL);
+	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+					       SZ_4K, &etnaviv_domain->pta_dma,
+					       GFP_KERNEL);
 	if (!etnaviv_domain->pta_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
 	}
 
-	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
-						  SZ_4K,
-						  &etnaviv_domain->mtlb_dma,
-						  GFP_KERNEL);
+	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+						SZ_4K, &etnaviv_domain->mtlb_dma,
+						GFP_KERNEL);
 	if (!etnaviv_domain->mtlb_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
@@ -138,10 +135,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	/* pre-populate STLB pages (may want to switch to on-demand later) */
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		etnaviv_domain->stlb_cpu[i] =
-				dma_alloc_coherent(etnaviv_domain->base.dev,
-						   SZ_4K,
-						   &etnaviv_domain->stlb_dma[i],
-						   GFP_KERNEL);
+				dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+					     &etnaviv_domain->stlb_dma[i],
+					     GFP_KERNEL);
 		if (!etnaviv_domain->stlb_cpu[i]) {
 			ret = -ENOMEM;
 			goto fail_mem;
@@ -158,25 +154,23 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 
 fail_mem:
 	if (etnaviv_domain->base.bad_page_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->base.bad_page_cpu,
-				  etnaviv_domain->base.bad_page_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->base.bad_page_cpu,
+			    etnaviv_domain->base.bad_page_dma);
 
 	if (etnaviv_domain->pta_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->pta_cpu,
-				  etnaviv_domain->pta_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
 
 	if (etnaviv_domain->mtlb_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->mtlb_cpu,
-				  etnaviv_domain->mtlb_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-					  etnaviv_domain->stlb_cpu[i],
-					  etnaviv_domain->stlb_dma[i]);
+			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+				    etnaviv_domain->stlb_cpu[i],
+				    etnaviv_domain->stlb_dma[i]);
 	}
 
 	return ret;
@@ -188,23 +182,21 @@ static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
 			to_etnaviv_domain(domain);
 	int i;
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->base.bad_page_cpu,
-			  etnaviv_domain->base.bad_page_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->base.bad_page_cpu,
+		    etnaviv_domain->base.bad_page_dma);
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->pta_cpu,
-			  etnaviv_domain->pta_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->mtlb_cpu,
-			  etnaviv_domain->mtlb_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-					  etnaviv_domain->stlb_cpu[i],
-					  etnaviv_domain->stlb_dma[i]);
+			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+				    etnaviv_domain->stlb_cpu[i],
+				    etnaviv_domain->stlb_dma[i]);
 	}
 
 	vfree(etnaviv_domain);

From a1fb6f204f956cc8385c31600354e2039978ebb4 Mon Sep 17 00:00:00 2001
From: Lucas Stach <l.stach@pengutronix.de>
Date: Tue, 17 Apr 2018 12:15:13 +0200
Subject: [PATCH 1174/1286] drm/etnaviv: mmuv2: allocate 2nd level page tables
 on demand

With etnaviv not being tied into the IOMMU framework anymore, the MMU
functions will only be called under sleeping locks. Thus we are able
to allocate the memory for the 2nd level page tables on demand without
having to deal with memory allocation in atomic context.

This speeds up driver initialization on MMUv2 GPU cores, as we don't
need to preallocate all the page table memory, and also reduces memory
consumption for most workloads, as most of them won't use the full
GPU virtual address space.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 61 ++++++++++++----------
 1 file changed, 33 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 47785d61cd95..6336fdc70433 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -47,8 +47,8 @@ struct etnaviv_iommuv2_domain {
 	u32 *mtlb_cpu;
 	dma_addr_t mtlb_dma;
 	/* S(lave) TLB aka second level pagetable */
-	u32 *stlb_cpu[1024];
-	dma_addr_t stlb_dma[1024];
+	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
+	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
 };
 
 static struct etnaviv_iommuv2_domain *
@@ -57,13 +57,36 @@ to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
 	return container_of(domain, struct etnaviv_iommuv2_domain, base);
 }
 
+static int
+etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
+			    int stlb)
+{
+	if (etnaviv_domain->stlb_cpu[stlb])
+		return 0;
+
+	etnaviv_domain->stlb_cpu[stlb] =
+			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+				     &etnaviv_domain->stlb_dma[stlb],
+				     GFP_KERNEL);
+
+	if (!etnaviv_domain->stlb_cpu[stlb])
+		return -ENOMEM;
+
+	memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
+		 SZ_4K / sizeof(u32));
+
+	etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
+						      MMUv2_PTE_PRESENT;
+	return 0;
+}
+
 static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
 			       unsigned long iova, phys_addr_t paddr,
 			       size_t size, int prot)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
-	int mtlb_entry, stlb_entry;
+	int mtlb_entry, stlb_entry, ret;
 	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;
 
 	if (size != SZ_4K)
@@ -75,6 +98,10 @@ static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
 	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
 	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
 
+	ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
+	if (ret)
+		return ret;
+
 	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
 
 	return 0;
@@ -101,7 +128,7 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
 static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 {
 	u32 *p;
-	int ret, i, j;
+	int ret, i;
 
 	/* allocate scratch page */
 	etnaviv_domain->base.bad_page_cpu =
@@ -132,23 +159,8 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 		goto fail_mem;
 	}
 
-	/* pre-populate STLB pages (may want to switch to on-demand later) */
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
-		etnaviv_domain->stlb_cpu[i] =
-				dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
-					     &etnaviv_domain->stlb_dma[i],
-					     GFP_KERNEL);
-		if (!etnaviv_domain->stlb_cpu[i]) {
-			ret = -ENOMEM;
-			goto fail_mem;
-		}
-		p = etnaviv_domain->stlb_cpu[i];
-		for (j = 0; j < SZ_4K / 4; j++)
-			*p++ = MMUv2_PTE_EXCEPTION;
-
-		etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
-					      MMUv2_PTE_PRESENT;
-	}
+	memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
+		 MMUv2_MAX_STLB_ENTRIES);
 
 	return 0;
 
@@ -166,13 +178,6 @@ fail_mem:
 		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
 			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
 
-	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
-		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-				    etnaviv_domain->stlb_cpu[i],
-				    etnaviv_domain->stlb_dma[i]);
-	}
-
 	return ret;
 }
 

From 931e97f3afd80bd9671d92f6934306a56012cae8 Mon Sep 17 00:00:00 2001
From: Lucas Stach <l.stach@pengutronix.de>
Date: Fri, 4 May 2018 11:58:45 +0200
Subject: [PATCH 1175/1286] drm/etnaviv: mmuv2: support 40 bit phys address

MMUv2 supports up to 40 bits of physical address by folding the upper
8 bits into bits [4:11] of the PTE.
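
A worked example of the folding (the address value below is made up and
4K aligned):

  u64 paddr = 0x1234567000ULL;                  /* 40-bit physical address */
  u32 entry;

  entry  = lower_32_bits(paddr);                /* 0x34567000 */
  entry |= (upper_32_bits(paddr) & 0xff) << 4;  /* 0x12 -> bits [4:11] = 0x120 */
  /* entry == 0x34567120 before the PRESENT/WRITEABLE flags are ORed in */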

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 6336fdc70433..72bd0107a00c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -87,11 +87,14 @@ static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
 	int mtlb_entry, stlb_entry, ret;
-	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;
+	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
 
 	if (size != SZ_4K)
 		return -EINVAL;
 
+	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+		entry |= (upper_32_bits(paddr) & 0xff) << 4;
+
 	if (prot & ETNAVIV_PROT_WRITE)
 		entry |= MMUv2_PTE_WRITEABLE;
 

From f6ffbd4fc1a1caafe2ab840993b917fba5324598 Mon Sep 17 00:00:00 2001
From: Lucas Stach <l.stach@pengutronix.de>
Date: Tue, 8 May 2018 16:20:54 +0200
Subject: [PATCH 1176/1286] drm/etnaviv: replace license text with SPDX tags

This replaces the repetitive GPL-2.0 license text in code and header files
with the SPDX tags. Generated hardware headers aren't changed, as any changes
there need to be done in the upstream rnndb repository.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>
---
 drivers/gpu/drm/etnaviv/etnaviv_buffer.c     | 16 ++--------------
 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c     | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h     | 13 +------------
 drivers/gpu/drm/etnaviv/etnaviv_drv.c        | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_drv.h        | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_dump.c       | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_dump.h       | 16 ++--------------
 drivers/gpu/drm/etnaviv/etnaviv_gem.c        | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_gem.h        | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c  | 16 ++--------------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c        | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h        | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_hwdb.c       | 13 +------------
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c      | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_iommu.h      | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c   | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c        | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_mmu.h        | 15 ++-------------
 drivers/gpu/drm/etnaviv/etnaviv_perfmon.c    | 13 +------------
 drivers/gpu/drm/etnaviv/etnaviv_perfmon.h    | 13 +------------
 drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 13 +------------
 drivers/gpu/drm/etnaviv/etnaviv_sched.h      | 13 +------------
 23 files changed, 40 insertions(+), 296 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index bfc6d4aa3b7c..7fea74861a87 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2014 Etnaviv Project
- * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2014-2018 Etnaviv Project
  */
 
 #include "etnaviv_cmdbuf.h"
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
index 68e6d3772ad8..b106e8b288ad 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 3746827f45eb..a3c44f145c1d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2017 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2017-2018 Etnaviv Project
  */
 
 #include <drm/drm_mm.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index ddc3f7ea169c..acb68c698363 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2017 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef __ETNAVIV_CMDBUF_H__
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 0aa543d75953..144fd8bf4172 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #include <linux/component.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 763cf5bf8eae..d36c7bbe66db 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #ifndef __ETNAVIV_DRV_H__
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 48aef6cf6a42..9146e30e24a6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #include <linux/devcoredump.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
index 97f2f8db9133..2d916c2667ee 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * Etnaviv devcoredump file definitions
  */
+
 #ifndef ETNAVIV_DUMP_H
 #define ETNAVIV_DUMP_H
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index fcc969fa0e69..209ef1274b80 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 93e696fcc14f..76079c2291f8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #ifndef __ETNAVIV_GEM_H__
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 5704305d41e6..0566171f8df2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2014-2018 Etnaviv Project
  */
 
 #include <linux/dma-buf.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 08c587547f19..686f6552db48 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #include <linux/component.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 6052093d00b2..dd430f0f8ff5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #ifndef __ETNAVIV_GPU_H__
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index ea08bb38caaf..39b463db76c9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2018 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "etnaviv_gpu.h"
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 4ada19054443..b163bdbcb880 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2014-2018 Etnaviv Project
  */
 
 #include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
index 01d59bf70d78..b279404ce91a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
-  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2014-2018 Etnaviv Project
  */
 
 #ifndef __ETNAVIV_IOMMU_H__
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 72bd0107a00c..71fbc1f96cb6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2016 Etnaviv Project
-  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2016-2018 Etnaviv Project
  */
 
 #include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index e8e8c4fe3242..8069f9f36a2e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #include "common.xml.h"
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index a339ec5798ff..a0db17ffb686 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2015 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2015-2018 Etnaviv Project
  */
 
 #ifndef __ETNAVIV_MMU_H__
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 26dddfc41aac..9980d81a26e3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Etnaviv Project
  * Copyright (C) 2017 Zodiac Inflight Innovations
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "etnaviv_gpu.h"
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
index c1653c64ab6b..4a9d508f6e10 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2017 Etnaviv Project
  * Copyright (C) 2017 Zodiac Inflight Innovations
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef __ETNAVIV_PERFMON_H__
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 6cf0775dbcd7..a74eb57af15b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kthread.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
index 097635fa78ae..c0a6796e22c9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2017 Etnaviv Project
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef __ETNAVIV_SCHED_H__

From fcdfa432a5b0569e8c5399effa950c71940b5889 Mon Sep 17 00:00:00 2001
From: Oded Gabbay <oded.gabbay@gmail.com>
Date: Fri, 18 May 2018 22:18:16 +0300
Subject: [PATCH 1177/1286] drm/amdgpu: conditionally compile amdgpu's amdkfd
 files

In case CONFIG_HSA_AMD is not chosen, there is no need to compile the
amdkfd files that reside inside the amdgpu driver. In addition, because
amdkfd depends on the x86_64 architecture while amdgpu does not,
compiling the amdkfd files for i386 can cause compiler errors and warnings.

This patch modifies amdgpu's makefile to build amdkfd files only if
CONFIG_HSA_AMD is chosen. The only file to be compiled unconditionally
is amdgpu_amdkfd.c.

Stub functions are compiled only when amdkfd itself is not compiled. In
that case, calls from the amdgpu driver proper go to those stubs instead
of the real functions.

v2: instead of using function pointers, use stub functions

v3: initialize kgd2kfd to NULL in case amdkfd is not compiled
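
A minimal caller-side sketch (illustrative only, not part of this patch;
the call site and the fence/mm variables are hypothetical, while
amdkfd_fence_check_mm() is one of the stubbed interfaces below):

    /* With the stubs in place, amdgpu proper needs no #ifdef here: when
     * amdkfd is not built, the stub simply reports "not a KFD fence".
     */
    if (amdkfd_fence_check_mm(fence, current->mm))
            pr_debug("fence belongs to a KFD process\n");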

Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile        | 13 ++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 47 ++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12 +++---
 3 files changed, 63 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index a51c5a960750..bfd332c95b61 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -56,8 +56,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
-	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
-	amdgpu_amdkfd_gfx_v7.o
+	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
 
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 
@@ -132,13 +131,21 @@ amdgpu-y += \
 	vcn_v1_0.o
 
 # add amdkfd interfaces
+amdgpu-y += amdgpu_amdkfd.o
+
+ifneq ($(CONFIG_HSA_AMD),)
 amdgpu-y += \
-	 amdgpu_amdkfd.o \
 	 amdgpu_amdkfd_fence.o \
 	 amdgpu_amdkfd_gpuvm.o \
 	 amdgpu_amdkfd_gfx_v8.o \
 	 amdgpu_amdkfd_gfx_v9.o
 
+ifneq ($(CONFIG_DRM_AMDGPU_CIK),)
+amdgpu-y += amdgpu_amdkfd_gfx_v7.o
+endif
+
+endif
+
 # add cgs
 amdgpu-y += amdgpu_cgs.o
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index bd36ee9f7e6d..95fcbd8a4bf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -50,15 +50,21 @@ int amdgpu_amdkfd_init(void)
 		kgd2kfd = NULL;
 	}
 
+
 #elif defined(CONFIG_HSA_AMD)
+
 	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
 	if (ret)
 		kgd2kfd = NULL;
 
 #else
+	kgd2kfd = NULL;
 	ret = -ENOENT;
 #endif
+
+#if defined(CONFIG_HSA_AMD_MODULE) || defined(CONFIG_HSA_AMD)
 	amdgpu_amdkfd_gpuvm_init_mem_limits();
+#endif
 
 	return ret;
 }
@@ -464,3 +470,44 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 
 	return false;
 }
+
+#if !defined(CONFIG_HSA_AMD_MODULE) && !defined(CONFIG_HSA_AMD)
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+	return false;
+}
+
+void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+{
+}
+
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+					struct amdgpu_vm *vm)
+{
+}
+
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+	return NULL;
+}
+
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+{
+	return 0;
+}
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
+{
+	return NULL;
+}
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
+{
+	return NULL;
+}
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
+{
+	return NULL;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 12367a9951e8..a8418a3f4e9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -156,14 +156,14 @@ uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 
 /* GPUVM API */
 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
-					  void **process_info,
-					  struct dma_fence **ef);
+					void **process_info,
+					struct dma_fence **ef);
 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
-					   struct file *filp,
-					   void **vm, void **process_info,
-					   struct dma_fence **ef);
+					struct file *filp,
+					void **vm, void **process_info,
+					struct dma_fence **ef);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
+				struct amdgpu_vm *vm);
 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
 uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(

From 24c94e166dfe89839129b8e0fae208b6af60d6f1 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Sat, 5 May 2018 08:45:47 +0200
Subject: [PATCH 1178/1286] gpu: host1x: Remove wait check support

The job submission userspace ABI doesn't support this and there are no
plans to implement it, so all of this code is dead and can be removed.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.c        |  62 +--------------
 drivers/gpu/host1x/dev.h           |   8 --
 drivers/gpu/host1x/hw/channel_hw.c |   3 +-
 drivers/gpu/host1x/hw/syncpt_hw.c  |  11 ---
 drivers/gpu/host1x/job.c           | 124 +----------------------------
 drivers/gpu/host1x/syncpt.c        |   6 --
 drivers/gpu/host1x/syncpt.h        |   3 -
 include/linux/host1x.h             |  15 +---
 include/trace/events/host1x.h      |  16 ++--
 9 files changed, 14 insertions(+), 234 deletions(-)

diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3cdef659cd39..204b10e33f16 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -321,46 +321,14 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
 	return 0;
 }
 
-static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
-					 struct drm_tegra_waitchk __user *src,
-					 struct drm_file *file)
-{
-	u32 cmdbuf;
-	int err;
-
-	err = get_user(cmdbuf, &src->handle);
-	if (err < 0)
-		return err;
-
-	err = get_user(dest->offset, &src->offset);
-	if (err < 0)
-		return err;
-
-	err = get_user(dest->syncpt_id, &src->syncpt);
-	if (err < 0)
-		return err;
-
-	err = get_user(dest->thresh, &src->thresh);
-	if (err < 0)
-		return err;
-
-	dest->bo = host1x_bo_lookup(file, cmdbuf);
-	if (!dest->bo)
-		return -ENOENT;
-
-	return 0;
-}
-
 int tegra_drm_submit(struct tegra_drm_context *context,
 		     struct drm_tegra_submit *args, struct drm_device *drm,
 		     struct drm_file *file)
 {
 	unsigned int num_cmdbufs = args->num_cmdbufs;
 	unsigned int num_relocs = args->num_relocs;
-	unsigned int num_waitchks = args->num_waitchks;
 	struct drm_tegra_cmdbuf __user *user_cmdbufs;
 	struct drm_tegra_reloc __user *user_relocs;
-	struct drm_tegra_waitchk __user *user_waitchks;
 	struct drm_tegra_syncpt __user *user_syncpt;
 	struct drm_tegra_syncpt syncpt;
 	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
@@ -372,7 +340,6 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 
 	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
 	user_relocs = u64_to_user_ptr(args->relocs);
-	user_waitchks = u64_to_user_ptr(args->waitchks);
 	user_syncpt = u64_to_user_ptr(args->syncpts);
 
 	/* We don't yet support other than one syncpt_incr struct per submit */
@@ -384,12 +351,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 		return -EINVAL;
 
 	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
-			       args->num_relocs, args->num_waitchks);
+			       args->num_relocs);
 	if (!job)
 		return -ENOMEM;
 
 	job->num_relocs = args->num_relocs;
-	job->num_waitchk = args->num_waitchks;
 	job->client = (u32)args->context;
 	job->class = context->client->base.class;
 	job->serialize = true;
@@ -398,7 +364,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 	 * Track referenced BOs so that they can be unreferenced after the
 	 * submission is complete.
 	 */
-	num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;
+	num_refs = num_cmdbufs + num_relocs * 2;
 
 	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
 	if (!refs) {
@@ -489,30 +455,6 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 		}
 	}
 
-	/* copy and resolve waitchks from submit */
-	while (num_waitchks--) {
-		struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
-		struct tegra_bo *obj;
-
-		err = host1x_waitchk_copy_from_user(
-			wait, &user_waitchks[num_waitchks], file);
-		if (err < 0)
-			goto fail;
-
-		obj = host1x_to_tegra_bo(wait->bo);
-		refs[num_refs++] = &obj->gem;
-
-		/*
-		 * The unaligned offset will cause an unaligned write during
-		 * of the waitchks patching, corrupting the commands stream.
-		 */
-		if (wait->offset & 3 ||
-		    wait->offset >= obj->gem.size) {
-			err = -EINVAL;
-			goto fail;
-		}
-	}
-
 	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
 		err = -EFAULT;
 		goto fail;
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 43e9fabb43a1..36f44ffebe73 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -78,7 +78,6 @@ struct host1x_syncpt_ops {
 	void (*load_wait_base)(struct host1x_syncpt *syncpt);
 	u32 (*load)(struct host1x_syncpt *syncpt);
 	int (*cpu_incr)(struct host1x_syncpt *syncpt);
-	int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
 	void (*assign_to_channel)(struct host1x_syncpt *syncpt,
 	                          struct host1x_channel *channel);
 	void (*enable_protection)(struct host1x *host);
@@ -183,13 +182,6 @@ static inline int host1x_hw_syncpt_cpu_incr(struct host1x *host,
 	return host->syncpt_op->cpu_incr(sp);
 }
 
-static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
-					      struct host1x_syncpt *sp,
-					      void *patch_addr)
-{
-	return host->syncpt_op->patch_wait(sp, patch_addr);
-}
-
 static inline void host1x_hw_syncpt_assign_to_channel(
 	struct host1x *host, struct host1x_syncpt *sp,
 	struct host1x_channel *ch)
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 9af758785a11..4c9555038a95 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -104,8 +104,7 @@ static int channel_submit(struct host1x_job *job)
 	sp = host->syncpt + job->syncpt_id;
 	trace_host1x_channel_submit(dev_name(ch->dev),
 				    job->num_gathers, job->num_relocs,
-				    job->num_waitchk, job->syncpt_id,
-				    job->syncpt_incrs);
+				    job->syncpt_id, job->syncpt_incrs);
 
 	/* before error checks, return current max */
 	prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 7dfd47d74f89..a23bb3352d02 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -96,16 +96,6 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
 	return 0;
 }
 
-/* remove a wait pointed to by patch_addr */
-static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
-{
-	u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0);
-
-	*((u32 *)patch_addr) = override;
-
-	return 0;
-}
-
 /**
  * syncpt_assign_to_channel() - Assign syncpoint to channel
  * @sp: syncpoint
@@ -156,7 +146,6 @@ static const struct host1x_syncpt_ops host1x_syncpt_ops = {
 	.load_wait_base = syncpt_read_wait_base,
 	.load = syncpt_load,
 	.cpu_incr = syncpt_cpu_incr,
-	.patch_wait = syncpt_patch_wait,
 	.assign_to_channel = syncpt_assign_to_channel,
 	.enable_protection = syncpt_enable_protection,
 };
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index db509ab8874e..3cbfc6e37668 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -34,8 +34,7 @@
 #define HOST1X_WAIT_SYNCPT_OFFSET 0x8
 
 struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
-				    u32 num_cmdbufs, u32 num_relocs,
-				    u32 num_waitchks)
+				    u32 num_cmdbufs, u32 num_relocs)
 {
 	struct host1x_job *job = NULL;
 	unsigned int num_unpins = num_cmdbufs + num_relocs;
@@ -46,7 +45,6 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 	total = sizeof(struct host1x_job) +
 		(u64)num_relocs * sizeof(struct host1x_reloc) +
 		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
-		(u64)num_waitchks * sizeof(struct host1x_waitchk) +
 		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
 		(u64)num_unpins * sizeof(dma_addr_t) +
 		(u64)num_unpins * sizeof(u32 *);
@@ -66,8 +64,6 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 	mem += num_relocs * sizeof(struct host1x_reloc);
 	job->unpins = num_unpins ? mem : NULL;
 	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
-	job->waitchk = num_waitchks ? mem : NULL;
-	mem += num_waitchks * sizeof(struct host1x_waitchk);
 	job->gathers = num_cmdbufs ? mem : NULL;
 	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
 	job->addr_phys = num_unpins ? mem : NULL;
@@ -111,73 +107,6 @@ void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
 }
 EXPORT_SYMBOL(host1x_job_add_gather);
 
-/*
- * NULL an already satisfied WAIT_SYNCPT host method, by patching its
- * args in the command stream. The method data is changed to reference
- * a reserved (never given out or incr) HOST1X_SYNCPT_RESERVED syncpt
- * with a matching threshold value of 0, so is guaranteed to be popped
- * by the host HW.
- */
-static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
-				       struct host1x_bo *h, u32 offset)
-{
-	void *patch_addr = NULL;
-
-	/* patch the wait */
-	patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
-	if (patch_addr) {
-		host1x_syncpt_patch_wait(sp,
-					 patch_addr + (offset & ~PAGE_MASK));
-		host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
-	} else
-		pr_err("Could not map cmdbuf for wait check\n");
-}
-
-/*
- * Check driver supplied waitchk structs for syncpt thresholds
- * that have already been satisfied and NULL the comparison (to
- * avoid a wrap condition in the HW).
- */
-static int do_waitchks(struct host1x_job *job, struct host1x *host,
-		       struct host1x_job_gather *g)
-{
-	struct host1x_bo *patch = g->bo;
-	int i;
-
-	/* compare syncpt vs wait threshold */
-	for (i = 0; i < job->num_waitchk; i++) {
-		struct host1x_waitchk *wait = &job->waitchk[i];
-		struct host1x_syncpt *sp =
-			host1x_syncpt_get(host, wait->syncpt_id);
-
-		/* validate syncpt id */
-		if (wait->syncpt_id > host1x_syncpt_nb_pts(host))
-			continue;
-
-		/* skip all other gathers */
-		if (patch != wait->bo)
-			continue;
-
-		trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
-					       wait->syncpt_id, wait->thresh,
-					       host1x_syncpt_read_min(sp));
-
-		if (host1x_syncpt_is_expired(sp, wait->thresh)) {
-			dev_dbg(host->dev,
-				"drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
-				wait->syncpt_id, sp->name, wait->thresh,
-				host1x_syncpt_read_min(sp));
-
-			host1x_syncpt_patch_offset(sp, patch,
-						   g->offset + wait->offset);
-		}
-
-		wait->bo = NULL;
-	}
-
-	return 0;
-}
-
 static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 {
 	unsigned int i;
@@ -331,17 +260,6 @@ static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
 	return true;
 }
 
-static bool check_wait(struct host1x_waitchk *wait, struct host1x_bo *cmdbuf,
-		       unsigned int offset)
-{
-	offset *= sizeof(u32);
-
-	if (wait->bo != cmdbuf || wait->offset != offset)
-		return false;
-
-	return true;
-}
-
 struct host1x_firewall {
 	struct host1x_job *job;
 	struct device *dev;
@@ -349,9 +267,6 @@ struct host1x_firewall {
 	unsigned int num_relocs;
 	struct host1x_reloc *reloc;
 
-	unsigned int num_waitchks;
-	struct host1x_waitchk *waitchk;
-
 	struct host1x_bo *cmdbuf;
 	unsigned int offset;
 
@@ -378,20 +293,6 @@ static int check_register(struct host1x_firewall *fw, unsigned long offset)
 		fw->reloc++;
 	}
 
-	if (offset == HOST1X_WAIT_SYNCPT_OFFSET) {
-		if (fw->class != HOST1X_CLASS_HOST1X)
-			return -EINVAL;
-
-		if (!fw->num_waitchks)
-			return -EINVAL;
-
-		if (!check_wait(fw->waitchk, fw->cmdbuf, fw->offset))
-			return -EINVAL;
-
-		fw->num_waitchks--;
-		fw->waitchk++;
-	}
-
 	return 0;
 }
 
@@ -556,8 +457,6 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 	fw.dev = dev;
 	fw.reloc = job->relocarray;
 	fw.num_relocs = job->num_relocs;
-	fw.waitchk = job->waitchk;
-	fw.num_waitchks = job->num_waitchk;
 	fw.class = job->class;
 
 	for (i = 0; i < job->num_gathers; i++) {
@@ -604,8 +503,8 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 		offset += g->words * sizeof(u32);
 	}
 
-	/* No relocs and waitchks should remain at this point */
-	if (fw.num_relocs || fw.num_waitchks)
+	/* No relocs should remain at this point */
+	if (fw.num_relocs)
 		return -EINVAL;
 
 	return 0;
@@ -616,19 +515,6 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
 	int err;
 	unsigned int i, j;
 	struct host1x *host = dev_get_drvdata(dev->parent);
-	DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
-
-	bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
-	for (i = 0; i < job->num_waitchk; i++) {
-		u32 syncpt_id = job->waitchk[i].syncpt_id;
-
-		if (syncpt_id < host1x_syncpt_nb_pts(host))
-			set_bit(syncpt_id, waitchk_mask);
-	}
-
-	/* get current syncpt values for waitchk */
-	for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
-		host1x_syncpt_load(host->syncpt + i);
 
 	/* pin memory */
 	err = pin_job(host, job);
@@ -663,10 +549,6 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
 		err = do_relocs(job, g);
 		if (err)
 			break;
-
-		err = do_waitchks(job, host, g);
-		if (err)
-			break;
 	}
 
 out:
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index a2a952adc136..a108669188e8 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -373,12 +373,6 @@ bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
 		return (s32)(current_val - thresh) >= 0;
 }
 
-/* remove a wait pointed to by patch_addr */
-int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
-{
-	return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
-}
-
 int host1x_syncpt_init(struct host1x *host)
 {
 	struct host1x_syncpt_base *bases;
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 9d88d37c2397..d98e22325e9d 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -124,7 +124,4 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
 	return sp->id < host1x_syncpt_nb_pts(sp->host);
 }
 
-/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
-int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
-
 #endif
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index ddf7f9ca86cc..f66bece1e1b7 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -192,13 +192,6 @@ struct host1x_reloc {
 	unsigned long shift;
 };
 
-struct host1x_waitchk {
-	struct host1x_bo *bo;
-	u32 offset;
-	u32 syncpt_id;
-	u32 thresh;
-};
-
 struct host1x_job {
 	/* When refcount goes to zero, job can be freed */
 	struct kref ref;
@@ -215,11 +208,6 @@ struct host1x_job {
 	struct host1x_job_gather *gathers;
 	unsigned int num_gathers;
 
-	/* Wait checks to be processed at submit time */
-	struct host1x_waitchk *waitchk;
-	unsigned int num_waitchk;
-	u32 waitchk_mask;
-
 	/* Array of handles to be pinned & unpinned */
 	struct host1x_reloc *relocarray;
 	unsigned int num_relocs;
@@ -261,8 +249,7 @@ struct host1x_job {
 };
 
 struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
-				    u32 num_cmdbufs, u32 num_relocs,
-				    u32 num_waitchks);
+				    u32 num_cmdbufs, u32 num_relocs);
 void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
 			   u32 words, u32 offset);
 struct host1x_job *host1x_job_get(struct host1x_job *job);
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
index 63116362543c..a37ef73092e5 100644
--- a/include/trace/events/host1x.h
+++ b/include/trace/events/host1x.h
@@ -115,16 +115,15 @@ TRACE_EVENT(host1x_cdma_push_gather,
 );
 
 TRACE_EVENT(host1x_channel_submit,
-	TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 waitchks,
-			u32 syncpt_id, u32 syncpt_incrs),
+	TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 syncpt_id,
+		 u32 syncpt_incrs),
 
-	TP_ARGS(name, cmdbufs, relocs, waitchks, syncpt_id, syncpt_incrs),
+	TP_ARGS(name, cmdbufs, relocs, syncpt_id, syncpt_incrs),
 
 	TP_STRUCT__entry(
 		__field(const char *, name)
 		__field(u32, cmdbufs)
 		__field(u32, relocs)
-		__field(u32, waitchks)
 		__field(u32, syncpt_id)
 		__field(u32, syncpt_incrs)
 	),
@@ -133,15 +132,14 @@ TRACE_EVENT(host1x_channel_submit,
 		__entry->name = name;
 		__entry->cmdbufs = cmdbufs;
 		__entry->relocs = relocs;
-		__entry->waitchks = waitchks;
 		__entry->syncpt_id = syncpt_id;
 		__entry->syncpt_incrs = syncpt_incrs;
 	),
 
-	TP_printk("name=%s, cmdbufs=%u, relocs=%u, waitchks=%d,"
-		"syncpt_id=%u, syncpt_incrs=%u",
-	  __entry->name, __entry->cmdbufs, __entry->relocs, __entry->waitchks,
-	  __entry->syncpt_id, __entry->syncpt_incrs)
+	TP_printk("name=%s, cmdbufs=%u, relocs=%u, syncpt_id=%u, "
+		  "syncpt_incrs=%u",
+		  __entry->name, __entry->cmdbufs, __entry->relocs,
+		  __entry->syncpt_id, __entry->syncpt_incrs)
 );
 
 TRACE_EVENT(host1x_channel_submitted,

From bf3d41ccabb53c57e19fcfc8b81d790043ac2bed Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 14:12:33 +0200
Subject: [PATCH 1179/1286] gpu: host1x: Store pointer to client in jobs

Rather than storing some identifier derived from the application
context that can't be used concretely anywhere, store a pointer to the
client directly so that accesses can be made through that client
object.
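
As a hypothetical illustration (the dev_warn() call site is made up;
struct host1x_client does provide a dev pointer), code that handles the
job later can now dereference the client instead of carrying an opaque
number:

    /* e.g. from timeout handling: identify the client's device directly */
    dev_warn(job->client->dev, "job timed out\n");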

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.c | 5 +++--
 drivers/gpu/host1x/cdma.c   | 2 +-
 drivers/gpu/host1x/cdma.h   | 2 +-
 include/linux/host1x.h      | 3 ++-
 4 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 204b10e33f16..8f29323611dd 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -325,6 +325,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 		     struct drm_tegra_submit *args, struct drm_device *drm,
 		     struct drm_file *file)
 {
+	struct host1x_client *client = &context->client->base;
 	unsigned int num_cmdbufs = args->num_cmdbufs;
 	unsigned int num_relocs = args->num_relocs;
 	struct drm_tegra_cmdbuf __user *user_cmdbufs;
@@ -356,8 +357,8 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 		return -ENOMEM;
 
 	job->num_relocs = args->num_relocs;
-	job->client = (u32)args->context;
-	job->class = context->client->base.class;
+	job->client = client;
+	job->class = client->class;
 	job->serialize = true;
 
 	/*
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 69bb77372ed9..91df51e631b2 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -247,7 +247,7 @@ static void cdma_start_timer_locked(struct host1x_cdma *cdma,
 static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
 {
 	cancel_delayed_work(&cdma->timeout.wq);
-	cdma->timeout.client = 0;
+	cdma->timeout.client = NULL;
 }
 
 /*
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
index 446ee1a84969..e97e17b82370 100644
--- a/drivers/gpu/host1x/cdma.h
+++ b/drivers/gpu/host1x/cdma.h
@@ -58,7 +58,7 @@ struct buffer_timeout {
 	u32 syncpt_val;			/* syncpt value when completed */
 	ktime_t start_ktime;		/* starting time */
 	/* context timeout information */
-	int client;
+	struct host1x_client *client;
 };
 
 enum cdma_event {
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index f66bece1e1b7..0632010f47fb 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -202,7 +202,8 @@ struct host1x_job {
 	/* Channel where job is submitted to */
 	struct host1x_channel *channel;
 
-	u32 client;
+	/* client where the job originated */
+	struct host1x_client *client;
 
 	/* Gathers and their memory */
 	struct host1x_job_gather *gathers;

From d4ad3ad9b81b73f568227563988b67708291900b Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Fri, 23 Mar 2018 13:31:24 +0100
Subject: [PATCH 1180/1286] gpu: host1x: Cleanup loop variable usage

Use unsigned int where possible and don't unnecessarily initialize the
loop variable.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/host1x/debug.c  | 2 +-
 drivers/gpu/host1x/intr.c   | 2 +-
 drivers/gpu/host1x/job.c    | 4 ++--
 drivers/gpu/host1x/syncpt.c | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index dc77ec452ffc..329e4a3d8ae7 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -103,7 +103,7 @@ static void show_syncpts(struct host1x *m, struct output *o)
 
 static void show_all(struct host1x *m, struct output *o, bool show_fifo)
 {
-	int i;
+	unsigned int i;
 
 	host1x_hw_show_mlocks(m, o);
 	show_syncpts(m, o);
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 8b4fad0ab35d..6028cf7b681f 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -144,7 +144,7 @@ static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
 static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
 {
 	struct list_head *head = completed;
-	int i;
+	unsigned int i;
 
 	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
 		action_handler handler = action_handlers[i];
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 3cbfc6e37668..2be0bcaf8288 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -196,10 +196,10 @@ unpin:
 
 static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
 {
-	int i = 0;
 	u32 last_page = ~0;
 	void *cmdbuf_page_addr = NULL;
 	struct host1x_bo *cmdbuf = g->bo;
+	unsigned int i;
 
 	/* pin & patch the relocs for one gather */
 	for (i = 0; i < job->num_relocs; i++) {
@@ -451,7 +451,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 	struct host1x_firewall fw;
 	size_t size = 0;
 	size_t offset = 0;
-	int i;
+	unsigned int i;
 
 	fw.job = job;
 	fw.dev = dev;
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index a108669188e8..088c05dd884c 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -57,8 +57,8 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
 						 struct host1x_client *client,
 						 unsigned long flags)
 {
-	int i;
 	struct host1x_syncpt *sp = host->syncpt;
+	unsigned int i;
 	char *name;
 
 	mutex_lock(&host->syncpt_mutex);

From ac330f45c7ca5b92e78b369c7034160947f03b8d Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 14:29:33 +0200
Subject: [PATCH 1181/1286] gpu: host1x: Drop unnecessary host1x argument

Functions taking a pointer to a host1x syncpoint as an argument don't
need to specify a pointer to a host1x instance because it can be
obtained from the syncpoint.
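
For illustration, inside such a function the instance can simply be
recovered from the syncpoint's back-pointer (a sketch mirroring what the
code below does):

    struct host1x *host = syncpt->host;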

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/host1x/hw/channel_hw.c |  2 +-
 drivers/gpu/host1x/intr.c          | 14 ++++++--------
 drivers/gpu/host1x/intr.h          |  8 +++++---
 drivers/gpu/host1x/syncpt.c        |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 4c9555038a95..d188f9068b91 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -164,7 +164,7 @@ static int channel_submit(struct host1x_job *job)
 	trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
 
 	/* schedule a submit complete interrupt */
-	err = host1x_intr_add_action(host, job->syncpt_id, syncval,
+	err = host1x_intr_add_action(host, sp, syncval,
 				     HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
 				     completed_waiter, NULL);
 	completed_waiter = NULL;
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 6028cf7b681f..9629c009d10f 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -211,11 +211,11 @@ static void syncpt_thresh_work(struct work_struct *work)
 				host1x_syncpt_load(host->syncpt + id));
 }
 
-int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
-			   enum host1x_intr_action action, void *data,
-			   struct host1x_waitlist *waiter, void **ref)
+int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
+			   u32 thresh, enum host1x_intr_action action,
+			   void *data, struct host1x_waitlist *waiter,
+			   void **ref)
 {
-	struct host1x_syncpt *syncpt;
 	int queue_was_empty;
 
 	if (waiter == NULL) {
@@ -234,19 +234,17 @@ int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
 	waiter->data = data;
 	waiter->count = 1;
 
-	syncpt = host->syncpt + id;
-
 	spin_lock(&syncpt->intr.lock);
 
 	queue_was_empty = list_empty(&syncpt->intr.wait_head);
 
 	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
 		/* added at head of list - new threshold value */
-		host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+		host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);
 
 		/* added as first waiter - enable interrupt */
 		if (queue_was_empty)
-			host1x_hw_intr_enable_syncpt_intr(host, id);
+			host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
 	}
 
 	spin_unlock(&syncpt->intr.lock);
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index 1370c2bb75b8..6db96af484fe 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
 
+struct host1x_syncpt;
 struct host1x;
 
 enum host1x_intr_action {
@@ -75,9 +76,10 @@ struct host1x_waitlist {
  *
  * This is a non-blocking api.
  */
-int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
-	enum host1x_intr_action action, void *data,
-	struct host1x_waitlist *waiter, void **ref);
+int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
+			   u32 thresh, enum host1x_intr_action action,
+			   void *data, struct host1x_waitlist *waiter,
+			   void **ref);
 
 /*
  * Unreference an action submitted to host1x_intr_add_action().
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 088c05dd884c..a5dbf1ba4645 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -255,7 +255,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
 	}
 
 	/* schedule a wakeup when the syncpoint value is reached */
-	err = host1x_intr_add_action(sp->host, sp->id, thresh,
+	err = host1x_intr_add_action(sp->host, sp, thresh,
 				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 				     &wq, waiter, &ref);
 	if (err)

From 06490bb99e1840ab2b6814af7356e8b4ab0e3ee6 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 16:58:44 +0200
Subject: [PATCH 1182/1286] gpu: host1x: Rename relocarray -> relocs for
 consistency

All other array variables use a plural, and this is the only one using
the *array suffix. This is confusing, so rename it for consistency.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.c | 4 ++--
 drivers/gpu/host1x/job.c    | 8 ++++----
 include/linux/host1x.h      | 2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8f29323611dd..bfbd3a89c26f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -426,13 +426,13 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 		struct host1x_reloc *reloc;
 		struct tegra_bo *obj;
 
-		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
+		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
 						  &user_relocs[num_relocs], drm,
 						  file);
 		if (err < 0)
 			goto fail;
 
-		reloc = &job->relocarray[num_relocs];
+		reloc = &job->relocs[num_relocs];
 		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
 		refs[num_refs++] = &obj->gem;
 
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 2be0bcaf8288..9d6d3e151291 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -60,7 +60,7 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 
 	/* Redistribute memory to the structs  */
 	mem += sizeof(struct host1x_job);
-	job->relocarray = num_relocs ? mem : NULL;
+	job->relocs = num_relocs ? mem : NULL;
 	mem += num_relocs * sizeof(struct host1x_reloc);
 	job->unpins = num_unpins ? mem : NULL;
 	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
@@ -115,7 +115,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 	job->num_unpins = 0;
 
 	for (i = 0; i < job->num_relocs; i++) {
-		struct host1x_reloc *reloc = &job->relocarray[i];
+		struct host1x_reloc *reloc = &job->relocs[i];
 		struct sg_table *sgt;
 		dma_addr_t phys_addr;
 
@@ -203,7 +203,7 @@ static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
 
 	/* pin & patch the relocs for one gather */
 	for (i = 0; i < job->num_relocs; i++) {
-		struct host1x_reloc *reloc = &job->relocarray[i];
+		struct host1x_reloc *reloc = &job->relocs[i];
 		u32 reloc_addr = (job->reloc_addr_phys[i] +
 				  reloc->target.offset) >> reloc->shift;
 		u32 *target;
@@ -455,7 +455,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 
 	fw.job = job;
 	fw.dev = dev;
-	fw.reloc = job->relocarray;
+	fw.reloc = job->relocs;
 	fw.num_relocs = job->num_relocs;
 	fw.class = job->class;
 
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 0632010f47fb..dcb6140d39d7 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -210,7 +210,7 @@ struct host1x_job {
 	unsigned int num_gathers;
 
 	/* Array of handles to be pinned & unpinned */
-	struct host1x_reloc *relocarray;
+	struct host1x_reloc *relocs;
 	unsigned int num_relocs;
 	struct host1x_job_unpin_data *unpins;
 	unsigned int num_unpins;

From 326bbd79fd61716841585a52d5b68f48f4e6644e Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 17:01:43 +0200
Subject: [PATCH 1183/1286] gpu: host1x: Use not explicitly sized types

The number of words and the offset in a gather don't need to be
explicitly sized, so make them unsigned int instead.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/host1x/job.c | 11 ++++++-----
 drivers/gpu/host1x/job.h |  4 ++--
 include/linux/host1x.h   |  4 ++--
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 9d6d3e151291..e2f4a4d93d20 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -96,13 +96,14 @@ void host1x_job_put(struct host1x_job *job)
 EXPORT_SYMBOL(host1x_job_put);
 
 void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
-			   u32 words, u32 offset)
+			   unsigned int words, unsigned int offset)
 {
-	struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];
+	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];
+
+	gather->words = words;
+	gather->bo = bo;
+	gather->offset = offset;
 
-	cur_gather->words = words;
-	cur_gather->bo = bo;
-	cur_gather->offset = offset;
 	job->num_gathers++;
 }
 EXPORT_SYMBOL(host1x_job_add_gather);
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 4bda51d503ec..188400e00192 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -20,10 +20,10 @@
 #define __HOST1X_JOB_H
 
 struct host1x_job_gather {
-	u32 words;
+	unsigned int words;
 	dma_addr_t base;
 	struct host1x_bo *bo;
-	u32 offset;
+	unsigned int offset;
 	bool handled;
 };
 
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index dcb6140d39d7..89110d896d72 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -251,8 +251,8 @@ struct host1x_job {
 
 struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
 				    u32 num_cmdbufs, u32 num_relocs);
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
-			   u32 words, u32 offset);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
+			   unsigned int words, unsigned int offset);
 struct host1x_job *host1x_job_get(struct host1x_job *job);
 void host1x_job_put(struct host1x_job *job);
 int host1x_job_pin(struct host1x_job *job, struct device *dev);

From c850ece71f71c2a68a9921c52fb5fd8d3ec2b8d7 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Sat, 5 May 2018 08:12:53 +0200
Subject: [PATCH 1184/1286] drm/tegra: Use proper arguments for
 DRM_TEGRA_CLOSE_CHANNEL IOCTL

A separate data structure exists for the DRM_TEGRA_CLOSE_CHANNEL IOCTL,
but it is currently unused. The IOCTL was using the data structure for
the DRM_TEGRA_OPEN_CHANNEL IOCTL.
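
A hypothetical userspace sketch (fd and context are assumed to come from
an earlier DRM_IOCTL_TEGRA_OPEN_CHANNEL call) using the dedicated
structure:

    struct drm_tegra_close_channel args = {
            .context = context,
    };

    if (ioctl(fd, DRM_IOCTL_TEGRA_CLOSE_CHANNEL, &args) < 0)
            perror("DRM_IOCTL_TEGRA_CLOSE_CHANNEL");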

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 include/uapi/drm/tegra_drm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index d954f8c33321..99e15d82d1e9 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -193,7 +193,7 @@ struct drm_tegra_gem_get_flags {
 #define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
 #define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
 #define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
-#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
+#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_close_channel)
 #define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
 #define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
 #define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)

From 4bd91a5b5dbb8b536208396c3d032cba8e3c3913 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 18:49:04 +0200
Subject: [PATCH 1185/1286] drm/tegra: gem: Fill in missing export info

Set the owner and name of the exported DMA-BUF in addition to the
already filled-in fields.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/gem.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 1c4011774c3f..00a5c9f32254 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -649,6 +649,8 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
+	exp_info.exp_name = KBUILD_MODNAME;
+	exp_info.owner = drm->driver->fops->owner;
 	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
 	exp_info.size = gem->size;
 	exp_info.flags = flags;

From 995c5a509fb032ddd83eff4f3772c7fc8ff0b7ec Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Mon, 19 Mar 2018 17:20:46 +0100
Subject: [PATCH 1186/1286] drm/tegra: dc: Support rotation property

Currently only the DRM_MODE_REFLECT_Y rotation is supported. The driver
already supports reflection on the Y axis via a custom flag which is not
very useful because it requires custom userspace. Add the standard
rotation property that supports 0 degree rotation and Y axis reflection
for primary and overlay planes to provide a better interface than the
custom flag.

v2: keep custom flag for ABI compatibility (Dmitry)
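
A hypothetical userspace sketch (rotation_prop_id is assumed to have been
looked up beforehand via drmModeObjectGetProperties() on the plane):

    /* request 0 degree rotation with a reflection on the Y axis */
    drmModeAtomicAddProperty(req, plane_id, rotation_prop_id,
                             DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y);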

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/dc.c    | 26 +++++++++++++++++++++++++-
 drivers/gpu/drm/tegra/plane.c |  1 +
 drivers/gpu/drm/tegra/plane.h |  2 ++
 3 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 31e12a9dfcb8..c3afe7b2237e 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -596,6 +596,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
 				    struct drm_plane_state *state)
 {
 	struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
+	unsigned int rotation = DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y;
 	struct tegra_bo_tiling *tiling = &plane_state->tiling;
 	struct tegra_plane *tegra = to_tegra_plane(plane);
 	struct tegra_dc *dc = to_tegra_dc(state->crtc);
@@ -633,6 +634,13 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
 		return -EINVAL;
 	}
 
+	rotation = drm_rotation_simplify(state->rotation, rotation);
+
+	if (rotation & DRM_MODE_REFLECT_Y)
+		plane_state->bottom_up = true;
+	else
+		plane_state->bottom_up = false;
+
 	/*
 	 * Tegra doesn't support different strides for U and V planes so we
 	 * error out if the user tries to display a framebuffer with such a
@@ -693,7 +701,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
 	window.dst.w = drm_rect_width(&plane->state->dst);
 	window.dst.h = drm_rect_height(&plane->state->dst);
 	window.bits_per_pixel = fb->format->cpp[0] * 8;
-	window.bottom_up = tegra_fb_is_bottom_up(fb);
+	window.bottom_up = tegra_fb_is_bottom_up(fb) || state->bottom_up;
 
 	/* copy from state */
 	window.zpos = plane->state->normalized_zpos;
@@ -776,6 +784,14 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
 	drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
 	drm_plane_create_zpos_property(&plane->base, plane->index, 0, 255);
 
+	err = drm_plane_create_rotation_property(&plane->base,
+						 DRM_MODE_ROTATE_0,
+						 DRM_MODE_ROTATE_0 |
+						 DRM_MODE_REFLECT_Y);
+	if (err < 0)
+		dev_err(dc->dev, "failed to create rotation property: %d\n",
+			err);
+
 	return &plane->base;
 }
 
@@ -1053,6 +1069,14 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
 	drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
 	drm_plane_create_zpos_property(&plane->base, plane->index, 0, 255);
 
+	err = drm_plane_create_rotation_property(&plane->base,
+						 DRM_MODE_ROTATE_0,
+						 DRM_MODE_ROTATE_0 |
+						 DRM_MODE_REFLECT_Y);
+	if (err < 0)
+		dev_err(dc->dev, "failed to create rotation property: %d\n",
+			err);
+
 	return &plane->base;
 }
 
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 0406c2ef432c..d068e8aa3553 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -56,6 +56,7 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
 	copy->tiling = state->tiling;
 	copy->format = state->format;
 	copy->swap = state->swap;
+	copy->bottom_up = state->bottom_up;
 	copy->opaque = state->opaque;
 
 	for (i = 0; i < 2; i++)
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index 7360ddfafee8..e79e6b4a8e0a 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -46,6 +46,8 @@ struct tegra_plane_state {
 	u32 format;
 	u32 swap;
 
+	bool bottom_up;
+
 	/* used for legacy blending support only */
 	struct tegra_plane_legacy_blending_state blending[2];
 	bool opaque;

From f3b3cfcc3f09490ffb8e1e997e8a8695a6a55b1b Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 17:05:04 +0200
Subject: [PATCH 1187/1286] drm/tegra: Track client version

Userspace needs to know the version of the interface implemented by a
client so it can create the proper command streams. Allow individual
drivers to store this version along with the client so that it can be
returned to userspace upon opening a channel.

Acked-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/drm.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index f47a60592334..92d248784396 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -91,6 +91,7 @@ struct tegra_drm_client {
 	struct host1x_client base;
 	struct list_head list;
 
+	unsigned int version;
 	const struct tegra_drm_client_ops *ops;
 };
 

From 840fd213fca23b185f71b45a5b563e4e9b6d1759 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 17:06:36 +0200
Subject: [PATCH 1188/1286] drm/tegra: gr2d: Track interface version

Set the interface version implemented by the gr2d module. This allows
userspace to pass the correct command stream when programming the gr2d
module.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/gr2d.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 2cd0f66c8aa9..673059fd2fcb 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -8,17 +8,24 @@
 
 #include <linux/clk.h>
 #include <linux/iommu.h>
+#include <linux/of_device.h>
 
 #include "drm.h"
 #include "gem.h"
 #include "gr2d.h"
 
+struct gr2d_soc {
+	unsigned int version;
+};
+
 struct gr2d {
 	struct iommu_group *group;
 	struct tegra_drm_client client;
 	struct host1x_channel *channel;
 	struct clk *clk;
 
+	const struct gr2d_soc *soc;
+
 	DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
 };
 
@@ -150,9 +157,17 @@ static const struct tegra_drm_client_ops gr2d_ops = {
 	.submit = tegra_drm_submit,
 };
 
+static const struct gr2d_soc tegra20_gr2d_soc = {
+	.version = 0x20,
+};
+
+static const struct gr2d_soc tegra30_gr2d_soc = {
+	.version = 0x30,
+};
+
 static const struct of_device_id gr2d_match[] = {
-	{ .compatible = "nvidia,tegra30-gr2d" },
-	{ .compatible = "nvidia,tegra20-gr2d" },
+	{ .compatible = "nvidia,tegra30-gr2d", .data = &tegra20_gr2d_soc },
+	{ .compatible = "nvidia,tegra20-gr2d", .data = &tegra30_gr2d_soc },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, gr2d_match);
@@ -185,6 +200,8 @@ static int gr2d_probe(struct platform_device *pdev)
 	if (!gr2d)
 		return -ENOMEM;
 
+	gr2d->soc = of_device_get_match_data(dev);
+
 	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
 	if (!syncpts)
 		return -ENOMEM;
@@ -209,6 +226,7 @@ static int gr2d_probe(struct platform_device *pdev)
 	gr2d->client.base.num_syncpts = 1;
 
 	INIT_LIST_HEAD(&gr2d->client.list);
+	gr2d->client.version = gr2d->soc->version;
 	gr2d->client.ops = &gr2d_ops;
 
 	err = host1x_client_register(&gr2d->client.base);

From 33f150ea82ff029ec7e00345c6fbf00e44a8fd60 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 17:07:38 +0200
Subject: [PATCH 1189/1286] drm/tegra: gr3d: Track interface version

Set the interface version implemented by the gr3d module. This allows
userspace to pass the correct command stream when programming the gr3d
module.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/gr3d.c | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index b00002f1c590..4778ae999668 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -11,6 +11,7 @@
 #include <linux/host1x.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 
@@ -20,6 +21,10 @@
 #include "gem.h"
 #include "gr3d.h"
 
+struct gr3d_soc {
+	unsigned int version;
+};
+
 struct gr3d {
 	struct iommu_group *group;
 	struct tegra_drm_client client;
@@ -29,6 +34,8 @@ struct gr3d {
 	struct reset_control *rst_secondary;
 	struct reset_control *rst;
 
+	const struct gr3d_soc *soc;
+
 	DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
 };
 
@@ -151,10 +158,22 @@ static const struct tegra_drm_client_ops gr3d_ops = {
 	.submit = tegra_drm_submit,
 };
 
+static const struct gr3d_soc tegra20_gr3d_soc = {
+	.version = 0x20,
+};
+
+static const struct gr3d_soc tegra30_gr3d_soc = {
+	.version = 0x30,
+};
+
+static const struct gr3d_soc tegra114_gr3d_soc = {
+	.version = 0x35,
+};
+
 static const struct of_device_id tegra_gr3d_match[] = {
-	{ .compatible = "nvidia,tegra114-gr3d" },
-	{ .compatible = "nvidia,tegra30-gr3d" },
-	{ .compatible = "nvidia,tegra20-gr3d" },
+	{ .compatible = "nvidia,tegra114-gr3d", .data = &tegra114_gr3d_soc },
+	{ .compatible = "nvidia,tegra30-gr3d", .data = &tegra30_gr3d_soc },
+	{ .compatible = "nvidia,tegra20-gr3d", .data = &tegra20_gr3d_soc },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
@@ -276,6 +295,8 @@ static int gr3d_probe(struct platform_device *pdev)
 	if (!gr3d)
 		return -ENOMEM;
 
+	gr3d->soc = of_device_get_match_data(&pdev->dev);
+
 	syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
 	if (!syncpts)
 		return -ENOMEM;
@@ -333,6 +354,7 @@ static int gr3d_probe(struct platform_device *pdev)
 	gr3d->client.base.num_syncpts = 1;
 
 	INIT_LIST_HEAD(&gr3d->client.list);
+	gr3d->client.version = gr3d->soc->version;
 	gr3d->client.ops = &gr3d_ops;
 
 	err = host1x_client_register(&gr3d->client.base);

From acae8a9d054daa75a01e34b18f3627e6df330622 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 17:08:04 +0200
Subject: [PATCH 1190/1286] drm/tegra: vic: Track interface version

Set the interface version implemented by the VIC module. This allows
userspace to pass the correct command stream when programming the VIC
module.

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 drivers/gpu/drm/tegra/vic.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index f5794dd49f3b..9f657a63b0bb 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -25,6 +25,7 @@
 
 struct vic_config {
 	const char *firmware;
+	unsigned int version;
 };
 
 struct vic {
@@ -264,18 +265,21 @@ static const struct tegra_drm_client_ops vic_ops = {
 
 static const struct vic_config vic_t124_config = {
 	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
+	.version = 0x40,
 };
 
 #define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"
 
 static const struct vic_config vic_t210_config = {
 	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
+	.version = 0x21,
 };
 
 #define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"
 
 static const struct vic_config vic_t186_config = {
 	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
+	.version = 0x18,
 };
 
 static const struct of_device_id vic_match[] = {
@@ -342,6 +346,7 @@ static int vic_probe(struct platform_device *pdev)
 	vic->dev = dev;
 
 	INIT_LIST_HEAD(&vic->client.list);
+	vic->client.version = vic->config->version;
 	vic->client.ops = &vic_ops;
 
 	err = host1x_client_register(&vic->client.base);

From 3fdbab5f5689a656fa719df752ca7608bcf66c99 Mon Sep 17 00:00:00 2001
From: Evan Quan <evan.quan@amd.com>
Date: Mon, 26 Mar 2018 11:43:04 +0800
Subject: [PATCH 1191/1286] drm/amd/powerplay: update vega20 cg flags (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

v2: remove duplicate flag.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4e065c68b86c..63135cf79e00 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -665,7 +665,23 @@ static int soc15_common_early_init(void *handle)
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
 	case CHIP_VEGA20:
-		adev->cg_flags = 0;
+		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+			AMD_CG_SUPPORT_GFX_MGLS |
+			AMD_CG_SUPPORT_GFX_CGCG |
+			AMD_CG_SUPPORT_GFX_CGLS |
+			AMD_CG_SUPPORT_GFX_3D_CGCG |
+			AMD_CG_SUPPORT_GFX_3D_CGLS |
+			AMD_CG_SUPPORT_GFX_CP_LS |
+			AMD_CG_SUPPORT_MC_LS |
+			AMD_CG_SUPPORT_MC_MGCG |
+			AMD_CG_SUPPORT_SDMA_MGCG |
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_BIF_MGCG |
+			AMD_CG_SUPPORT_BIF_LS |
+			AMD_CG_SUPPORT_HDP_MGCG |
+			AMD_CG_SUPPORT_ROM_MGCG |
+			AMD_CG_SUPPORT_VCE_MGCG |
+			AMD_CG_SUPPORT_UVD_MGCG;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x28;
 		break;

From 602ed6c69b128b77050e178aca9e945d969f3aa8 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Tue, 24 Apr 2018 11:20:16 +0800
Subject: [PATCH 1192/1286] drm/amdgpu: Disable ip modules that are not ready
 yet
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Please re-enable the IP blocks disabled here in soc15.c once they are ready.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 63135cf79e00..295bc9cd46f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -514,9 +514,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
-		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
-		if (!amdgpu_sriov_vf(adev))
-			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+		if (adev->asic_type != CHIP_VEGA20) {
+			amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+			if (!amdgpu_sriov_vf(adev))
+				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+		}
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -527,8 +529,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+		if (adev->asic_type != CHIP_VEGA20) {
+			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+		}
 		break;
 	case CHIP_RAVEN:
 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);

From 2bb795f5ba9cd676536858a978b9df06f473af88 Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Tue, 15 May 2018 14:25:46 -0500
Subject: [PATCH 1193/1286] drm/amdgpu/vg20:Restruct uvd to support multiple
 uvds

Vega20 has dual UVD, so amdgpu_device::uvd needs to be restructured to
support multiple UVD instances. There are no logical changes here.
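
Condensed sketch of the split (field list abbreviated; see the
amdgpu_uvd.h hunk below for the complete layout):

    struct amdgpu_uvd_inst {            /* per-instance state */
            struct amdgpu_bo        *vcpu_bo;
            struct amdgpu_ring      ring;
            struct amdgpu_ring      ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
            struct amdgpu_irq_src   irq;
            /* ... */
    };

    struct amdgpu_uvd {                 /* state shared by all instances */
            const struct firmware   *fw;
            unsigned                num_enc_rings;
            uint8_t                 num_uvd_inst;
            struct amdgpu_uvd_inst  inst[AMDGPU_MAX_UVD_INSTANCES];
    };

Existing call sites are redirected through the (single) instance, e.g.
adev->uvd.ring becomes adev->uvd.inst->ring and adev->uvd.gpu_addr
becomes adev->uvd.inst->gpu_addr.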

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c     |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       | 102 ++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h       |  19 ++-
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c         |  27 ++--
 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c         |  25 ++--
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c         |  77 +++++-----
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c         | 135 +++++++++---------
 9 files changed, 205 insertions(+), 194 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d09fcab2398f..1070f4042cbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t index;
 
-	if (ring != &adev->uvd.ring) {
+	if (ring != &adev->uvd.inst->ring) {
 		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
 		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
 	} else {
 		/* put fence directly behind firmware */
 		index = ALIGN(adev->uvd.fw->size, 8);
-		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
-		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
+		ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
+		ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
 	amdgpu_irq_get(adev, irq_src, irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index eb4785e51573..5620ed291107 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -348,7 +348,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			break;
 		case AMDGPU_HW_IP_UVD:
 			type = AMD_IP_BLOCK_TYPE_UVD;
-			ring_mask = adev->uvd.ring.ready ? 1 : 0;
+			ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 			ib_size_alignment = 16;
 			break;
@@ -362,7 +362,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		case AMDGPU_HW_IP_UVD_ENC:
 			type = AMD_IP_BLOCK_TYPE_UVD;
 			for (i = 0; i < adev->uvd.num_enc_rings; i++)
-				ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
+				ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i);
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 			ib_size_alignment = 1;
 			break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 262c1267249e..2458d385e55a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -77,13 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
 		*out_ring = &adev->sdma.instance[ring].ring;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		*out_ring = &adev->uvd.ring;
+		*out_ring = &adev->uvd.inst->ring;
 		break;
 	case AMDGPU_HW_IP_VCE:
 		*out_ring = &adev->vce.ring[ring];
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
-		*out_ring = &adev->uvd.ring_enc[ring];
+		*out_ring = &adev->uvd.inst->ring_enc[ring];
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		*out_ring = &adev->vcn.ring_dec;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index fd1e9cd65066..02683a039a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -129,7 +129,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	unsigned version_major, version_minor, family_id;
 	int i, r;
 
-	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
 
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -237,16 +237,16 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
-				    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
+				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo,
+				    &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
 	}
 
-	ring = &adev->uvd.ring;
+	ring = &adev->uvd.inst->ring;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
+	r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity,
 				  rq, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up UVD run queue.\n");
@@ -254,8 +254,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	}
 
 	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		atomic_set(&adev->uvd.handles[i], 0);
-		adev->uvd.filp[i] = NULL;
+		atomic_set(&adev->uvd.inst->handles[i], 0);
+		adev->uvd.inst->filp[i] = NULL;
 	}
 
 	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
@@ -285,18 +285,18 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i;
-	kfree(adev->uvd.saved_bo);
+	kfree(adev->uvd.inst->saved_bo);
 
-	drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+	drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity);
 
-	amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
-			      &adev->uvd.gpu_addr,
-			      (void **)&adev->uvd.cpu_addr);
+	amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo,
+			      &adev->uvd.inst->gpu_addr,
+			      (void **)&adev->uvd.inst->cpu_addr);
 
-	amdgpu_ring_fini(&adev->uvd.ring);
+	amdgpu_ring_fini(&adev->uvd.inst->ring);
 
 	for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
-		amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+		amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
 
 	release_firmware(adev->uvd.fw);
 
@@ -309,29 +309,29 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	void *ptr;
 	int i;
 
-	if (adev->uvd.vcpu_bo == NULL)
+	if (adev->uvd.inst->vcpu_bo == NULL)
 		return 0;
 
-	cancel_delayed_work_sync(&adev->uvd.idle_work);
+	cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
 
 	/* only valid for physical mode */
 	if (adev->asic_type < CHIP_POLARIS10) {
 		for (i = 0; i < adev->uvd.max_handles; ++i)
-			if (atomic_read(&adev->uvd.handles[i]))
+			if (atomic_read(&adev->uvd.inst->handles[i]))
 				break;
 
 		if (i == adev->uvd.max_handles)
 			return 0;
 	}
 
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	ptr = adev->uvd.cpu_addr;
+	size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
+	ptr = adev->uvd.inst->cpu_addr;
 
-	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	if (!adev->uvd.saved_bo)
+	adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL);
+	if (!adev->uvd.inst->saved_bo)
 		return -ENOMEM;
 
-	memcpy_fromio(adev->uvd.saved_bo, ptr, size);
+	memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size);
 
 	return 0;
 }
@@ -341,16 +341,16 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	unsigned size;
 	void *ptr;
 
-	if (adev->uvd.vcpu_bo == NULL)
+	if (adev->uvd.inst->vcpu_bo == NULL)
 		return -EINVAL;
 
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	ptr = adev->uvd.cpu_addr;
+	size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
+	ptr = adev->uvd.inst->cpu_addr;
 
-	if (adev->uvd.saved_bo != NULL) {
-		memcpy_toio(ptr, adev->uvd.saved_bo, size);
-		kfree(adev->uvd.saved_bo);
-		adev->uvd.saved_bo = NULL;
+	if (adev->uvd.inst->saved_bo != NULL) {
+		memcpy_toio(ptr, adev->uvd.inst->saved_bo, size);
+		kfree(adev->uvd.inst->saved_bo);
+		adev->uvd.inst->saved_bo = NULL;
 	} else {
 		const struct common_firmware_header *hdr;
 		unsigned offset;
@@ -358,14 +358,14 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-			memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
+			memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset,
 				    le32_to_cpu(hdr->ucode_size_bytes));
 			size -= le32_to_cpu(hdr->ucode_size_bytes);
 			ptr += le32_to_cpu(hdr->ucode_size_bytes);
 		}
 		memset_io(ptr, 0, size);
 		/* to restore uvd fence seq */
-		amdgpu_fence_driver_force_completion(&adev->uvd.ring);
+		amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring);
 	}
 
 	return 0;
@@ -373,12 +373,12 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	int i, r;
 
 	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
-		if (handle != 0 && adev->uvd.filp[i] == filp) {
+		uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]);
+		if (handle != 0 && adev->uvd.inst->filp[i] == filp) {
 			struct dma_fence *fence;
 
 			r = amdgpu_uvd_get_destroy_msg(ring, handle,
@@ -391,8 +391,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 			dma_fence_wait(fence, false);
 			dma_fence_put(fence);
 
-			adev->uvd.filp[i] = NULL;
-			atomic_set(&adev->uvd.handles[i], 0);
+			adev->uvd.inst->filp[i] = NULL;
+			atomic_set(&adev->uvd.inst->handles[i], 0);
 		}
 	}
 }
@@ -696,13 +696,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* try to alloc a new handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+			if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
 				DRM_ERROR("Handle 0x%x already in use!\n", handle);
 				return -EINVAL;
 			}
 
-			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
-				adev->uvd.filp[i] = ctx->parser->filp;
+			if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) {
+				adev->uvd.inst->filp[i] = ctx->parser->filp;
 				return 0;
 			}
 		}
@@ -719,8 +719,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* validate the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.handles[i]) == handle) {
-				if (adev->uvd.filp[i] != ctx->parser->filp) {
+			if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
+				if (adev->uvd.inst->filp[i] != ctx->parser->filp) {
 					DRM_ERROR("UVD handle collision detected!\n");
 					return -EINVAL;
 				}
@@ -734,7 +734,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i)
-			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
+			atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
 
@@ -810,7 +810,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
 		}
 
 		if ((cmd == 0 || cmd == 0x3) &&
-		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
+		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
 			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 				  start, end);
 			return -EINVAL;
@@ -1043,7 +1043,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r)
 			goto err_free;
 
-		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
+		r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;
@@ -1131,8 +1131,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.idle_work.work);
-	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
+		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);
 
 	if (fences == 0) {
 		if (adev->pm.dpm_enabled) {
@@ -1146,7 +1146,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 							       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
 
@@ -1158,7 +1158,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
@@ -1175,7 +1175,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
@@ -1209,7 +1209,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n",  ring->idx);
+		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
 		r = 0;
 	}
 
@@ -1237,7 +1237,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
 		 * necessarily linear. So we need to count
 		 * all non-zero handles.
 		 */
-		if (atomic_read(&adev->uvd.handles[i]))
+		if (atomic_read(&adev->uvd.inst->handles[i]))
 			used_handles++;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 32ea20b99e53..b1579fba134c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -31,30 +31,37 @@
 #define AMDGPU_UVD_SESSION_SIZE		(50*1024)
 #define AMDGPU_UVD_FIRMWARE_OFFSET	256
 
+#define AMDGPU_MAX_UVD_INSTANCES			2
+
 #define AMDGPU_UVD_FIRMWARE_SIZE(adev)    \
 	(AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
 			       8) - AMDGPU_UVD_FIRMWARE_OFFSET)
 
-struct amdgpu_uvd {
+struct amdgpu_uvd_inst {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	unsigned		fw_version;
 	void			*saved_bo;
-	unsigned		max_handles;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
-	const struct firmware	*fw;	/* UVD firmware */
 	struct amdgpu_ring	ring;
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
-	bool			address_64_bit;
-	bool			use_ctx_buf;
 	struct drm_sched_entity entity;
 	struct drm_sched_entity entity_enc;
 	uint32_t                srbm_soft_reset;
+};
+
+struct amdgpu_uvd {
+	const struct firmware	*fw;	/* UVD firmware */
+	unsigned		fw_version;
+	unsigned		max_handles;
 	unsigned		num_enc_rings;
+	uint8_t		num_uvd_inst;
+	bool			address_64_bit;
+	bool			use_ctx_buf;
+	struct amdgpu_uvd_inst		inst[AMDGPU_MAX_UVD_INSTANCES];
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 87cbb142dd0b..5f22135de77f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v4_2_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	adev->uvd.num_uvd_inst = 1;
 
 	uvd_v4_2_set_ring_funcs(adev);
 	uvd_v4_2_set_irq_funcs(adev);
@@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
 	int r;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
 	if (r)
 		return r;
 
@@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle)
 	if (r)
 		return r;
 
-	ring = &adev->uvd.ring;
+	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 
 	return r;
 }
@@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
 static int uvd_v4_2_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	uint32_t tmp;
 	int r;
 
@@ -208,7 +209,7 @@ done:
 static int uvd_v4_2_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v4_2_stop(adev);
@@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle)
  */
 static int uvd_v4_2_start(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	uint32_t rb_bufsz;
 	int i, j, r;
 	u32 tmp;
@@ -536,7 +537,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
 	uint32_t size;
 
 	/* programm the VCPU memory controller bits 0-27 */
-	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
+	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
 	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
@@ -553,11 +554,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
 	/* bits 28-31 */
-	addr = (adev->uvd.gpu_addr >> 28) & 0xF;
+	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
 	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
 
 	/* bits 32-39 */
-	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
+	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
 	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
 
 	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
@@ -664,7 +665,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 				      struct amdgpu_iv_entry *entry)
 {
 	DRM_DEBUG("IH: UVD TRAP\n");
-	amdgpu_fence_process(&adev->uvd.ring);
+	amdgpu_fence_process(&adev->uvd.inst->ring);
 	return 0;
 }
 
@@ -753,7 +754,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
+	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
@@ -763,8 +764,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
 
 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.irq.num_types = 1;
-	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
+	adev->uvd.inst->irq.num_types = 1;
+	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 6445d55e7d5a..f5d074a887fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v5_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	adev->uvd.num_uvd_inst = 1;
 
 	uvd_v5_0_set_ring_funcs(adev);
 	uvd_v5_0_set_irq_funcs(adev);
@@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle)
 	int r;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
 	if (r)
 		return r;
 
@@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	ring = &adev->uvd.ring;
+	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 
 	return r;
 }
@@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle)
 static int uvd_v5_0_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	uint32_t tmp;
 	int r;
 
@@ -204,7 +205,7 @@ done:
 static int uvd_v5_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v5_0_stop(adev);
@@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
 
 	/* programm memory controller bits 0-27 */
 	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-			lower_32_bits(adev->uvd.gpu_addr));
+			lower_32_bits(adev->uvd.inst->gpu_addr));
 	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-			upper_32_bits(adev->uvd.gpu_addr));
+			upper_32_bits(adev->uvd.inst->gpu_addr));
 
 	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
 	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
@@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
  */
 static int uvd_v5_0_start(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	uint32_t rb_bufsz, tmp;
 	uint32_t lmi_swap_cntl;
 	uint32_t mp_swap_cntl;
@@ -586,7 +587,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
 				      struct amdgpu_iv_entry *entry)
 {
 	DRM_DEBUG("IH: UVD TRAP\n");
-	amdgpu_fence_process(&adev->uvd.ring);
+	amdgpu_fence_process(&adev->uvd.inst->ring);
 	return 0;
 }
 
@@ -861,7 +862,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
+	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
@@ -871,8 +872,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
 
 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.irq.num_types = 1;
-	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
+	adev->uvd.inst->irq.num_types = 1;
+	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ca6ab56357b5..dc391693d7ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->uvd.ring_enc[0])
+	if (ring == &adev->uvd.inst->ring_enc[0])
 		return RREG32(mmUVD_RB_RPTR);
 	else
 		return RREG32(mmUVD_RB_RPTR2);
@@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->uvd.ring_enc[0])
+	if (ring == &adev->uvd.inst->ring_enc[0])
 		return RREG32(mmUVD_RB_WPTR);
 	else
 		return RREG32(mmUVD_RB_WPTR2);
@@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->uvd.ring_enc[0])
+	if (ring == &adev->uvd.inst->ring_enc[0])
 		WREG32(mmUVD_RB_WPTR,
 			lower_32_bits(ring->wptr));
 	else
@@ -375,6 +375,7 @@ error:
 static int uvd_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	adev->uvd.num_uvd_inst = 1;
 
 	if (!(adev->flags & AMD_IS_APU) &&
 	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
@@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
 	if (r)
 		return r;
 
 	/* UVD ENC TRAP */
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-			r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
+			r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
 			if (r)
 				return r;
 		}
@@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle)
 
 	if (!uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
-			adev->uvd.ring_enc[i].funcs = NULL;
+			adev->uvd.inst->ring_enc[i].funcs = NULL;
 
-		adev->uvd.irq.num_types = 1;
+		adev->uvd.inst->irq.num_types = 1;
 		adev->uvd.num_enc_rings = 0;
 
 		DRM_INFO("UVD ENC is disabled\n");
 	} else {
 		struct drm_sched_rq *rq;
-		ring = &adev->uvd.ring_enc[0];
+		ring = &adev->uvd.inst->ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
 					  rq, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
@@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	ring = &adev->uvd.ring;
+	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-			ring = &adev->uvd.ring_enc[i];
+			ring = &adev->uvd.inst->ring_enc[i];
 			sprintf(ring->name, "uvd_enc%d", i);
-			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 			if (r)
 				return r;
 		}
@@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle)
 		return r;
 
 	if (uvd_v6_0_enc_support(adev)) {
-		drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+		drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
 
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
-			amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
 	}
 
 	return amdgpu_uvd_sw_fini(adev);
@@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle)
 static int uvd_v6_0_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	uint32_t tmp;
 	int i, r;
 
@@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle)
 
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-			ring = &adev->uvd.ring_enc[i];
+			ring = &adev->uvd.inst->ring_enc[i];
 			ring->ready = true;
 			r = amdgpu_ring_test_ring(ring);
 			if (r) {
@@ -563,7 +564,7 @@ done:
 static int uvd_v6_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
 	if (RREG32(mmUVD_STATUS) != 0)
 		uvd_v6_0_stop(adev);
@@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
 
 	/* programm memory controller bits 0-27 */
 	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-			lower_32_bits(adev->uvd.gpu_addr));
+			lower_32_bits(adev->uvd.inst->gpu_addr));
 	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-			upper_32_bits(adev->uvd.gpu_addr));
+			upper_32_bits(adev->uvd.inst->gpu_addr));
 
 	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
 	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
@@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
  */
 static int uvd_v6_0_start(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	uint32_t rb_bufsz, tmp;
 	uint32_t lmi_swap_cntl;
 	uint32_t mp_swap_cntl;
@@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
 
 	if (uvd_v6_0_enc_support(adev)) {
-		ring = &adev->uvd.ring_enc[0];
+		ring = &adev->uvd.inst->ring_enc[0];
 		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
 		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
 
-		ring = &adev->uvd.ring_enc[1];
+		ring = &adev->uvd.inst->ring_enc[1];
 		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
@@ -1158,10 +1159,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle)
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
 
 	if (srbm_soft_reset) {
-		adev->uvd.srbm_soft_reset = srbm_soft_reset;
+		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
 		return true;
 	} else {
-		adev->uvd.srbm_soft_reset = 0;
+		adev->uvd.inst->srbm_soft_reset = 0;
 		return false;
 	}
 }
@@ -1170,7 +1171,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->uvd.srbm_soft_reset)
+	if (!adev->uvd.inst->srbm_soft_reset)
 		return 0;
 
 	uvd_v6_0_stop(adev);
@@ -1182,9 +1183,9 @@ static int uvd_v6_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->uvd.srbm_soft_reset)
+	if (!adev->uvd.inst->srbm_soft_reset)
 		return 0;
-	srbm_soft_reset = adev->uvd.srbm_soft_reset;
+	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
 
 	if (srbm_soft_reset) {
 		u32 tmp;
@@ -1212,7 +1213,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->uvd.srbm_soft_reset)
+	if (!adev->uvd.inst->srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
@@ -1238,17 +1239,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
 
 	switch (entry->src_id) {
 	case 124:
-		amdgpu_fence_process(&adev->uvd.ring);
+		amdgpu_fence_process(&adev->uvd.inst->ring);
 		break;
 	case 119:
 		if (likely(uvd_v6_0_enc_support(adev)))
-			amdgpu_fence_process(&adev->uvd.ring_enc[0]);
+			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
 		else
 			int_handled = false;
 		break;
 	case 120:
 		if (likely(uvd_v6_0_enc_support(adev)))
-			amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
 		else
 			int_handled = false;
 		break;
@@ -1612,10 +1613,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 	if (adev->asic_type >= CHIP_POLARIS10) {
-		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
+		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
 		DRM_INFO("UVD is enabled in VM mode\n");
 	} else {
-		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
+		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
 		DRM_INFO("UVD is enabled in physical mode\n");
 	}
 }
@@ -1625,7 +1626,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
-		adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
+		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
 
 	DRM_INFO("UVD ENC is enabled in VM mode\n");
 }
@@ -1638,11 +1639,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
 static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 {
 	if (uvd_v6_0_enc_support(adev))
-		adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
+		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
 	else
-		adev->uvd.irq.num_types = 1;
+		adev->uvd.inst->irq.num_types = 1;
 
-	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
+	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 0ca63d588670..66d4bea5fb2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -72,7 +72,7 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->uvd.ring_enc[0])
+	if (ring == &adev->uvd.inst->ring_enc[0])
 		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
 	else
 		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
@@ -106,7 +106,7 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 	if (ring->use_doorbell)
 		return adev->wb.wb[ring->wptr_offs];
 
-	if (ring == &adev->uvd.ring_enc[0])
+	if (ring == &adev->uvd.inst->ring_enc[0])
 		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
 	else
 		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
@@ -144,7 +144,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 		return;
 	}
 
-	if (ring == &adev->uvd.ring_enc[0])
+	if (ring == &adev->uvd.inst->ring_enc[0])
 		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
 			lower_32_bits(ring->wptr));
 	else
@@ -170,8 +170,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 
 	r = amdgpu_ring_alloc(ring, 16);
 	if (r) {
-		DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+		DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
+			  ring->me, ring->idx, r);
 		return r;
 	}
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
@@ -184,11 +184,11 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
+		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
+			 ring->me, ring->idx, i);
 	} else {
-		DRM_ERROR("amdgpu: ring %d test failed\n",
-			  ring->idx);
+		DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
+			  ring->me, ring->idx);
 		r = -ETIMEDOUT;
 	}
 
@@ -342,24 +342,24 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
 		goto error;
 	}
 
 	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
 		goto error;
 	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
 		r = -ETIMEDOUT;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
 	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
 		r = 0;
 	}
 error:
@@ -370,6 +370,7 @@ error:
 static int uvd_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	adev->uvd.num_uvd_inst = 1;
 
 	if (amdgpu_sriov_vf(adev))
 		adev->uvd.num_enc_rings = 1;
@@ -390,13 +391,13 @@ static int uvd_v7_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst->irq);
 	if (r)
 		return r;
 
 	/* UVD ENC TRAP */
 	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq);
 		if (r)
 			return r;
 	}
@@ -415,9 +416,9 @@ static int uvd_v7_0_sw_init(void *handle)
 		DRM_INFO("PSP loading UVD firmware\n");
 	}
 
-	ring = &adev->uvd.ring_enc[0];
+	ring = &adev->uvd.inst->ring_enc[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+	r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
 				  rq, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up UVD ENC run queue.\n");
@@ -428,15 +429,15 @@ static int uvd_v7_0_sw_init(void *handle)
 	if (r)
 		return r;
 	if (!amdgpu_sriov_vf(adev)) {
-		ring = &adev->uvd.ring;
+		ring = &adev->uvd.inst->ring;
 		sprintf(ring->name, "uvd");
-		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 		if (r)
 			return r;
 	}
 
 	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-		ring = &adev->uvd.ring_enc[i];
+		ring = &adev->uvd.inst->ring_enc[i];
 		sprintf(ring->name, "uvd_enc%d", i);
 		if (amdgpu_sriov_vf(adev)) {
 			ring->use_doorbell = true;
@@ -449,7 +450,7 @@ static int uvd_v7_0_sw_init(void *handle)
 			else
 				ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
 		}
-		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 		if (r)
 			return r;
 	}
@@ -472,10 +473,10 @@ static int uvd_v7_0_sw_fini(void *handle)
 	if (r)
 		return r;
 
-	drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+	drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
 
 	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
-		amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+		amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
 
 	return amdgpu_uvd_sw_fini(adev);
 }
@@ -490,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle)
 static int uvd_v7_0_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	uint32_t tmp;
 	int i, r;
 
@@ -543,7 +544,7 @@ static int uvd_v7_0_hw_init(void *handle)
 	}
 
 	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-		ring = &adev->uvd.ring_enc[i];
+		ring = &adev->uvd.inst->ring_enc[i];
 		ring->ready = true;
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
@@ -569,7 +570,7 @@ done:
 static int uvd_v7_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 
 	if (!amdgpu_sriov_vf(adev))
 		uvd_v7_0_stop(adev);
@@ -627,9 +628,9 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 		offset = 0;
 	} else {
 		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-			lower_32_bits(adev->uvd.gpu_addr));
+			lower_32_bits(adev->uvd.inst->gpu_addr));
 		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-			upper_32_bits(adev->uvd.gpu_addr));
+			upper_32_bits(adev->uvd.inst->gpu_addr));
 		offset = size;
 	}
 
@@ -638,16 +639,16 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
 
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-			lower_32_bits(adev->uvd.gpu_addr + offset));
+			lower_32_bits(adev->uvd.inst->gpu_addr + offset));
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-			upper_32_bits(adev->uvd.gpu_addr + offset));
+			upper_32_bits(adev->uvd.inst->gpu_addr + offset));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
 
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-			lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+			lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-			upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+			upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
 			AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
@@ -688,10 +689,10 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 	/* 4, set resp to zero */
 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 
-	WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
-	adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
-	adev->uvd.ring_enc[0].wptr = 0;
-	adev->uvd.ring_enc[0].wptr_old = 0;
+	WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0);
+	adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0;
+	adev->uvd.inst->ring_enc[0].wptr = 0;
+	adev->uvd.inst->ring_enc[0].wptr_old = 0;
 
 	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
@@ -742,7 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 
 		init_table += header->uvd_table_offset;
 
-		ring = &adev->uvd.ring;
+		ring = &adev->uvd.inst->ring;
 		ring->wptr = 0;
 		size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
 
@@ -757,9 +758,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 			offset = 0;
 		} else {
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-						    lower_32_bits(adev->uvd.gpu_addr));
+						    lower_32_bits(adev->uvd.inst->gpu_addr));
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-						    upper_32_bits(adev->uvd.gpu_addr));
+						    upper_32_bits(adev->uvd.inst->gpu_addr));
 			offset = size;
 		}
 
@@ -768,16 +769,16 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
 
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
-					    lower_32_bits(adev->uvd.gpu_addr + offset));
+					    lower_32_bits(adev->uvd.inst->gpu_addr + offset));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
-					    upper_32_bits(adev->uvd.gpu_addr + offset));
+					    upper_32_bits(adev->uvd.inst->gpu_addr + offset));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
 
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
-					    lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+					    lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
-					    upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+					    upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
 					    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
@@ -841,7 +842,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
 
-		ring = &adev->uvd.ring_enc[0];
+		ring = &adev->uvd.inst->ring_enc[0];
 		ring->wptr = 0;
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
@@ -874,7 +875,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
  */
 static int uvd_v7_0_start(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
 	uint32_t rb_bufsz, tmp;
 	uint32_t lmi_swap_cntl;
 	uint32_t mp_swap_cntl;
@@ -1027,14 +1028,14 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
 			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
-	ring = &adev->uvd.ring_enc[0];
+	ring = &adev->uvd.inst->ring_enc[0];
 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-	ring = &adev->uvd.ring_enc[1];
+	ring = &adev->uvd.inst->ring_enc[1];
 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
@@ -1162,8 +1163,8 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 	WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
-			  ring->idx, r);
+		DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
+			  ring->me, ring->idx, r);
 		return r;
 	}
 	amdgpu_ring_write(ring,
@@ -1178,11 +1179,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 	}
 
 	if (i < adev->usec_timeout) {
-		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
-			 ring->idx, i);
+		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
+			 ring->me, ring->idx, i);
 	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->idx, tmp);
+		DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
+			  ring->me, ring->idx, tmp);
 		r = -EINVAL;
 	}
 	return r;
@@ -1365,10 +1366,10 @@ static bool uvd_v7_0_check_soft_reset(void *handle)
 				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
 
 	if (srbm_soft_reset) {
-		adev->uvd.srbm_soft_reset = srbm_soft_reset;
+		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
 		return true;
 	} else {
-		adev->uvd.srbm_soft_reset = 0;
+		adev->uvd.inst->srbm_soft_reset = 0;
 		return false;
 	}
 }
@@ -1377,7 +1378,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->uvd.srbm_soft_reset)
+	if (!adev->uvd.inst->srbm_soft_reset)
 		return 0;
 
 	uvd_v7_0_stop(adev);
@@ -1389,9 +1390,9 @@ static int uvd_v7_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->uvd.srbm_soft_reset)
+	if (!adev->uvd.inst->srbm_soft_reset)
 		return 0;
-	srbm_soft_reset = adev->uvd.srbm_soft_reset;
+	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
 
 	if (srbm_soft_reset) {
 		u32 tmp;
@@ -1419,7 +1420,7 @@ static int uvd_v7_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->uvd.srbm_soft_reset)
+	if (!adev->uvd.inst->srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
@@ -1444,14 +1445,14 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
 	DRM_DEBUG("IH: UVD TRAP\n");
 	switch (entry->src_id) {
 	case 124:
-		amdgpu_fence_process(&adev->uvd.ring);
+		amdgpu_fence_process(&adev->uvd.inst->ring);
 		break;
 	case 119:
-		amdgpu_fence_process(&adev->uvd.ring_enc[0]);
+		amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
 		break;
 	case 120:
 		if (!amdgpu_sriov_vf(adev))
-			amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
 		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -1719,7 +1720,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
 
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs;
+	adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs;
 	DRM_INFO("UVD is enabled in VM mode\n");
 }
 
@@ -1728,7 +1729,7 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
-		adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
+		adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
 
 	DRM_INFO("UVD ENC is enabled in VM mode\n");
 }
@@ -1740,8 +1741,8 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
 
 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
-	adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs;
+	adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
+	adev->uvd.inst->irq.funcs = &uvd_v7_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version uvd_v7_0_ip_block =

From 10dd74eac4dba963bfa97f5092040aa75ff742d6 Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Tue, 15 May 2018 14:31:24 -0500
Subject: [PATCH 1194/1286] drm/amdgpu/vg20:Restruct uvd.inst to support
 multiple instances

Vega20 has dual UVD, so add multiple-instance support for UVD.
Restructure uvd.inst so that accesses are indexed as uvd.inst[i]
(with uvd.inst[0] replacing the previous uvd.inst-> usage), repurpose
amdgpu_ring::me as the instance index, and initialize it to 0.
There are no logical changes here.
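
Condensed sketch of the resulting access pattern (taken from the hunks
below; not complete functions):

    /* iterate over all UVD instances */
    for (i = 0; i < adev->uvd.num_uvd_inst; i++)
            ring_mask |= (adev->uvd.inst[i].ring.ready ? 1 : 0) << i;

    /* a ring records its instance in ring->me, so shared code can
     * index the owning instance directly */
    ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
    ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;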

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c   |  12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  |   1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   | 231 ++---
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c     | 988 ++++++++++++----------
 5 files changed, 654 insertions(+), 584 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1070f4042cbb..39ec6b8890a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t index;
 
-	if (ring != &adev->uvd.inst->ring) {
+	if (ring != &adev->uvd.inst[ring->me].ring) {
 		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
 		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
 	} else {
 		/* put fence directly behind firmware */
 		index = ALIGN(adev->uvd.fw->size, 8);
-		ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
-		ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
+		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
+		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
 	amdgpu_irq_get(adev, irq_src, irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5620ed291107..91517b166a3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, found;
+	int i, j, found;
 	int ui32_size = sizeof(ui32);
 
 	if (!info->return_size || !info->return_pointer)
@@ -348,7 +348,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			break;
 		case AMDGPU_HW_IP_UVD:
 			type = AMD_IP_BLOCK_TYPE_UVD;
-			ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
+			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+				ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 			ib_size_alignment = 16;
 			break;
@@ -361,8 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			break;
 		case AMDGPU_HW_IP_UVD_ENC:
 			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_enc_rings; i++)
-				ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i);
+			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+				for (j = 0; j < adev->uvd.num_enc_rings; j++)
+					ring_mask |=
+					((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
+					(j + i * adev->uvd.num_enc_rings));
 			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 			ib_size_alignment = 1;
 			break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 49cad08b5c16..c6850b629d0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 
 	dma_fence_put(ring->vmid_wait);
 	ring->vmid_wait = NULL;
+	ring->me = 0;
 
 	ring->adev->rings[ring->idx] = NULL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 02683a039a98..e961492d357a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned version_major, version_minor, family_id;
-	int i, r;
+	int i, j, r;
 
 	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
 
@@ -236,28 +236,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
-	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo,
-				    &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
-		return r;
-	}
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 
-	ring = &adev->uvd.inst->ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up UVD run queue.\n");
-		return r;
-	}
+		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
+					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+		if (r) {
+			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+			return r;
+		}
 
-	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		atomic_set(&adev->uvd.inst->handles[i], 0);
-		adev->uvd.inst->filp[i] = NULL;
-	}
+		ring = &adev->uvd.inst[j].ring;
+		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
+					  rq, NULL);
+		if (r != 0) {
+			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
+			return r;
+		}
 
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
+			atomic_set(&adev->uvd.inst[j].handles[i], 0);
+			adev->uvd.inst[j].filp[i] = NULL;
+		}
+	}
 	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
 		adev->uvd.address_64_bit = true;
@@ -284,20 +286,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-	int i;
-	kfree(adev->uvd.inst->saved_bo);
+	int i, j;
 
-	drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity);
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		kfree(adev->uvd.inst[j].saved_bo);
 
-	amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo,
-			      &adev->uvd.inst->gpu_addr,
-			      (void **)&adev->uvd.inst->cpu_addr);
+		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
 
-	amdgpu_ring_fini(&adev->uvd.inst->ring);
+		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+				      &adev->uvd.inst[j].gpu_addr,
+				      (void **)&adev->uvd.inst[j].cpu_addr);
 
-	for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
-		amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+		amdgpu_ring_fini(&adev->uvd.inst[j].ring);
 
+		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+	}
 	release_firmware(adev->uvd.fw);
 
 	return 0;
@@ -307,32 +311,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
-	int i;
+	int i, j;
 
-	if (adev->uvd.inst->vcpu_bo == NULL)
-		return 0;
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.inst[j].vcpu_bo == NULL)
+			continue;
 
-	cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
 
-	/* only valid for physical mode */
-	if (adev->asic_type < CHIP_POLARIS10) {
-		for (i = 0; i < adev->uvd.max_handles; ++i)
-			if (atomic_read(&adev->uvd.inst->handles[i]))
-				break;
+		/* only valid for physical mode */
+		if (adev->asic_type < CHIP_POLARIS10) {
+			for (i = 0; i < adev->uvd.max_handles; ++i)
+				if (atomic_read(&adev->uvd.inst[j].handles[i]))
+					break;
 
-		if (i == adev->uvd.max_handles)
-			return 0;
+			if (i == adev->uvd.max_handles)
+				continue;
+		}
+
+		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+		ptr = adev->uvd.inst[j].cpu_addr;
+
+		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+		if (!adev->uvd.inst[j].saved_bo)
+			return -ENOMEM;
+
+		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
 	}
-
-	size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
-	ptr = adev->uvd.inst->cpu_addr;
-
-	adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL);
-	if (!adev->uvd.inst->saved_bo)
-		return -ENOMEM;
-
-	memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size);
-
 	return 0;
 }
 
@@ -340,59 +345,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
+	int i;
 
-	if (adev->uvd.inst->vcpu_bo == NULL)
-		return -EINVAL;
+	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.inst[i].vcpu_bo == NULL)
+			return -EINVAL;
 
-	size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
-	ptr = adev->uvd.inst->cpu_addr;
+		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
+		ptr = adev->uvd.inst[i].cpu_addr;
 
-	if (adev->uvd.inst->saved_bo != NULL) {
-		memcpy_toio(ptr, adev->uvd.inst->saved_bo, size);
-		kfree(adev->uvd.inst->saved_bo);
-		adev->uvd.inst->saved_bo = NULL;
-	} else {
-		const struct common_firmware_header *hdr;
-		unsigned offset;
+		if (adev->uvd.inst[i].saved_bo != NULL) {
+			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+			kfree(adev->uvd.inst[i].saved_bo);
+			adev->uvd.inst[i].saved_bo = NULL;
+		} else {
+			const struct common_firmware_header *hdr;
+			unsigned offset;
 
-		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-			memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset,
-				    le32_to_cpu(hdr->ucode_size_bytes));
-			size -= le32_to_cpu(hdr->ucode_size_bytes);
-			ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
+					    le32_to_cpu(hdr->ucode_size_bytes));
+				size -= le32_to_cpu(hdr->ucode_size_bytes);
+				ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			}
+			memset_io(ptr, 0, size);
+			/* to restore uvd fence seq */
+			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
 		}
-		memset_io(ptr, 0, size);
-		/* to restore uvd fence seq */
-		amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring);
 	}
-
 	return 0;
 }
 
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
-	int i, r;
+	struct amdgpu_ring *ring;
+	int i, j, r;
 
-	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]);
-		if (handle != 0 && adev->uvd.inst->filp[i] == filp) {
-			struct dma_fence *fence;
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		ring = &adev->uvd.inst[j].ring;
 
-			r = amdgpu_uvd_get_destroy_msg(ring, handle,
-						       false, &fence);
-			if (r) {
-				DRM_ERROR("Error destroying UVD (%d)!\n", r);
-				continue;
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
+			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
+			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
+				struct dma_fence *fence;
+
+				r = amdgpu_uvd_get_destroy_msg(ring, handle,
+							       false, &fence);
+				if (r) {
+					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
+					continue;
+				}
+
+				dma_fence_wait(fence, false);
+				dma_fence_put(fence);
+
+				adev->uvd.inst[j].filp[i] = NULL;
+				atomic_set(&adev->uvd.inst[j].handles[i], 0);
 			}
-
-			dma_fence_wait(fence, false);
-			dma_fence_put(fence);
-
-			adev->uvd.inst->filp[i] = NULL;
-			atomic_set(&adev->uvd.inst->handles[i], 0);
 		}
 	}
 }
@@ -667,15 +678,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	void *ptr;
 	long r;
 	int i;
+	uint32_t ip_instance = ctx->parser->job->ring->me;
 
 	if (offset & 0x3F) {
-		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
 		return -EINVAL;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
+		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
 		return r;
 	}
 
@@ -685,7 +697,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	handle = msg[2];
 
 	if (handle == 0) {
-		DRM_ERROR("Invalid UVD handle!\n");
+		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
 		return -EINVAL;
 	}
 
@@ -696,18 +708,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* try to alloc a new handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
-				DRM_ERROR("Handle 0x%x already in use!\n", handle);
+			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
 				return -EINVAL;
 			}
 
-			if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) {
-				adev->uvd.inst->filp[i] = ctx->parser->filp;
+			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
+				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
 				return 0;
 			}
 		}
 
-		DRM_ERROR("No more free UVD handles!\n");
+		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
 		return -ENOSPC;
 
 	case 1:
@@ -719,27 +731,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* validate the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
-				if (adev->uvd.inst->filp[i] != ctx->parser->filp) {
-					DRM_ERROR("UVD handle collision detected!\n");
+			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
+					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
 					return -EINVAL;
 				}
 				return 0;
 			}
 		}
 
-		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
 		return -ENOENT;
 
 	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i)
-			atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0);
+			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
 
 	default:
-		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
 		return -EINVAL;
 	}
 	BUG();
@@ -1043,7 +1055,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r)
 			goto err_free;
 
-		r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity,
+		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;
@@ -1189,27 +1201,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence;
 	long r;
+	uint32_t ip_instance = ring->me;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
 		goto error;
 	}
 
 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
 		goto error;
 	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
 		r = -ETIMEDOUT;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
 	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
 		r = 0;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 66d4bea5fb2c..08f3b6c84bea 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -58,7 +58,7 @@ static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
+	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
 }
 
 /**
@@ -72,10 +72,10 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	if (ring == &adev->uvd.inst->ring_enc[0])
-		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
+	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
+		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
 	else
-		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
+		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
 }
 
 /**
@@ -89,7 +89,7 @@ static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
+	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
 }
 
 /**
@@ -106,10 +106,10 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 	if (ring->use_doorbell)
 		return adev->wb.wb[ring->wptr_offs];
 
-	if (ring == &adev->uvd.inst->ring_enc[0])
-		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
+	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
+		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
 	else
-		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
+		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
 }
 
 /**
@@ -123,7 +123,7 @@ static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 }
 
 /**
@@ -144,11 +144,11 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 		return;
 	}
 
-	if (ring == &adev->uvd.inst->ring_enc[0])
-		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
+	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
+		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
 			lower_32_bits(ring->wptr));
 	else
-		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
+		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
 			lower_32_bits(ring->wptr));
 }
 
@@ -387,19 +387,21 @@ static int uvd_v7_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
 	struct drm_sched_rq *rq;
-	int i, r;
+	int i, j, r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst->irq);
-	if (r)
-		return r;
-
-	/* UVD ENC TRAP */
-	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq);
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		/* UVD TRAP */
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst[j].irq);
 		if (r)
 			return r;
+
+		/* UVD ENC TRAP */
+		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+			r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst[j].irq);
+			if (r)
+				return r;
+		}
 	}
 
 	r = amdgpu_uvd_sw_init(adev);
@@ -416,43 +418,48 @@ static int uvd_v7_0_sw_init(void *handle)
 		DRM_INFO("PSP loading UVD firmware\n");
 	}
 
-	ring = &adev->uvd.inst->ring_enc[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-				  rq, NULL);
-	if (r) {
-		DRM_ERROR("Failed setting up UVD ENC run queue.\n");
-		return r;
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		ring = &adev->uvd.inst[j].ring_enc[0];
+		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
+					  rq, NULL);
+		if (r) {
+			DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
+			return r;
+		}
 	}
 
 	r = amdgpu_uvd_resume(adev);
 	if (r)
 		return r;
-	if (!amdgpu_sriov_vf(adev)) {
-		ring = &adev->uvd.inst->ring;
-		sprintf(ring->name, "uvd");
-		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
-		if (r)
-			return r;
-	}
 
-	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-		ring = &adev->uvd.inst->ring_enc[i];
-		sprintf(ring->name, "uvd_enc%d", i);
-		if (amdgpu_sriov_vf(adev)) {
-			ring->use_doorbell = true;
-
-			/* currently only use the first enconding ring for
-			 * sriov, so set unused location for other unused rings.
-			 */
-			if (i == 0)
-				ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
-			else
-				ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (!amdgpu_sriov_vf(adev)) {
+			ring = &adev->uvd.inst[j].ring;
+			sprintf(ring->name, "uvd<%d>", j);
+			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+			if (r)
+				return r;
+		}
+
+		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+			ring = &adev->uvd.inst[j].ring_enc[i];
+			sprintf(ring->name, "uvd_enc%d<%d>", i, j);
+			if (amdgpu_sriov_vf(adev)) {
+				ring->use_doorbell = true;
+
+				/* currently only use the first encoding ring for
+				 * sriov, so set unused location for other unused rings.
+				 */
+				if (i == 0)
+					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+				else
+					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+			}
+			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+			if (r)
+				return r;
 		}
-		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
-		if (r)
-			return r;
 	}
 
 	r = amdgpu_virt_alloc_mm_table(adev);
@@ -464,7 +471,7 @@ static int uvd_v7_0_sw_init(void *handle)
 
 static int uvd_v7_0_sw_fini(void *handle)
 {
-	int i, r;
+	int i, j, r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_virt_free_mm_table(adev);
@@ -473,11 +480,12 @@ static int uvd_v7_0_sw_fini(void *handle)
 	if (r)
 		return r;
 
-	drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
-
-	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
-		amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
 
+		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+	}
 	return amdgpu_uvd_sw_fini(adev);
 }
 
@@ -491,9 +499,9 @@ static int uvd_v7_0_sw_fini(void *handle)
 static int uvd_v7_0_hw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+	struct amdgpu_ring *ring;
 	uint32_t tmp;
-	int i, r;
+	int i, j, r;
 
 	if (amdgpu_sriov_vf(adev))
 		r = uvd_v7_0_sriov_start(adev);
@@ -502,57 +510,60 @@ static int uvd_v7_0_hw_init(void *handle)
 	if (r)
 		goto done;
 
-	if (!amdgpu_sriov_vf(adev)) {
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
-			goto done;
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		ring = &adev->uvd.inst[j].ring;
+
+		if (!amdgpu_sriov_vf(adev)) {
+			ring->ready = true;
+			r = amdgpu_ring_test_ring(ring);
+			if (r) {
+				ring->ready = false;
+				goto done;
+			}
+
+			r = amdgpu_ring_alloc(ring, 10);
+			if (r) {
+				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
+				goto done;
+			}
+
+			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
+				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
+			amdgpu_ring_write(ring, tmp);
+			amdgpu_ring_write(ring, 0xFFFFF);
+
+			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
+				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
+			amdgpu_ring_write(ring, tmp);
+			amdgpu_ring_write(ring, 0xFFFFF);
+
+			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
+				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
+			amdgpu_ring_write(ring, tmp);
+			amdgpu_ring_write(ring, 0xFFFFF);
+
+			/* Clear timeout status bits */
+			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
+				mmUVD_SEMA_TIMEOUT_STATUS), 0));
+			amdgpu_ring_write(ring, 0x8);
+
+			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
+				mmUVD_SEMA_CNTL), 0));
+			amdgpu_ring_write(ring, 3);
+
+			amdgpu_ring_commit(ring);
 		}
 
-		r = amdgpu_ring_alloc(ring, 10);
-		if (r) {
-			DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
-			goto done;
-		}
-
-		tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
-			mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
-		amdgpu_ring_write(ring, tmp);
-		amdgpu_ring_write(ring, 0xFFFFF);
-
-		tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
-			mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
-		amdgpu_ring_write(ring, tmp);
-		amdgpu_ring_write(ring, 0xFFFFF);
-
-		tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
-			mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
-		amdgpu_ring_write(ring, tmp);
-		amdgpu_ring_write(ring, 0xFFFFF);
-
-		/* Clear timeout status bits */
-		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
-			mmUVD_SEMA_TIMEOUT_STATUS), 0));
-		amdgpu_ring_write(ring, 0x8);
-
-		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
-			mmUVD_SEMA_CNTL), 0));
-		amdgpu_ring_write(ring, 3);
-
-		amdgpu_ring_commit(ring);
-	}
-
-	for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-		ring = &adev->uvd.inst->ring_enc[i];
-		ring->ready = true;
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->ready = false;
-			goto done;
+		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+			ring = &adev->uvd.inst[j].ring_enc[i];
+			ring->ready = true;
+			r = amdgpu_ring_test_ring(ring);
+			if (r) {
+				ring->ready = false;
+				goto done;
+			}
 		}
 	}
-
 done:
 	if (!r)
 		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
@@ -570,7 +581,7 @@ done:
 static int uvd_v7_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+	int i;
 
 	if (!amdgpu_sriov_vf(adev))
 		uvd_v7_0_stop(adev);
@@ -579,7 +590,8 @@ static int uvd_v7_0_hw_fini(void *handle)
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 	}
 
-	ring->ready = false;
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+		adev->uvd.inst[i].ring.ready = false;
 
 	return 0;
 }
@@ -619,48 +631,51 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 {
 	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 	uint32_t offset;
+	int i;
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-			lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-			upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-		offset = 0;
-	} else {
-		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-			lower_32_bits(adev->uvd.inst->gpu_addr));
-		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-			upper_32_bits(adev->uvd.inst->gpu_addr));
-		offset = size;
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+				upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+			offset = 0;
+		} else {
+			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+				lower_32_bits(adev->uvd.inst[i].gpu_addr));
+			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+				upper_32_bits(adev->uvd.inst[i].gpu_addr));
+			offset = size;
+		}
+
+		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+
+		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
+		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
+		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
+		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
+
+		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
+		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
+				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
+
+		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
+				adev->gfx.config.gb_addr_config);
+		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
+				adev->gfx.config.gb_addr_config);
+		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
+				adev->gfx.config.gb_addr_config);
+
+		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 	}
-
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
-				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
-
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-			lower_32_bits(adev->uvd.inst->gpu_addr + offset));
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-			upper_32_bits(adev->uvd.inst->gpu_addr + offset));
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
-
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-			lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-			upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
-			AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
-
-	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
-			adev->gfx.config.gb_addr_config);
-	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
-			adev->gfx.config.gb_addr_config);
-	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
-			adev->gfx.config.gb_addr_config);
-
-	WREG32_SOC15(UVD, 0, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 }
 
 static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
@@ -670,6 +685,7 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 	uint64_t addr = table->gpu_addr;
 	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
 	uint32_t size;
+	int i;
 
 	size = header->header_size + header->vce_table_size + header->uvd_table_size;
 
@@ -689,11 +705,12 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 	/* 4, set resp to zero */
 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 
-	WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0);
-	adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0;
-	adev->uvd.inst->ring_enc[0].wptr = 0;
-	adev->uvd.inst->ring_enc[0].wptr_old = 0;
-
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
+		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
+		adev->uvd.inst[i].ring_enc[0].wptr = 0;
+		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
+	}
 	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
 
@@ -726,6 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 	struct mmsch_v1_0_cmd_end end = { {0} };
 	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
 	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
+	uint8_t i = 0;
 
 	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
 	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
@@ -743,120 +761,121 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 
 		init_table += header->uvd_table_offset;
 
-		ring = &adev->uvd.inst->ring;
-		ring->wptr = 0;
-		size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+			ring = &adev->uvd.inst[i].ring;
+			ring->wptr = 0;
+			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
 
-		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
-						   0xFFFFFFFF, 0x00000004);
-		/* mc resume*/
-		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-						    lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-						    upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-			offset = 0;
-		} else {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-						    lower_32_bits(adev->uvd.inst->gpu_addr));
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-						    upper_32_bits(adev->uvd.inst->gpu_addr));
-			offset = size;
+			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+							   0xFFFFFFFF, 0x00000004);
+			/* mc resume */
+			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+							    lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+							    upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+				offset = 0;
+			} else {
+				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
+				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
+				offset = size;
+			}
+
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+						    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
+
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
+
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
+
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
+			/* mc resume end */
+
+			/* disable clock gating */
+			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
+							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
+
+			/* disable interrupt */
+			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+
+			/* stall UMC and register bus before resetting VCPU */
+			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
+							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+			/* put LMI, VCPU, RBC etc... into reset */
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
+						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
+
+			/* initialize UVD memory controller */
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
+						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+							       UVD_LMI_CTRL__REQ_MODE_MASK |
+							       0x00100000L));
+
+			/* take all subblocks out of reset, except VCPU */
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
+						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
+			/* enable VCPU clock */
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+						    UVD_VCPU_CNTL__CLK_EN_MASK);
+
+			/* enable master interrupt */
+			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
+
+			/* clear the bit 4 of UVD_STATUS */
+			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
+
+			/* force RBC into idle state */
+			size = order_base_2(ring->ring_size);
+			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
+			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+			ring = &adev->uvd.inst[i].ring_enc[0];
+			ring->wptr = 0;
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
+
+			/* boot up the VCPU */
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
+
+			/* enable UMC */
+			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
+											   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+
+			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
 		}
-
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
-					    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
-
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
-					    lower_32_bits(adev->uvd.inst->gpu_addr + offset));
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
-					    upper_32_bits(adev->uvd.inst->gpu_addr + offset));
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
-
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
-					    lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
-					    upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
-					    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
-
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
-		/* mc resume end*/
-
-		/* disable clock gating */
-		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL),
-						   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
-
-		/* disable interupt */
-		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
-						   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
-
-		/* stall UMC and register bus before resetting VCPU */
-		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
-						   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
-						   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
-		/* put LMI, VCPU, RBC etc... into reset */
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
-					    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
-						       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
-						       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
-						       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
-						       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
-						       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
-						       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
-						       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
-
-		/* initialize UVD memory controller */
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
-					    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
-						       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
-						       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
-						       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
-						       UVD_LMI_CTRL__REQ_MODE_MASK |
-						       0x00100000L));
-
-		/* take all subblocks out of reset, except VCPU */
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
-					    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-
-		/* enable VCPU clock */
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
-					    UVD_VCPU_CNTL__CLK_EN_MASK);
-
-		/* enable master interrupt */
-		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
-						   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
-						   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
-
-		/* clear the bit 4 of UVD_STATUS */
-		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
-						   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
-
-		/* force RBC into idle state */
-		size = order_base_2(ring->ring_size);
-		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
-		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
-
-		ring = &adev->uvd.inst->ring_enc[0];
-		ring->wptr = 0;
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
-
-		/* boot up the VCPU */
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
-
-		/* enable UMC */
-		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
-										   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
-
-		MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
-
 		/* add end packet */
 		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
 		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
@@ -875,15 +894,17 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
  */
 static int uvd_v7_0_start(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+	struct amdgpu_ring *ring;
 	uint32_t rb_bufsz, tmp;
 	uint32_t lmi_swap_cntl;
 	uint32_t mp_swap_cntl;
-	int i, j, r;
+	int i, j, k, r;
 
-	/* disable DPG */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
-			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		/* disable DPG */
+		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
+				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+	}
 
 	/* disable byte swapping */
 	lmi_swap_cntl = 0;
@@ -891,157 +912,159 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
 
 	uvd_v7_0_mc_resume(adev);
 
-	/* disable clock gating */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
-			~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
+	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		ring = &adev->uvd.inst[k].ring;
+		/* disable clock gating */
+		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
+				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
 
-	/* disable interupt */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
-			~UVD_MASTINT_EN__VCPU_EN_MASK);
+		/* disable interrupt */
+		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
+				~UVD_MASTINT_EN__VCPU_EN_MASK);
 
-	/* stall UMC and register bus before resetting VCPU */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
-			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
-			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-	mdelay(1);
+		/* stall UMC and register bus before resetting VCPU */
+		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
+				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+		mdelay(1);
 
-	/* put LMI, VCPU, RBC etc... into reset */
-	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
-		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
-		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
-	mdelay(5);
+		/* put LMI, VCPU, RBC etc... into reset */
+		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
+			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+		mdelay(5);
 
-	/* initialize UVD memory controller */
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
-		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
-		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
-		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
-		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
-		UVD_LMI_CTRL__REQ_MODE_MASK |
-		0x00100000L);
+		/* initialize UVD memory controller */
+		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
+			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+			UVD_LMI_CTRL__REQ_MODE_MASK |
+			0x00100000L);
 
 #ifdef __BIG_ENDIAN
-	/* swap (8 in 32) RB and IB */
-	lmi_swap_cntl = 0xa;
-	mp_swap_cntl = 0;
+		/* swap (8 in 32) RB and IB */
+		lmi_swap_cntl = 0xa;
+		mp_swap_cntl = 0;
 #endif
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
-	WREG32_SOC15(UVD, 0, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
 
-	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
-	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
-	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
-	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
-	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);
+		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
+		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
+		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
+		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
+		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
+		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
 
-	/* take all subblocks out of reset, except VCPU */
-	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
-			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-	mdelay(5);
+		/* take all subblocks out of reset, except VCPU */
+		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
+				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+		mdelay(5);
 
-	/* enable VCPU clock */
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
-			UVD_VCPU_CNTL__CLK_EN_MASK);
+		/* enable VCPU clock */
+		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
+				UVD_VCPU_CNTL__CLK_EN_MASK);
 
-	/* enable UMC */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
-			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+		/* enable UMC */
+		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
+				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
-	/* boot up the VCPU */
-	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
-	mdelay(10);
+		/* boot up the VCPU */
+		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
+		mdelay(10);
 
-	for (i = 0; i < 10; ++i) {
-		uint32_t status;
+		for (i = 0; i < 10; ++i) {
+			uint32_t status;
 
-		for (j = 0; j < 100; ++j) {
-			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+			for (j = 0; j < 100; ++j) {
+				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
+				if (status & 2)
+					break;
+				mdelay(10);
+			}
+			r = 0;
 			if (status & 2)
 				break;
+
+			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
+			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
+					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 			mdelay(10);
+			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
+					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+			mdelay(10);
+			r = -1;
 		}
-		r = 0;
-		if (status & 2)
-			break;
 
-		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
-		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
-				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
-				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-		mdelay(10);
-		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
-				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-		mdelay(10);
-		r = -1;
+		if (r) {
+			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
+			return r;
+		}
+		/* enable master interrupt */
+		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
+			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
+
+		/* clear the bit 4 of UVD_STATUS */
+		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
+				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+		/* force RBC into idle state */
+		rb_bufsz = order_base_2(ring->ring_size);
+		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
+
+		/* set the write pointer delay */
+		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+		/* set the wb address */
+		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
+				(upper_32_bits(ring->gpu_addr) >> 2));
+
+		/* program the RB_BASE for ring buffer */
+		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+				lower_32_bits(ring->gpu_addr));
+		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+				upper_32_bits(ring->gpu_addr));
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
+
+		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
+		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
+				lower_32_bits(ring->wptr));
+
+		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
+				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+
+		ring = &adev->uvd.inst[k].ring_enc[0];
+		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
+		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
+
+		ring = &adev->uvd.inst[k].ring_enc[1];
+		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
 	}
-
-	if (r) {
-		DRM_ERROR("UVD not responding, giving up!!!\n");
-		return r;
-	}
-	/* enable master interrupt */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
-		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
-		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
-
-	/* clear the bit 4 of UVD_STATUS */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
-			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
-
-	/* force RBC into idle state */
-	rb_bufsz = order_base_2(ring->ring_size);
-	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
-	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
-	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
-	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
-	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
-	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
-
-	/* set the write pointer delay */
-	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
-
-	/* set the wb address */
-	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
-			(upper_32_bits(ring->gpu_addr) >> 2));
-
-	/* programm the RB_BASE for ring buffer */
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
-			lower_32_bits(ring->gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
-			upper_32_bits(ring->gpu_addr));
-
-	/* Initialize the ring buffer's read and write pointers */
-	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
-
-	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
-	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
-			lower_32_bits(ring->wptr));
-
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
-			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
-
-	ring = &adev->uvd.inst->ring_enc[0];
-	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
-	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
-
-	ring = &adev->uvd.inst->ring_enc[1];
-	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
-
 	return 0;
 }
 
@@ -1054,26 +1077,30 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
  */
 static void uvd_v7_0_stop(struct amdgpu_device *adev)
 {
-	/* force RBC into idle state */
-	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);
+	uint8_t i = 0;
 
-	/* Stall UMC and register bus before resetting VCPU */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
-			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
-			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-	mdelay(1);
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		/* force RBC into idle state */
+		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
 
-	/* put VCPU into reset */
-	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
-			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-	mdelay(5);
+		/* Stall UMC and register bus before resetting VCPU */
+		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
+				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+		mdelay(1);
 
-	/* disable VCPU clock */
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);
+		/* put VCPU into reset */
+		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
+				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+		mdelay(5);
 
-	/* Unstall UMC and register bus */
-	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
-			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+		/* disable VCPU clock */
+		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
+
+		/* Unstall UMC and register bus */
+		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
+				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+	}
 }
 
 /**
@@ -1092,26 +1119,26 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
 
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
 	amdgpu_ring_write(ring, seq);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
 	amdgpu_ring_write(ring, addr & 0xffffffff);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
 	amdgpu_ring_write(ring, 0);
 
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
 	amdgpu_ring_write(ring, 2);
 }
 
@@ -1160,7 +1187,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 	unsigned i;
 	int r;
 
-	WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
+	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
 	if (r) {
 		DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
@@ -1168,11 +1195,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 		return r;
 	}
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
 	amdgpu_ring_commit(ring);
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID);
+		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
 		if (tmp == 0xDEADBEEF)
 			break;
 		DRM_UDELAY(1);
@@ -1204,17 +1231,17 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
 	amdgpu_ring_write(ring, vmid);
 
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
@@ -1242,13 +1269,13 @@ static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
 	amdgpu_ring_write(ring, reg << 2);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
 	amdgpu_ring_write(ring, val);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
 	amdgpu_ring_write(ring, 8);
 }
 
@@ -1258,16 +1285,16 @@ static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 	struct amdgpu_device *adev = ring->adev;
 
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
 	amdgpu_ring_write(ring, reg << 2);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
 	amdgpu_ring_write(ring, val);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
 	amdgpu_ring_write(ring, mask);
 	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
+		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
 	amdgpu_ring_write(ring, 12);
 }
 
@@ -1292,7 +1319,7 @@ static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 	struct amdgpu_device *adev = ring->adev;
 
 	for (i = 0; i < count; i++)
-		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
 
 }
 
@@ -1360,16 +1387,16 @@ static bool uvd_v7_0_check_soft_reset(void *handle)
 
 	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
 	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
-	    (RREG32_SOC15(UVD, 0, mmUVD_STATUS) &
+	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
 		    AMDGPU_UVD_STATUS_BUSY_MASK))
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
 
 	if (srbm_soft_reset) {
-		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
+		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
 		return true;
 	} else {
-		adev->uvd.inst->srbm_soft_reset = 0;
+		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
 		return false;
 	}
 }
@@ -1378,7 +1405,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->uvd.inst->srbm_soft_reset)
+	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
 		return 0;
 
 	uvd_v7_0_stop(adev);
@@ -1390,9 +1417,9 @@ static int uvd_v7_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->uvd.inst->srbm_soft_reset)
+	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
 		return 0;
-	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
+	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
 
 	if (srbm_soft_reset) {
 		u32 tmp;
@@ -1420,7 +1447,7 @@ static int uvd_v7_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->uvd.inst->srbm_soft_reset)
+	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
@@ -1442,17 +1469,29 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
 				      struct amdgpu_irq_src *source,
 				      struct amdgpu_iv_entry *entry)
 {
+	uint32_t ip_instance;
+
+	switch (entry->client_id) {
+	case SOC15_IH_CLIENTID_UVD:
+		ip_instance = 0;
+		break;
+	default:
+		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+		return 0;
+	}
+
 	DRM_DEBUG("IH: UVD TRAP\n");
+
 	switch (entry->src_id) {
 	case 124:
-		amdgpu_fence_process(&adev->uvd.inst->ring);
+		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
 		break;
 	case 119:
-		amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
+		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
 		break;
 	case 120:
 		if (!amdgpu_sriov_vf(adev))
-			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
+			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
 		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -1468,9 +1507,9 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
 {
 	uint32_t data, data1, data2, suvd_flags;
 
-	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
-	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
-	data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);
+	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
+	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
+	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
 
 	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
 		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
@@ -1514,18 +1553,18 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
 			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
 	data1 |= suvd_flags;
 
-	WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
-	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
-	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
+	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
+	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
+	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
+	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
 }
 
 static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
 {
 	uint32_t data, data1, cgc_flags, suvd_flags;
 
-	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
-	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
+	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
+	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
 
 	cgc_flags = UVD_CGC_GATE__SYS_MASK |
 		UVD_CGC_GATE__UDEC_MASK |
@@ -1557,8 +1596,8 @@ static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
 	data |= cgc_flags;
 	data1 |= suvd_flags;
 
-	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
-	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
+	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
+	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
 }
 
 static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
@@ -1617,7 +1656,7 @@ static int uvd_v7_0_set_powergating_state(void *handle,
 	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
 		return 0;
 
-	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
 
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v7_0_stop(adev);
@@ -1720,18 +1759,27 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
 
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs;
-	DRM_INFO("UVD is enabled in VM mode\n");
+	int i;
+
+	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
+		adev->uvd.inst[i].ring.me = i;
+		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
+	}
 }
 
 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 {
-	int i;
+	int i, j;
 
-	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
-		adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
+			adev->uvd.inst[j].ring_enc[i].me = j;
+		}
 
-	DRM_INFO("UVD ENC is enabled in VM mode\n");
+		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
+	}
 }
 
 static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
@@ -1741,8 +1789,12 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
 
 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
-	adev->uvd.inst->irq.funcs = &uvd_v7_0_irq_funcs;
+	int i;
+
+	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
+		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
+	}
 }
 
 const struct amdgpu_ip_block_version uvd_v7_0_ip_block =

From 3b17c622856299a0b0eef02a409edec366a719a7 Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Mon, 23 Apr 2018 19:11:46 -0400
Subject: [PATCH 1195/1286] drm/amdgpu/vg20:increase 3 rings for
 AMDGPU_MAX_RINGS

Vega20 has two UVD hardware instances. The additional instance adds one
decode ring and two encode rings, so AMDGPU_MAX_RINGS needs to be
increased by 3.
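
A minimal sketch of the arithmetic (the macro name below is illustrative,
not part of the patch):

	/* one extra decode ring plus two extra encode rings for the
	 * second UVD instance: 18 + 3 = 21
	 */
	#define VEGA20_EXTRA_UVD_RINGS	(1 + 2)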

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4f8dac2d36a5..1513124c5659 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -29,7 +29,7 @@
 #include <drm/drm_print.h>
 
 /* max number of rings */
-#define AMDGPU_MAX_RINGS		18
+#define AMDGPU_MAX_RINGS		21
 #define AMDGPU_MAX_GFX_RINGS		1
 #define AMDGPU_MAX_COMPUTE_RINGS	8
 #define AMDGPU_MAX_VCE_RINGS		3

From 9181dba670cf0a0e8e3bda9fa66fecfe7c28b535 Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Fri, 11 May 2018 13:56:44 -0500
Subject: [PATCH 1196/1286] drm/amdgpu/vg20:Enable the 2nd instance for uvd

For Vega20, set the number of UVD instances to 2 to enable the second
instance. The register offsets built into the IB test need to be updated
for the second instance on Vega20.
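
A condensed sketch of the offset selection added below (names taken from
the hunk; comments are editorial):

	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };	/* legacy fixed base */

	if (adev->asic_type >= CHIP_VEGA10) {
		/* index 1 = UVD instance 0, index 2 = UVD instance 1 */
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}
	/* per-instance base + common relative offset */
	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);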

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 30 +++++++++++++------------
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c   |  7 +++++-
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e961492d357a..0772680371a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -72,11 +72,12 @@
 #define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
 #define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"
 
-#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00)
-#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00)
-#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00)
+/* These are common relative offsets for all asics, from uvd_7_0_offset.h,  */
+#define UVD_GPCOM_VCPU_CMD		0x03c3
+#define UVD_GPCOM_VCPU_DATA0	0x03c4
+#define UVD_GPCOM_VCPU_DATA1	0x03c5
+#define UVD_NO_OP				0x03ff
+#define UVD_BASE_SI				0x3800
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -990,6 +991,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	uint64_t addr;
 	long r;
 	int i;
+	unsigned offset_idx = 0;
+	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
 
 	amdgpu_bo_kunmap(bo);
 	amdgpu_bo_unpin(bo);
@@ -1009,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		goto err;
 
 	if (adev->asic_type >= CHIP_VEGA10) {
-		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
-		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
-		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
-		data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
-	} else {
-		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-		data[3] = PACKET0(mmUVD_NO_OP, 0);
+		offset_idx = 1 + ring->me;
+		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
+		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
 	}
 
+	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
+	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
+	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
+	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
+
 	ib = &job->ibs[0];
 	addr = amdgpu_bo_gpu_offset(bo);
 	ib->ptr[0] = data[0];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 08f3b6c84bea..6b719e11b2cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -40,6 +40,8 @@
 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
 
+#define UVD7_MAX_HW_INSTANCES_VEGA20			2
+
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -370,7 +372,10 @@ error:
 static int uvd_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	adev->uvd.num_uvd_inst = 1;
+	if (adev->asic_type == CHIP_VEGA20)
+		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
+	else
+		adev->uvd.num_uvd_inst = 1;
 
 	if (amdgpu_sriov_vf(adev))
 		adev->uvd.num_enc_rings = 1;

From 915893fd2b7bdb0e1e0a16ca402345ebc60e391b Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Mon, 23 Apr 2018 20:49:28 -0400
Subject: [PATCH 1197/1286] drm/amdgpu/vg20:Add IH client ID for the 2nd UVD

Vega20 has two UVD hardware instances. Add a second IH client ID for the
second UVD instance.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
index a12d4f27cfa4..12e196c15bbe 100644
--- a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
+++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
@@ -43,6 +43,7 @@ enum soc15_ih_clientid {
 	SOC15_IH_CLIENTID_SE2SH		= 0x0c,
 	SOC15_IH_CLIENTID_SE3SH		= 0x0d,
 	SOC15_IH_CLIENTID_SYSHUB	= 0x0e,
+	SOC15_IH_CLIENTID_UVD1		= 0x0e,
 	SOC15_IH_CLIENTID_THM		= 0x0f,
 	SOC15_IH_CLIENTID_UVD		= 0x10,
 	SOC15_IH_CLIENTID_VCE0		= 0x11,

From b53a6ebcc55971169e56982fd9131d1a6969a053 Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Mon, 23 Apr 2018 20:56:01 -0400
Subject: [PATCH 1198/1286] drm/amdgpu/vg20:Enable the 2nd instance IRQ for uvd
 7.2

For Vega20, the second UVD instance's IRQ uses a different client ID.
Enable the second instance IRQ for UVD 7.2.
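
A short sketch of both sides of the mapping (mirroring the hunks below;
comments are editorial):

	/* registration: IRQ client ID chosen per UVD instance */
	static int amdgpu_ih_clientid_uvds[] = {
		SOC15_IH_CLIENTID_UVD,		/* instance 0 */
		SOC15_IH_CLIENTID_UVD1		/* instance 1 (Vega20) */
	};
	r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124,
			      &adev->uvd.inst[j].irq);

	/* interrupt handler: client ID translated back to the instance */
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:  ip_instance = 0; break;
	case SOC15_IH_CLIENTID_UVD1: ip_instance = 1; break;
	}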

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 6b719e11b2cd..f9a5482101bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -49,6 +49,11 @@ static int uvd_v7_0_start(struct amdgpu_device *adev);
 static void uvd_v7_0_stop(struct amdgpu_device *adev);
 static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
 
+static int amdgpu_ih_clientid_uvds[] = {
+	SOC15_IH_CLIENTID_UVD,
+	SOC15_IH_CLIENTID_UVD1
+};
+
 /**
  * uvd_v7_0_ring_get_rptr - get read pointer
  *
@@ -397,13 +402,13 @@ static int uvd_v7_0_sw_init(void *handle)
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		/* UVD TRAP */
-		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst[j].irq);
+		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq);
 		if (r)
 			return r;
 
 		/* UVD ENC TRAP */
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-			r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst[j].irq);
+			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq);
 			if (r)
 				return r;
 		}
@@ -1480,6 +1485,9 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
 	case SOC15_IH_CLIENTID_UVD:
 		ip_instance = 0;
 		break;
+	case SOC15_IH_CLIENTID_UVD1:
+		ip_instance = 1;
+		break;
 	default:
 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
 		return 0;

From 04305acb9f7fc9978ed7a14bf965802c45ea9682 Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Mon, 23 Apr 2018 21:00:58 -0400
Subject: [PATCH 1199/1286] drm/amdgpu/vg20:Enable 2nd instance queue mapping
 for uvd 7.2

Enable second instance UVD queue mapping for UVD 7.2. From the user's
point of view only one UVD instance is exposed: there are two rings for
UVD decode and four rings for UVD encode.
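
A small worked example of the mapping implemented below, assuming
num_uvd_inst == 2 and num_enc_rings == 2:

	/* decode: user ring i maps directly to instance i */
	*out_ring = &adev->uvd.inst[ring].ring;

	/* encode: user rings are grouped per instance */
	instance = ring / adev->uvd.num_enc_rings;	/* 0,1 -> 0; 2,3 -> 1 */
	*out_ring = &adev->uvd.inst[instance].ring_enc[ring % adev->uvd.num_enc_rings];
	/* user ring 0 -> inst[0].ring_enc[0], 1 -> inst[0].ring_enc[1],
	 * user ring 2 -> inst[1].ring_enc[0], 3 -> inst[1].ring_enc[1]
	 */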

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 2458d385e55a..8af16e81c7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,6 +66,8 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
 			       u32 ring,
 			       struct amdgpu_ring **out_ring)
 {
+	u32 instance;
+
 	switch (mapper->hw_ip) {
 	case AMDGPU_HW_IP_GFX:
 		*out_ring = &adev->gfx.gfx_ring[ring];
@@ -77,13 +79,16 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
 		*out_ring = &adev->sdma.instance[ring].ring;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		*out_ring = &adev->uvd.inst->ring;
+		instance = ring;
+		*out_ring = &adev->uvd.inst[instance].ring;
 		break;
 	case AMDGPU_HW_IP_VCE:
 		*out_ring = &adev->vce.ring[ring];
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
-		*out_ring = &adev->uvd.inst->ring_enc[ring];
+		instance = ring / adev->uvd.num_enc_rings;
+		*out_ring =
+		&adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		*out_ring = &adev->vcn.ring_dec;
@@ -240,13 +245,14 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 		ip_num_rings = adev->sdma.num_instances;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		ip_num_rings = 1;
+		ip_num_rings = adev->uvd.num_uvd_inst;
 		break;
 	case AMDGPU_HW_IP_VCE:
 		ip_num_rings = adev->vce.num_rings;
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
-		ip_num_rings = adev->uvd.num_enc_rings;
+		ip_num_rings =
+			adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		ip_num_rings = 1;

From 705e98d77bc61d234ef5a1867acb38f6d0d40e4f Mon Sep 17 00:00:00 2001
From: James Zhu <James.Zhu@amd.com>
Date: Mon, 30 Apr 2018 08:43:12 -0400
Subject: [PATCH 1200/1286] drm/amdgpu/vg20:Enable UVD/VCE for Vega20

The Vega20 ucode load type is set to AMDGPU_FW_LOAD_DIRECT by default,
so UVD/VCE does not need the PSP IP block to be up. UVD/VCE can
therefore be enabled for Vega20 now.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 295bc9cd46f0..987271b18fd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -529,10 +529,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-		if (adev->asic_type != CHIP_VEGA20) {
-			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
-			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
-		}
+		amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
 		break;
 	case CHIP_RAVEN:
 		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);

From 9883e9d751dad05e8c3ad3c6b769dafc60762c38 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Mon, 14 May 2018 11:50:46 -0500
Subject: [PATCH 1201/1286] drm/amdgpu: add df 3.6 headers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Needed for vega20.

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/include/asic_reg/df/df_3_6_default.h  | 26 ++++++++++
 .../amd/include/asic_reg/df/df_3_6_offset.h   | 33 +++++++++++++
 .../amd/include/asic_reg/df/df_3_6_sh_mask.h  | 48 +++++++++++++++++++
 3 files changed, 107 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
 create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h

diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
new file mode 100644
index 000000000000..e58c207ac980
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_3_6_DEFAULT_HEADER
+#define _df_3_6_DEFAULT_HEADER
+
+#define mmFabricConfigAccessControl_DEFAULT						0x00000000
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
new file mode 100644
index 000000000000..a9575db8d7aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_3_6_OFFSET_HEADER
+#define _df_3_6_OFFSET_HEADER
+
+#define mmFabricConfigAccessControl									0x0410
+#define mmFabricConfigAccessControl_BASE_IDX								0
+
+#define mmDF_PIE_AON0_DfGlobalClkGater									0x00fc
+#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX								0
+
+#define mmDF_CS_UMC_AON0_DramBaseAddress0								0x0044
+#define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX							0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
new file mode 100644
index 000000000000..88f7c69df6b9
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_3_6_SH_MASK_HEADER
+#define _df_3_6_SH_MASK_HEADER
+
+/* FabricConfigAccessControl */
+#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT						0x0
+#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT						0x1
+#define FabricConfigAccessControl__CfgRegInstID__SHIFT							0x10
+#define FabricConfigAccessControl__CfgRegInstAccEn_MASK							0x00000001L
+#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK						0x00000002L
+#define FabricConfigAccessControl__CfgRegInstID_MASK							0x00FF0000L
+
+/* DF_PIE_AON0_DfGlobalClkGater */
+#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT							0x0
+#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK							0x0000000FL
+
+/* DF_CS_AON0_DramBaseAddress0 */
+#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT						0x0
+#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT						0x1
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT						0x4
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT						0x8
+#define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT						0xc
+#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK						0x00000001L
+#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK						0x00000002L
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK						0x000000F0L
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK						0x00000700L
+#define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK						0xFFFFF000L
+
+#endif

From 13b581502d5101adadfb7ea269ff4c8074ba76cb Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Wed, 4 Apr 2018 14:30:28 +0800
Subject: [PATCH 1202/1286] drm/amdgpu/df: implement df v3_6 callback functions
 (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

New df helpers for 3.6.

v2: switch to using df 3.6 headers (Alex)

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile  |   3 +-
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 116 +++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/df_v3_6.h |  40 +++++++++
 3 files changed, 158 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v3_6.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v3_6.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 012ea37b81be..a51c5a960750 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -67,7 +67,8 @@ amdgpu-y += \
 
 # add DF block
 amdgpu-y += \
-	df_v1_7.o
+	df_v1_7.o \
+	df_v3_6.o
 
 # add GMC block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
new file mode 100644
index 000000000000..60608b3df881
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v3_6.h"
+
+#include "df/df_3_6_default.h"
+#include "df/df_3_6_offset.h"
+#include "df/df_3_6_sh_mask.h"
+
+static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
+				       16, 32, 0, 0, 0, 2, 4, 8};
+
+static void df_v3_6_init(struct amdgpu_device *adev)
+{
+}
+
+static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
+					  bool enable)
+{
+	u32 tmp;
+
+	if (enable) {
+		tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
+		tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
+		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
+	} else
+		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
+			     mmFabricConfigAccessControl_DEFAULT);
+}
+
+static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
+	tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
+	tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+
+	return tmp;
+}
+
+static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
+{
+	int fb_channel_number;
+
+	fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+	if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number))
+		fb_channel_number = 0;
+
+	return df_v3_6_channel_number[fb_channel_number];
+}
+
+static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+						     bool enable)
+{
+	u32 tmp;
+
+	/* Put DF on broadcast mode */
+	adev->df_funcs->enable_broadcast_mode(adev, true);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
+		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+		tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
+		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+	} else {
+		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+		tmp |= DF_V3_6_MGCG_DISABLE;
+		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+	}
+
+	/* Exit broadcast mode */
+	adev->df_funcs->enable_broadcast_mode(adev, false);
+}
+
+static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
+					  u32 *flags)
+{
+	u32 tmp;
+
+	/* AMD_CG_SUPPORT_DF_MGCG */
+	tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+	if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
+		*flags |= AMD_CG_SUPPORT_DF_MGCG;
+}
+
+const struct amdgpu_df_funcs df_v3_6_funcs = {
+	.init = df_v3_6_init,
+	.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
+	.get_fb_channel_number = df_v3_6_get_fb_channel_number,
+	.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
+	.update_medium_grain_clock_gating =
+			df_v3_6_update_medium_grain_clock_gating,
+	.get_clockgating_state = df_v3_6_get_clockgating_state,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
new file mode 100644
index 000000000000..e79c58e5efcb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DF_V3_6_H__
+#define __DF_V3_6_H__
+
+#include "soc15_common.h"
+
+enum DF_V3_6_MGCG {
+	DF_V3_6_MGCG_DISABLE = 0,
+	DF_V3_6_MGCG_ENABLE_00_CYCLE_DELAY = 1,
+	DF_V3_6_MGCG_ENABLE_01_CYCLE_DELAY = 2,
+	DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY = 13,
+	DF_V3_6_MGCG_ENABLE_31_CYCLE_DELAY = 14,
+	DF_V3_6_MGCG_ENABLE_63_CYCLE_DELAY = 15
+};
+
+extern const struct amdgpu_df_funcs df_v3_6_funcs;
+
+#endif

From 698758bbb3e3e344073f86f2d011cc536d94da49 Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Wed, 4 Apr 2018 14:32:10 +0800
Subject: [PATCH 1203/1286] drm/amdgpu: Switch to use df_v3_6_funcs for vega20
 (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

v2: fix whitespace (Alex)

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 987271b18fd1..0e4f67e4c875 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -51,6 +51,7 @@
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 #include "df_v1_7.h"
+#include "df_v3_6.h"
 #include "vega10_ih.h"
 #include "sdma_v4_0.h"
 #include "uvd_v7_0.h"
@@ -501,7 +502,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	else
 		adev->nbio_funcs = &nbio_v6_1_funcs;
 
-	adev->df_funcs = &df_v1_7_funcs;
+	if (adev->asic_type == CHIP_VEGA20)
+		adev->df_funcs = &df_v3_6_funcs;
+	else
+		adev->df_funcs = &df_v1_7_funcs;
 	adev->nbio_funcs->detect_hw_virt(adev);
 
 	if (amdgpu_sriov_vf(adev))

From 1204a26e03a2b46917f7164e665dfc3b67a0ae1e Mon Sep 17 00:00:00 2001
From: Feifei Xu <Feifei.Xu@amd.com>
Date: Mon, 22 Jan 2018 19:08:33 +0800
Subject: [PATCH 1204/1286] drm/amdgpu: Add vega20 pci ids
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 739e7e09c8b0..e33e53cde634 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -560,6 +560,13 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
 	{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
 	{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+	/* Vega 20 */
+	{0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
 	/* Raven */
 	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 

From 950f23ebdcfc7ca53d32d76631ba6c4e61d0f88e Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Mon, 14 May 2018 11:28:04 -0500
Subject: [PATCH 1205/1286] drm/amdgpu: flag Vega20 as experimental
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Must set amdgpu.exp_hw_support=1 on the kernel command line in
grub to enable support.
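
For example (assuming a GRUB-based setup; file paths and the update
command vary by distribution):

	# /etc/default/grub
	GRUB_CMDLINE_LINUX="... amdgpu.exp_hw_support=1"
	# then regenerate the config, e.g. update-grub or
	# grub2-mkconfig -o /boot/grub2/grub.cfg, and reboot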

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index e33e53cde634..b0bf2f24da48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -561,12 +561,12 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
 	{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
 	/* Vega 20 */
-	{0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
-	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+	{0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
 	/* Raven */
 	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 

From 20b6b7885df58b86d9b2768852bb2c81081e2c93 Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Date: Tue, 15 May 2018 14:12:21 -0400
Subject: [PATCH 1206/1286] drm/amdgpu: Skip drm_sched_entity related ops for
 KIQ ring.

Following change 75fbed2 we never initialize or use the GPU scheduler
for the KIQ, so we need to skip the KIQ ring when iterating over
amdgpu_ctx's scheduler entities.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a8e531d604fa..c5bb36275e93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -173,9 +173,14 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
 
-	for (i = 0; i < ctx->adev->num_rings; i++)
+	for (i = 0; i < ctx->adev->num_rings; i++) {
+
+		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+			continue;
+
 		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
 			&ctx->rings[i].entity);
+	}
 
 	amdgpu_ctx_fini(ref);
 }
@@ -452,12 +457,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 		if (!ctx->adev)
 			return;
 
-		for (i = 0; i < ctx->adev->num_rings; i++)
+		for (i = 0; i < ctx->adev->num_rings; i++) {
+
+			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+				continue;
+
 			if (kref_read(&ctx->refcount) == 1)
 				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
 						  &ctx->rings[i].entity);
 			else
 				DRM_ERROR("ctx %p is still alive\n", ctx);
+		}
 	}
 }
 
@@ -474,12 +484,17 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
 		if (!ctx->adev)
 			return;
 
-		for (i = 0; i < ctx->adev->num_rings; i++)
+		for (i = 0; i < ctx->adev->num_rings; i++) {
+
+			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+				continue;
+
 			if (kref_read(&ctx->refcount) == 1)
 				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
 					&ctx->rings[i].entity);
 			else
 				DRM_ERROR("ctx %p is still alive\n", ctx);
+		}
 	}
 }
 

From 01233b8073455e5d489b95758c3afeb78ff94530 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Mon, 14 May 2018 16:03:01 +0800
Subject: [PATCH 1207/1286] drm/amd/pp: Workaround flickering issue on RV

Screen flickering was observed while running 1080p video with
MPV_VAAPI/VDPAU on a 4x 4K@60 monitor configuration.

A higher mclk needs to be set in this configuration.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 2f69bfa478a7..017ef2d169e9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -600,7 +600,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 						data->gfx_min_freq_limit/100);
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinFclkByFreq,
+						hwmgr->display_config->num_display > 3 ?
+						SMU10_UMD_PSTATE_PEAK_FCLK :
 						SMU10_UMD_PSTATE_MIN_FCLK);
+
 		smum_send_msg_to_smc_with_parameter(hwmgr,
 						PPSMC_MSG_SetHardMinSocclkByFreq,
 						SMU10_UMD_PSTATE_MIN_SOCCLK);

From b9245b949885f24e84ae16d99d3898a5f1e0ba24 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 19 Apr 2018 09:57:21 +0200
Subject: [PATCH 1208/1286] drm/amdgpu: remove unused member
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This lock isn't used any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 4cf678684a12..d6827083572a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -187,9 +187,6 @@ struct amdgpu_vm {
 	struct amdgpu_vm_pt     root;
 	struct dma_fence	*last_update;
 
-	/* protecting freed */
-	spinlock_t		freed_lock;
-
 	/* Scheduler entity for page table updates */
 	struct drm_sched_entity	entity;
 

From 563e1e664d27292a3b55ca08366dc8c32db52450 Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Date: Tue, 15 May 2018 14:42:20 -0400
Subject: [PATCH 1209/1286] drm/scheduler: Remove obsolete spinlock.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This spinlock is superfluous: any call to drm_sched_entity_push_job
should already be under a common lock together with the matching
drm_sched_job_init, so that the order of insertion into the queue
matches the job's fence sequence number.
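
A minimal sketch of the caller pattern this relies on (the lock and
owner names are illustrative, not a specific driver's code):

	/* job init and push happen under one common lock so that the
	 * queue insertion order matches the fence sequence numbers
	 */
	mutex_lock(&common_lock);
	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
	if (!r)
		drm_sched_entity_push_job(&job->base, entity);
	mutex_unlock(&common_lock);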

v2:
Improve patch description.
Add function documentation describing the locking considerations.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 15 ++++++++++-----
 include/drm/gpu_scheduler.h               |  1 -
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index a364fc0b38c3..df1578d6f42e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -139,7 +139,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 	entity->last_scheduled = NULL;
 
 	spin_lock_init(&entity->rq_lock);
-	spin_lock_init(&entity->queue_lock);
 	spsc_queue_init(&entity->job_queue);
 
 	atomic_set(&entity->fence_seq, 0);
@@ -413,6 +412,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
  *
  * @sched_job		The pointer to job required to submit
  *
+ * Note: To guarantee that the order of insertion to queue matches
+ * the job's fence sequence number this function should be
+ * called with drm_sched_job_init under common lock.
+ *
  * Returns 0 for success, negative error code otherwise.
  */
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
@@ -423,11 +426,8 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
 	trace_drm_sched_job(sched_job, entity);
 
-	spin_lock(&entity->queue_lock);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
-	spin_unlock(&entity->queue_lock);
-
 	/* first job wakes up scheduler */
 	if (first) {
 		/* Add the entity to the run queue */
@@ -593,7 +593,12 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);
 
-/* init a sched_job with basic field */
+/**
+ * Init a sched_job with basic field
+ *
+ * Note: Refer to drm_sched_entity_push_job documentation
+ * for locking considerations.
+ */
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 52380067a43f..dec655894d08 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -56,7 +56,6 @@ struct drm_sched_entity {
 	spinlock_t			rq_lock;
 	struct drm_gpu_scheduler	*sched;
 
-	spinlock_t			queue_lock;
 	struct spsc_queue		job_queue;
 
 	atomic_t			fence_seq;

From 2b6dc93a3d439136c3fe11291a506e581b84a327 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Wed, 16 May 2018 08:39:58 -0500
Subject: [PATCH 1210/1286] drm/amdgpu/display: remove VEGAM config option
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Leftover from bringup.  No need to keep it around for
upstream.

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/Kconfig                        | 7 -------
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c          | 4 ----
 drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 2 --
 .../gpu/drm/amd/display/dc/bios/command_table_helper2.c    | 2 --
 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c           | 4 ----
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c          | 4 ----
 drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c      | 4 ----
 drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c           | 2 --
 drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c         | 2 --
 drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c             | 2 --
 drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h             | 2 --
 drivers/gpu/drm/amd/display/include/dal_asic_id.h          | 6 +-----
 drivers/gpu/drm/amd/display/include/dal_types.h            | 2 --
 13 files changed, 1 insertion(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 6dcec9c9126b..a0eef59e65ba 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -34,13 +34,6 @@ config DEBUG_KERNEL_DC
 	  if you want to hit
 	  kdgb_break in assert.
 
-config DRM_AMD_DC_VEGAM
-        bool "VEGAM support"
-        depends on DRM_AMD_DC
-        help
-         Choose this option if you want to have
-         VEGAM support for display engine
-
 config DRM_AMD_DC_VG20
 	bool "Vega20 support"
 	depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6f5cb26b243c..6d0dc1fecb39 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1514,9 +1514,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
 	case CHIP_POLARIS12:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case CHIP_VEGAM:
-#endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
@@ -1710,9 +1708,7 @@ static int dm_early_init(void *handle)
 		adev->mode_info.plane_type = dm_plane_type_default;
 		break;
 	case CHIP_POLARIS10:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case CHIP_VEGAM:
-#endif
 		adev->mode_info.num_crtc = 6;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 6;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index be066c49b984..253bbb1eea60 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -51,9 +51,7 @@ bool dal_bios_parser_init_cmd_tbl_helper(
 		return true;
 
 	case DCE_VERSION_11_2:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case DCE_VERSION_11_22:
-#endif
 		*h = dal_cmd_tbl_helper_dce112_get_table();
 		return true;
 
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 9b9e06995805..bbbcef566c55 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -52,9 +52,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
 		return true;
 
 	case DCE_VERSION_11_2:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case DCE_VERSION_11_22:
-#endif
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 4ee3c26f7c13..2c4e8f0cb2dc 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -59,10 +59,8 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
 			return BW_CALCS_VERSION_POLARIS10;
 		if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
 			return BW_CALCS_VERSION_POLARIS11;
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 		if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
 			return BW_CALCS_VERSION_VEGAM;
-#endif
 		return BW_CALCS_VERSION_INVALID;
 
 	case FAMILY_AI:
@@ -2151,11 +2149,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
 		dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
 		break;
 	case BW_CALCS_VERSION_POLARIS10:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 		/* TODO: Treat VEGAM the same as P10 for now
 		 * Need to tune the para for VEGAM if needed */
 	case BW_CALCS_VERSION_VEGAM:
-#endif
 		vbios.memory_type = bw_def_gddr5;
 		vbios.dram_channel_width_in_bits = 32;
 		vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 9eb731fb5251..345835ff58d1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -79,10 +79,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 				ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
 			dc_version = DCE_VERSION_11_2;
 		}
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 		if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
 			dc_version = DCE_VERSION_11_22;
-#endif
 		break;
 	case FAMILY_AI:
 		dc_version = DCE_VERSION_12_0;
@@ -129,9 +127,7 @@ struct resource_pool *dc_create_resource_pool(
 			num_virtual_links, dc, asic_id);
 		break;
 	case DCE_VERSION_11_2:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case DCE_VERSION_11_22:
-#endif
 		res_pool = dce112_create_resource_pool(
 			num_virtual_links, dc);
 		break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 223db98a568a..0570e7e4d0a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -590,9 +590,7 @@ static uint32_t dce110_get_pix_clk_dividers(
 			pll_settings, pix_clk_params);
 		break;
 	case DCE_VERSION_11_2:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case DCE_VERSION_11_22:
-#endif
 	case DCE_VERSION_12_0:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case DCN_VERSION_1_0:
@@ -982,9 +980,7 @@ static bool dce110_program_pix_clk(
 
 		break;
 	case DCE_VERSION_11_2:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case DCE_VERSION_11_22:
-#endif
 	case DCE_VERSION_12_0:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case DCN_VERSION_1_0:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 61fe484da1a0..0caee3523017 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -75,9 +75,7 @@ bool dal_hw_factory_init(
 		return true;
 	case DCE_VERSION_11_0:
 	case DCE_VERSION_11_2:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case DCE_VERSION_11_22:
-#endif
 		dal_hw_factory_dce110_init(factory);
 		return true;
 	case DCE_VERSION_12_0:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 910ae2b7bf64..55c707488541 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -72,9 +72,7 @@ bool dal_hw_translate_init(
 	case DCE_VERSION_10_0:
 	case DCE_VERSION_11_0:
 	case DCE_VERSION_11_2:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case DCE_VERSION_11_22:
-#endif
 		dal_hw_translate_dce110_init(translate);
 		return true;
 	case DCE_VERSION_12_0:
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index c3d7c320fdba..14dc8c94d862 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -83,9 +83,7 @@ struct i2caux *dal_i2caux_create(
 	case DCE_VERSION_8_3:
 		return dal_i2caux_dce80_create(ctx);
 	case DCE_VERSION_11_2:
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	case DCE_VERSION_11_22:
-#endif
 		return dal_i2caux_dce112_create(ctx);
 	case DCE_VERSION_11_0:
 		return dal_i2caux_dce110_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index 933ea7a1e18b..eece165206f9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -43,9 +43,7 @@ enum bw_calcs_version {
 	BW_CALCS_VERSION_POLARIS10,
 	BW_CALCS_VERSION_POLARIS11,
 	BW_CALCS_VERSION_POLARIS12,
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	BW_CALCS_VERSION_VEGAM,
-#endif
 	BW_CALCS_VERSION_STONEY,
 	BW_CALCS_VERSION_VEGA10
 };
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 77d2856be9f6..6aeb5a2902c3 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -86,6 +86,7 @@
 #define VI_POLARIS10_P_A0 80
 #define VI_POLARIS11_M_A0 90
 #define VI_POLARIS12_V_A0 100
+#define VI_VEGAM_A0 110
 
 #define VI_UNKNOWN 0xFF
 
@@ -98,14 +99,9 @@
 		(eChipRev < VI_POLARIS11_M_A0))
 #define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) &&  \
 		(eChipRev < VI_POLARIS12_V_A0))
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
-#define VI_VEGAM_A0 110
 #define ASIC_REV_IS_POLARIS12_V(eChipRev) ((eChipRev >= VI_POLARIS12_V_A0) && \
 		(eChipRev < VI_VEGAM_A0))
 #define ASIC_REV_IS_VEGAM(eChipRev) (eChipRev >= VI_VEGAM_A0)
-#else
-#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
-#endif
 
 /* DCE11 */
 #define CZ_CARRIZO_A0 0x01
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 5b1f8cef0c22..840142b65f8b 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -40,9 +40,7 @@ enum dce_version {
 	DCE_VERSION_10_0,
 	DCE_VERSION_11_0,
 	DCE_VERSION_11_2,
-#if defined(CONFIG_DRM_AMD_DC_VEGAM)
 	DCE_VERSION_11_22,
-#endif
 	DCE_VERSION_12_0,
 	DCE_VERSION_MAX,
 	DCN_VERSION_1_0,

From b4b9f944e4ee3d1a268d96d7de2d519b491e8ea5 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Wed, 16 May 2018 15:28:59 -0500
Subject: [PATCH 1211/1286] drm/amdgpu/display: remove VEGA20 config option

Leftover from bringup.  No need to keep it around for
upstream.

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/Kconfig           |   8 -
 .../drm/amd/display/dc/bios/bios_parser2.c    |   2 -
 .../gpu/drm/amd/display/dc/dce/dce_clocks.c   |   6 -
 .../amd/display/dc/dce120/dce120_resource.c   | 177 ------------------
 .../gpu/drm/amd/display/include/dal_asic_id.h |   2 -
 5 files changed, 195 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index a0eef59e65ba..d5d4586e6176 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -34,12 +34,4 @@ config DEBUG_KERNEL_DC
 	  if you want to hit
 	  kdgb_break in assert.
 
-config DRM_AMD_DC_VG20
-	bool "Vega20 support"
-	depends on DRM_AMD_DC
-	help
-		Choose this option if you want to have
-		Vega20 support for display engine
-
-
 endmenu
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 4561673a0fe6..b8cef7af3c4a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1331,9 +1331,7 @@ static enum bp_result bios_parser_get_firmware_info(
 				result = get_firmware_info_v3_2(bp, info);
 				break;
 			case 3:
-#ifdef CONFIG_DRM_AMD_DC_VG20
 				result = get_firmware_info_v3_2(bp, info);
-#endif
 				break;
 			default:
 				break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index aa4cf3095235..f043e5ea412c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -413,18 +413,12 @@ static int dce112_set_clock(
 	/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
 	dce_clk_params.target_clock_frequency = 0;
 	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
-#ifndef CONFIG_DRM_AMD_DC_VG20
-	dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
-			(dce_clk_params.pll_id ==
-					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
-#else
 	if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
 		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
 			(dce_clk_params.pll_id ==
 					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
 	else
 		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
-#endif
 
 	bp->funcs->set_dce_clock(bp, &dce_clk_params);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 545f35f0821f..2d58daccc005 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -814,7 +814,6 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
 	dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
 }
 
-#ifdef CONFIG_DRM_AMD_DC_VG20
 static uint32_t read_pipe_fuses(struct dc_context *ctx)
 {
 	uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
@@ -1020,182 +1019,6 @@ res_create_fail:
 
 	return false;
 }
-#else
-static bool construct(
-	uint8_t num_virtual_links,
-	struct dc *dc,
-	struct dce110_resource_pool *pool)
-{
-	unsigned int i;
-	struct dc_context *ctx = dc->ctx;
-	struct irq_service_init_data irq_init_data;
-
-	ctx->dc_bios->regs = &bios_regs;
-
-	pool->base.res_cap = &res_cap;
-	pool->base.funcs = &dce120_res_pool_funcs;
-
-	/* TODO: Fill more data from GreenlandAsicCapability.cpp */
-	pool->base.pipe_count = res_cap.num_timing_generator;
-	pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
-	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
-
-	dc->caps.max_downscale_ratio = 200;
-	dc->caps.i2c_speed_in_khz = 100;
-	dc->caps.max_cursor_size = 128;
-	dc->caps.dual_link_dvi = true;
-
-	dc->debug = debug_defaults;
-
-	/*************************************************
-	 *  Create resources                             *
-	 *************************************************/
-
-	pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
-			dce120_clock_source_create(ctx, ctx->dc_bios,
-				CLOCK_SOURCE_COMBO_PHY_PLL0,
-				&clk_src_regs[0], false);
-	pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
-			dce120_clock_source_create(ctx, ctx->dc_bios,
-				CLOCK_SOURCE_COMBO_PHY_PLL1,
-				&clk_src_regs[1], false);
-	pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
-			dce120_clock_source_create(ctx, ctx->dc_bios,
-				CLOCK_SOURCE_COMBO_PHY_PLL2,
-				&clk_src_regs[2], false);
-	pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
-			dce120_clock_source_create(ctx, ctx->dc_bios,
-				CLOCK_SOURCE_COMBO_PHY_PLL3,
-				&clk_src_regs[3], false);
-	pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
-			dce120_clock_source_create(ctx, ctx->dc_bios,
-				CLOCK_SOURCE_COMBO_PHY_PLL4,
-				&clk_src_regs[4], false);
-	pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
-			dce120_clock_source_create(ctx, ctx->dc_bios,
-				CLOCK_SOURCE_COMBO_PHY_PLL5,
-				&clk_src_regs[5], false);
-	pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
-
-	pool->base.dp_clock_source =
-			dce120_clock_source_create(ctx, ctx->dc_bios,
-				CLOCK_SOURCE_ID_DP_DTO,
-				&clk_src_regs[0], true);
-
-	for (i = 0; i < pool->base.clk_src_count; i++) {
-		if (pool->base.clock_sources[i] == NULL) {
-			dm_error("DC: failed to create clock sources!\n");
-			BREAK_TO_DEBUGGER();
-			goto clk_src_create_fail;
-		}
-	}
-
-	pool->base.display_clock = dce120_disp_clk_create(ctx);
-	if (pool->base.display_clock == NULL) {
-		dm_error("DC: failed to create display clock!\n");
-		BREAK_TO_DEBUGGER();
-		goto disp_clk_create_fail;
-	}
-
-	pool->base.dmcu = dce_dmcu_create(ctx,
-			&dmcu_regs,
-			&dmcu_shift,
-			&dmcu_mask);
-	if (pool->base.dmcu == NULL) {
-		dm_error("DC: failed to create dmcu!\n");
-		BREAK_TO_DEBUGGER();
-		goto res_create_fail;
-	}
-
-	pool->base.abm = dce_abm_create(ctx,
-			&abm_regs,
-			&abm_shift,
-			&abm_mask);
-	if (pool->base.abm == NULL) {
-		dm_error("DC: failed to create abm!\n");
-		BREAK_TO_DEBUGGER();
-		goto res_create_fail;
-	}
-
-	irq_init_data.ctx = dc->ctx;
-	pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
-	if (!pool->base.irqs)
-		goto irqs_create_fail;
-
-	for (i = 0; i < pool->base.pipe_count; i++) {
-		pool->base.timing_generators[i] =
-				dce120_timing_generator_create(
-					ctx,
-					i,
-					&dce120_tg_offsets[i]);
-		if (pool->base.timing_generators[i] == NULL) {
-			BREAK_TO_DEBUGGER();
-			dm_error("DC: failed to create tg!\n");
-			goto controller_create_fail;
-		}
-
-		pool->base.mis[i] = dce120_mem_input_create(ctx, i);
-
-		if (pool->base.mis[i] == NULL) {
-			BREAK_TO_DEBUGGER();
-			dm_error(
-				"DC: failed to create memory input!\n");
-			goto controller_create_fail;
-		}
-
-		pool->base.ipps[i] = dce120_ipp_create(ctx, i);
-		if (pool->base.ipps[i] == NULL) {
-			BREAK_TO_DEBUGGER();
-			dm_error(
-				"DC: failed to create input pixel processor!\n");
-			goto controller_create_fail;
-		}
-
-		pool->base.transforms[i] = dce120_transform_create(ctx, i);
-		if (pool->base.transforms[i] == NULL) {
-			BREAK_TO_DEBUGGER();
-			dm_error(
-				"DC: failed to create transform!\n");
-			goto res_create_fail;
-		}
-
-		pool->base.opps[i] = dce120_opp_create(
-			ctx,
-			i);
-		if (pool->base.opps[i] == NULL) {
-			BREAK_TO_DEBUGGER();
-			dm_error(
-				"DC: failed to create output pixel processor!\n");
-		}
-	}
-
-	if (!resource_construct(num_virtual_links, dc, &pool->base,
-			 &res_create_funcs))
-		goto res_create_fail;
-
-	/* Create hardware sequencer */
-	if (!dce120_hw_sequencer_create(dc))
-		goto controller_create_fail;
-
-	dc->caps.max_planes =  pool->base.pipe_count;
-
-	bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
-
-	bw_calcs_data_update_from_pplib(dc);
-
-	return true;
-
-irqs_create_fail:
-controller_create_fail:
-disp_clk_create_fail:
-clk_src_create_fail:
-res_create_fail:
-
-	destruct(pool);
-
-	return false;
-}
-#endif
 
 struct resource_pool *dce120_create_resource_pool(
 	uint8_t num_virtual_links,
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 6aeb5a2902c3..cac069dd2a0e 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -115,10 +115,8 @@
 /* DCE12 */
 #define AI_UNKNOWN 0xFF
 
-#ifdef CONFIG_DRM_AMD_DC_VG20
 #define AI_VEGA20_P_A0 40
 #define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN))
-#endif
 
 #define AI_GREENLAND_P_A0 1
 #define AI_GREENLAND_P_A1 2

From a1a0c40664fbd0bd1a9fa53e14ccab539005e2ca Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Wed, 16 May 2018 15:34:19 -0500
Subject: [PATCH 1212/1286] drm/amdgpu/display: fix vega12/20 handling in
 dal_asic_id.h

- Remove unused ASICREV_IS_VEGA12_p() macro
- Fix ASICREV_IS_VEGA12_P() macro to properly check against vega20 (see the note below)
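
A quick hedged check of the new bound, using the values from the header
below; the OLD_/NEW_ macros are stand-ins for the before/after definitions
of ASICREV_IS_VEGA12_P(), not names used by the driver:

	#include <assert.h>

	#define AI_VEGA12_P_A0 20
	#define AI_VEGA20_P_A0 40
	#define AI_UNKNOWN     0xFF

	/* old check: anything below AI_UNKNOWN counted as Vega12 */
	#define OLD_IS_VEGA12(rev) ((rev) >= AI_VEGA12_P_A0 && (rev) < AI_UNKNOWN)
	/* fixed check: bounded by the first Vega20 revision */
	#define NEW_IS_VEGA12(rev) ((rev) >= AI_VEGA12_P_A0 && (rev) < AI_VEGA20_P_A0)

	int main(void)
	{
		assert(OLD_IS_VEGA12(AI_VEGA20_P_A0));   /* Vega20 wrongly matched */
		assert(!NEW_IS_VEGA12(AI_VEGA20_P_A0));  /* excluded after the fix */
		return 0;
	}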

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/include/dal_asic_id.h | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index cac069dd2a0e..25029ed42d89 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -115,19 +115,17 @@
 /* DCE12 */
 #define AI_UNKNOWN 0xFF
 
-#define AI_VEGA20_P_A0 40
-#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN))
-
 #define AI_GREENLAND_P_A0 1
 #define AI_GREENLAND_P_A1 2
 #define AI_UNKNOWN 0xFF
 
 #define AI_VEGA12_P_A0 20
+#define AI_VEGA20_P_A0 40
 #define ASICREV_IS_GREENLAND_M(eChipRev)  (eChipRev < AI_VEGA12_P_A0)
 #define ASICREV_IS_GREENLAND_P(eChipRev)  (eChipRev < AI_VEGA12_P_A0)
 
-#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
-#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
+#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_VEGA20_P_A0))
+#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN))
 
 /* DCN1_0 */
 #define INTERNAL_REV_RAVEN_A0             0x00    /* First spin of Raven */

From fa19a6e9d0e7b46bedaa526ba71ff1bf376dd93f Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Thu, 17 May 2018 15:56:05 +0300
Subject: [PATCH 1213/1286] drm/amd/pp: missing curly braces in
 smu7_enable_sclk_mclk_dpm()

We added some more lines of code to this if statement but forgot to add
curly braces.
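
A minimal sketch of the hazard with hypothetical helpers (not the driver
code): without braces the outer if only guards the statement immediately
following it, so everything after runs unconditionally.

	static void disable_handshake(void) { }
	static void enable_dpm(void) { }

	static void buggy(int dpm_enabled, int is_vegam)
	{
		if (dpm_enabled)
			if (is_vegam)
				disable_handshake();

		enable_dpm();		/* meant to depend on dpm_enabled, but always runs */
	}

	static void fixed(int dpm_enabled, int is_vegam)
	{
		if (dpm_enabled) {
			if (is_vegam)
				disable_handshake();

			enable_dpm();	/* now correctly guarded */
		}
	}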

Fixes: 0c24e7ef233b ("drm/amd/powerplay: add specific changes for VEGAM in smu7_hwmgr.c")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 8eb3f5176646..646c9e9bf681 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1018,7 +1018,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	/* enable SCLK dpm */
-	if (!data->sclk_dpm_key_disabled)
+	if (!data->sclk_dpm_key_disabled) {
 		if (hwmgr->chip_id == CHIP_VEGAM)
 			smu7_disable_sclk_vce_handshake(hwmgr);
 
@@ -1026,6 +1026,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
 		"Failed to enable SCLK DPM during DPM Start Function!",
 		return -EINVAL);
+	}
 
 	/* enable MCLK dpm */
 	if (0 == data->mclk_dpm_key_disabled) {

From 50da51744f005f4afd44b69c03e6f2068abfaed8 Mon Sep 17 00:00:00 2001
From: Tom St Denis <tom.stdenis@amd.com>
Date: Wed, 9 May 2018 14:22:29 -0400
Subject: [PATCH 1214/1286] drm/amd/amdgpu:  Code comments for the amdgpu_ttm.c
 driver. (v2)

NFC, just comments.

(v2):  Updated based on feedback from Alex Deucher.
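
The added comments use the kernel-doc format; a minimal sketch of that
layout with a hypothetical function (not part of this patch):

	struct foo_device;

	/**
	 * foo_do_thing - One-line summary of what the function does
	 * @dev:   the device being operated on
	 * @flags: behaviour flags
	 *
	 * Longer description, typically naming the caller and the context
	 * in which the function runs.
	 */
	int foo_do_thing(struct foo_device *dev, unsigned int flags);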

Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 348 +++++++++++++++++++++++-
 1 file changed, 341 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 69a2b25b3696..e93a0a237dc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -63,16 +63,44 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 /*
  * Global memory.
  */
+
+/**
+ * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
+ * memory object
+ *
+ * @ref: Object for initialization.
+ *
+ * This is called by drm_global_item_ref() when an object is being
+ * initialized.
+ */
 static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
 {
 	return ttm_mem_global_init(ref->object);
 }
 
+/**
+ * amdgpu_ttm_mem_global_release - Drop reference to a memory object
+ *
+ * @ref: Object being removed
+ *
+ * This is called by drm_global_item_unref() when an object is being
+ * released.
+ */
 static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
 {
 	ttm_mem_global_release(ref->object);
 }
 
+/**
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference
+ * 							structures.
+ *
+ * @adev:  	AMDGPU device for which the global structures need to be
+ *			registered.
+ *
+ * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
+ * during bring up.
+ */
 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 {
 	struct drm_global_reference *global_ref;
@@ -80,7 +108,9 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 	struct drm_sched_rq *rq;
 	int r;
 
+	/* ensure reference is false in case init fails */
 	adev->mman.mem_global_referenced = false;
+
 	global_ref = &adev->mman.mem_global_ref;
 	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
 	global_ref->size = sizeof(struct ttm_mem_global);
@@ -146,6 +176,18 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 	return 0;
 }
 
+/**
+ * amdgpu_init_mem_type - 	Initialize a memory manager for a specific
+ * 							type of memory request.
+ *
+ * @bdev:	The TTM BO device object (contains a reference to
+ * 			amdgpu_device)
+ * @type:	The type of memory requested
+ * @man:
+ *
+ * This is called by ttm_bo_init_mm() when a buffer object is being
+ * initialized.
+ */
 static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 				struct ttm_mem_type_manager *man)
 {
@@ -161,6 +203,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
+		/* GTT memory  */
 		man->func = &amdgpu_gtt_mgr_func;
 		man->gpu_offset = adev->gmc.gart_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
@@ -193,6 +236,14 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	return 0;
 }
 
+/**
+ * amdgpu_evict_flags - Compute placement flags
+ *
+ * @bo: The buffer object to evict
+ * @placement: Possible destination(s) for evicted BO
+ *
+ * Fill in placement data when ttm_bo_evict() is called
+ */
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
@@ -204,12 +255,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
 	};
 
+	/* Don't handle scatter gather BOs */
 	if (bo->type == ttm_bo_type_sg) {
 		placement->num_placement = 0;
 		placement->num_busy_placement = 0;
 		return;
 	}
 
+	/* Object isn't an AMDGPU object so ignore */
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
 		placement->placement = &placements;
 		placement->busy_placement = &placements;
@@ -217,10 +270,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		placement->num_busy_placement = 1;
 		return;
 	}
+
 	abo = ttm_to_amdgpu_bo(bo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
 		if (!adev->mman.buffer_funcs_enabled) {
+			/* Move to system memory */
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
@@ -238,6 +293,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 			abo->placement.busy_placement = &abo->placements[1];
 			abo->placement.num_busy_placement = 1;
 		} else {
+			/* Move to GTT memory */
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 		}
 		break;
@@ -248,6 +304,15 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	*placement = abo->placement;
 }
 
+/**
+ * amdgpu_verify_access - Verify access for a mmap call
+ *
+ * @bo:		The buffer object to map
+ * @filp:	The file pointer from the process performing the mmap
+ *
+ * This is called by ttm_bo_mmap() to verify whether a process
+ * has the right to mmap a BO to their process space.
+ */
 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
@@ -265,6 +330,15 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 					  filp->private_data);
 }
 
+/**
+ * amdgpu_move_null - Register memory for a buffer object
+ *
+ * @bo:			The bo to assign the memory to
+ * @new_mem:	The memory to be assigned.
+ *
+ * Assign the memory from new_mem to the memory of the buffer object
+ * bo.
+ */
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
 			     struct ttm_mem_reg *new_mem)
 {
@@ -275,6 +349,10 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 	new_mem->mm_node = NULL;
 }
 
+/**
+ * amdgpu_mm_node_addr -	Compute the GPU relative offset of a GTT
+ * 							buffer.
+ */
 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 				    struct drm_mm_node *mm_node,
 				    struct ttm_mem_reg *mem)
@@ -289,9 +367,10 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
- *  corresponding to @offset. It also modifies the offset to be
- *  within the drm_mm_node returned
+ * amdgpu_find_mm_node -	Helper function finds the drm_mm_node
+ *  						corresponding to @offset. It also modifies
+ * 							the offset to be within the drm_mm_node
+ * 							returned
  */
 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
 					       unsigned long *offset)
@@ -430,7 +509,12 @@ error:
 	return r;
 }
 
-
+/**
+ * amdgpu_move_blit - Copy an entire buffer to another buffer
+ *
+ * This is a helper called by amdgpu_bo_move() and
+ * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
+ */
 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			    bool evict, bool no_wait_gpu,
 			    struct ttm_mem_reg *new_mem,
@@ -465,6 +549,11 @@ error:
 	return r;
 }
 
+/**
+ * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
+ *
+ * Called by amdgpu_bo_move().
+ */
 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 				struct ttm_operation_ctx *ctx,
 				struct ttm_mem_reg *new_mem)
@@ -477,6 +566,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 	int r;
 
 	adev = amdgpu_ttm_adev(bo->bdev);
+
+	/* create space/pages for new_mem in GTT space */
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -491,25 +582,36 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 		return r;
 	}
 
+	/* set caching flags */
 	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
 
+	/* Bind the memory to the GTT space */
 	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
+
+	/* blit VRAM to GTT */
 	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
+
+	/* move BO (in tmp_mem) to new_mem */
 	r = ttm_bo_move_ttm(bo, ctx, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
 }
 
+/**
+ * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
+ *
+ * Called by amdgpu_bo_move().
+ */
 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 				struct ttm_operation_ctx *ctx,
 				struct ttm_mem_reg *new_mem)
@@ -522,6 +624,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 	int r;
 
 	adev = amdgpu_ttm_adev(bo->bdev);
+
+	/* make space in GTT for old_mem buffer */
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	placement.num_placement = 1;
@@ -535,10 +639,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 	if (unlikely(r)) {
 		return r;
 	}
+
+	/* move/bind old memory to GTT space */
 	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
+
+	/* copy to VRAM */
 	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
@@ -548,6 +656,11 @@ out_cleanup:
 	return r;
 }
 
+/**
+ * amdgpu_bo_move - Move a buffer object to a new memory location
+ *
+ * Called by ttm_bo_handle_move_mem()
+ */
 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 			  struct ttm_operation_ctx *ctx,
 			  struct ttm_mem_reg *new_mem)
@@ -613,6 +726,11 @@ memcpy:
 	return 0;
 }
 
+/**
+ * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
+ *
+ * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
+ */
 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -690,6 +808,14 @@ struct amdgpu_ttm_tt {
 	uint32_t		last_set_pages;
 };
 
+/**
+ * amdgpu_ttm_tt_get_user_pages - 	Pin pages of memory pointed to
+ * 									by a USERPTR pointer to memory
+ *
+ * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
+ * This provides a wrapper around the get_user_pages() call to provide
+ * device accessible pages that back user memory.
+ */
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -719,6 +845,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 		}
 	}
 
+	/* loop enough times using contiguous pages of memory */
 	do {
 		unsigned num_pages = ttm->num_pages - pinned;
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
@@ -757,6 +884,14 @@ release_pages:
 	return r;
 }
 
+/**
+ * amdgpu_ttm_tt_set_user_pages - 	Copy pages in, putting old pages
+ * 									as necessary.
+ *
+ * Called by amdgpu_cs_list_validate().  This creates the page list
+ * that backs user memory and will ultimately be mapped into the device
+ * address space.
+ */
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -771,6 +906,11 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 	}
 }
 
+/**
+ * amdgpu_ttm_tt_mark_user_pages - Mark pages as dirty
+ *
+ * Called while unpinning userptr pages
+ */
 void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -789,7 +929,12 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
 	}
 }
 
-/* prepare the sg table with the user pages */
+/**
+ * amdgpu_ttm_tt_pin_userptr - 	prepare the sg table with the
+ * 								user pages
+ *
+ * Called by amdgpu_ttm_backend_bind()
+ **/
 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
@@ -801,17 +946,20 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 	enum dma_data_direction direction = write ?
 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
+	/* Allocate an SG array and squash pages into it */
 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
 				      ttm->num_pages << PAGE_SHIFT,
 				      GFP_KERNEL);
 	if (r)
 		goto release_sg;
 
+	/* Map SG to device */
 	r = -ENOMEM;
 	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 	if (nents != ttm->sg->nents)
 		goto release_sg;
 
+	/* convert SG to linear array of pages and dma addresses */
 	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 					 gtt->ttm.dma_address, ttm->num_pages);
 
@@ -822,6 +970,9 @@ release_sg:
 	return r;
 }
 
+/**
+ * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
+ */
 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
@@ -835,9 +986,10 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	if (!ttm->sg->sgl)
 		return;
 
-	/* free the sg table and pages again */
+	/* unmap the pages mapped to the device */
 	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 
+	/* mark the pages as dirty */
 	amdgpu_ttm_tt_mark_user_pages(ttm);
 
 	sg_free_table(ttm->sg);
@@ -882,6 +1034,12 @@ gart_bind_fail:
 	return r;
 }
 
+/**
+ * amdgpu_ttm_backend_bind - Bind GTT memory
+ *
+ * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
+ * This handles binding GTT memory to the device address space.
+ */
 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
@@ -912,7 +1070,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 		return 0;
 	}
 
+	/* compute PTE flags relevant to this BO memory */
 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
+
+	/* bind pages into GART page tables */
 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
@@ -923,6 +1084,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	return r;
 }
 
+/**
+ * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
+ */
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
@@ -938,6 +1102,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
 		return 0;
 
+	/* allocate GTT space */
 	tmp = bo->mem;
 	tmp.mm_node = NULL;
 	placement.num_placement = 1;
@@ -953,7 +1118,10 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	if (unlikely(r))
 		return r;
 
+	/* compute PTE flags for this buffer object */
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+
+	/* Bind pages */
 	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
 	r = amdgpu_ttm_gart_bind(adev, bo, flags);
 	if (unlikely(r)) {
@@ -969,6 +1137,12 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	return 0;
 }
 
+/**
+ * amdgpu_ttm_recover_gart - Rebind GTT pages
+ *
+ * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
+ * rebind GTT pages during a GPU reset.
+ */
 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -984,12 +1158,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 	return r;
 }
 
+/**
+ * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
+ *
+ * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
+ * ttm_tt_destroy().
+ */
 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int r;
 
+	/* if the pages have userptr pinning then clear that first */
 	if (gtt->userptr)
 		amdgpu_ttm_tt_unpin_userptr(ttm);
 
@@ -1021,6 +1202,13 @@ static struct ttm_backend_func amdgpu_backend_func = {
 	.destroy = &amdgpu_ttm_backend_destroy,
 };
 
+/**
+ * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
+ *
+ * @bo: The buffer object to create a GTT ttm_tt object around
+ *
+ * Called by ttm_tt_create().
+ */
 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 					   uint32_t page_flags)
 {
@@ -1034,6 +1222,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 		return NULL;
 	}
 	gtt->ttm.ttm.func = &amdgpu_backend_func;
+
+	/* allocate space for the uninitialized page entries */
 	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
 		kfree(gtt);
 		return NULL;
@@ -1041,6 +1231,12 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 	return &gtt->ttm.ttm;
 }
 
+/**
+ * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
+ *
+ * Map the pages of a ttm_tt object to an address space visible
+ * to the underlying device.
+ */
 static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 			struct ttm_operation_ctx *ctx)
 {
@@ -1048,6 +1244,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
+	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
 	if (gtt && gtt->userptr) {
 		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 		if (!ttm->sg)
@@ -1072,9 +1269,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 	}
 #endif
 
+	/* fall back to generic helper to populate the page array
+	 * and map them to the device */
 	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
 }
 
+/**
+ * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
+ *
+ * Unmaps pages of a ttm_tt object from the device address space and
+ * unpopulates the page array backing it.
+ */
 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev;
@@ -1100,9 +1305,21 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	}
 #endif
 
+	/* fall back to generic helper to unmap and unpopulate array */
 	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
 }
 
+/**
+ * amdgpu_ttm_tt_set_userptr -	Initialize userptr GTT ttm_tt
+ * 								for the current task
+ *
+ * @ttm: The ttm_tt object to bind this userptr object to
+ * @addr:  The address in the current tasks VM space to use
+ * @flags: Requirements of userptr object.
+ *
+ * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
+ * to current task
+ */
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 			      uint32_t flags)
 {
@@ -1127,6 +1344,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 	return 0;
 }
 
+/**
+ * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
+ */
 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1140,6 +1360,12 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 	return gtt->usertask->mm;
 }
 
+/**
+ * amdgpu_ttm_tt_affect_userptr -	Determine if a ttm_tt object lays
+ * 									inside an address range for the
+ * 									current task.
+ *
+ */
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 				  unsigned long end)
 {
@@ -1150,10 +1376,16 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 	if (gtt == NULL || !gtt->userptr)
 		return false;
 
+	/* Return false if no part of the ttm_tt object lies within
+	 * the range
+	 */
 	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
 	if (gtt->userptr > end || gtt->userptr + size <= start)
 		return false;
 
+	/* Search the lists of tasks that hold this mapping and see
+	 * if current is one of them.  If it is return false.
+	 */
 	spin_lock(&gtt->guptasklock);
 	list_for_each_entry(entry, &gtt->guptasks, list) {
 		if (entry->task == current) {
@@ -1168,6 +1400,10 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 	return true;
 }
 
+/**
+ * amdgpu_ttm_tt_userptr_invalidated -	Has the ttm_tt object been
+ * 										invalidated?
+ */
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated)
 {
@@ -1178,6 +1414,12 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 	return prev_invalidated != *last_invalidated;
 }
 
+/**
+ * amdgpu_ttm_tt_userptr_needs_pages -	Have the pages backing this
+ * 										ttm_tt object been invalidated
+ * 										since the last time they've
+ * 										been set?
+ */
 bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1188,6 +1430,9 @@ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
 	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
 }
 
+/**
+ * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
+ */
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1198,6 +1443,12 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 }
 
+/**
+ * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
+ *
+ * @ttm: The ttm_tt object to compute the flags for
+ * @mem: The memory registry backing this ttm_tt object
+ */
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem)
 {
@@ -1222,6 +1473,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 	return flags;
 }
 
+/**
+ * amdgpu_ttm_bo_eviction_valuable -	Check to see if we can evict
+ * 										a buffer object.
+ *
+ * Return true if eviction is sensible.  Called by
+ * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
+ * which tries to evict buffer objects until it can find space
+ * for a new object and by ttm_bo_force_list_clean() which is
+ * used to clean out a memory space.
+ */
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 					    const struct ttm_place *place)
 {
@@ -1268,6 +1529,19 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	return ttm_bo_eviction_valuable(bo, place);
 }
 
+/**
+ * amdgpu_ttm_access_memory -	Read or Write memory that backs a
+ * 								buffer object.
+ *
+ * @bo:  The buffer object to read/write
+ * @offset:  Offset into buffer object
+ * @buf:  Secondary buffer to write/read from
+ * @len: Length in bytes of access
+ * @write:  true if writing
+ *
+ * This is used to access VRAM that backs a buffer object via MMIO
+ * access for debugging purposes.
+ */
 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 				    unsigned long offset,
 				    void *buf, int len, int write)
@@ -1444,13 +1718,22 @@ error_create:
 	adev->fw_vram_usage.reserved_bo = NULL;
 	return r;
 }
-
+/**
+ * amdgpu_ttm_init -	Init the memory management (ttm) as well as
+ * 						various gtt/vram related fields.
+ *
+ * This initializes all of the memory space pools that the TTM layer
+ * will need such as the GTT space (system memory mapped to the device),
+ * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
+ * can be mapped per VMID.
+ */
 int amdgpu_ttm_init(struct amdgpu_device *adev)
 {
 	uint64_t gtt_size;
 	int r;
 	u64 vis_vram_limit;
 
+	/* initialize global references for vram/gtt */
 	r = amdgpu_ttm_global_init(adev);
 	if (r) {
 		return r;
@@ -1471,6 +1754,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	/* We opt to avoid OOM on system pages allocations */
 	adev->mman.bdev.no_retry = true;
 
+	/* Initialize VRAM pool with all of VRAM divided into pages */
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 				adev->gmc.real_vram_size >> PAGE_SHIFT);
 	if (r) {
@@ -1500,6 +1784,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		return r;
 	}
 
+	/* allocate memory as required for VGA
+	 * This is used for VGA emulation and pre-OS scanout buffers to
+	 * avoid display artifacts while transitioning between pre-OS
+	 * and driver.  */
 	if (adev->gmc.stolen_size) {
 		r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
 					    AMDGPU_GEM_DOMAIN_VRAM,
@@ -1511,6 +1799,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
 
+	/* Compute GTT size, either based on 3/4 of the RAM size
+	 * or whatever the user passed on module init */
 	if (amdgpu_gtt_size == -1) {
 		struct sysinfo si;
 
@@ -1521,6 +1811,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	}
 	else
 		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+
+	/* Initialize GTT memory pool */
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
@@ -1529,6 +1821,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
 		 (unsigned)(gtt_size / (1024 * 1024)));
 
+	/* Initialize various on-chip memory pools */
 	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
 	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
 	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
@@ -1568,6 +1861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		}
 	}
 
+	/* Register debugfs entries for amdgpu_ttm */
 	r = amdgpu_ttm_debugfs_init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init debugfs\n");
@@ -1576,11 +1870,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_ttm_late_init -	Handle any late initialization for
+ * 							amdgpu_ttm
+ */
 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
 {
+	/* return the VGA stolen memory (if any) back to VRAM */
 	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
 }
 
+/**
+ * amdgpu_ttm_fini - De-initialize the TTM memory pools
+ */
 void amdgpu_ttm_fini(struct amdgpu_device *adev)
 {
 	if (!adev->mman.initialized)
@@ -1908,6 +2210,11 @@ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
 #endif
 };
 
+/**
+ * amdgpu_ttm_vram_read - Linear read access to VRAM
+ *
+ * Accesses VRAM via MMIO for debugging purposes.
+ */
 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 				    size_t size, loff_t *pos)
 {
@@ -1947,6 +2254,11 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_ttm_vram_write - Linear write access to VRAM
+ *
+ * Accesses VRAM via MMIO for debugging purposes.
+ */
 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
 				    size_t size, loff_t *pos)
 {
@@ -1995,6 +2307,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
 
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 
+/**
+ * amdgpu_ttm_gtt_read - Linear read access to GTT memory
+ */
 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
 				   size_t size, loff_t *pos)
 {
@@ -2042,6 +2357,13 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
 
 #endif
 
+/**
+ * amdgpu_iomem_read - Virtual read access to GPU mapped memory
+ *
+ * This function is used to read memory that has been mapped to the
+ * GPU and the known addresses are not physical addresses but instead
+ * bus addresses (e.g., what you'd put in an IB or ring buffer).
+ */
 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
 				 size_t size, loff_t *pos)
 {
@@ -2050,6 +2372,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
 	ssize_t result = 0;
 	int r;
 
+	/* retrieve the IOMMU domain if any for this device */
 	dom = iommu_get_domain_for_dev(adev->dev);
 
 	while (size) {
@@ -2062,6 +2385,10 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
 
 		bytes = bytes < size ? bytes : size;
 
+		/* Translate the bus address to a physical address.  If
+		 * the domain is NULL it means there is no IOMMU active
+		 * and the address translation is the identity
+		 */
 		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
 
 		pfn = addr >> PAGE_SHIFT;
@@ -2086,6 +2413,13 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_iomem_write - Virtual write access to GPU mapped memory
+ *
+ * This function is used to write memory that has been mapped to the
+ * GPU and the known addresses are not physical addresses but instead
+ * bus addresses (e.g., what you'd put in an IB or ring buffer).
+ */
 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
 				 size_t size, loff_t *pos)
 {

From 652470ac55543fbbdcbce25492a7e370d23a38a0 Mon Sep 17 00:00:00 2001
From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Date: Wed, 16 May 2018 18:54:18 +0530
Subject: [PATCH 1215/1286] drm/scheduler: fix function name prefix in comments
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

That got missed while moving the files outside of amdgpu.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/sched_fence.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 69aab086b913..786b47f15783 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -87,7 +87,7 @@ static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
 }
 
 /**
- * amd_sched_fence_free - free up the fence memory
+ * drm_sched_fence_free - free up the fence memory
  *
  * @rcu: RCU callback head
  *
@@ -103,7 +103,7 @@ static void drm_sched_fence_free(struct rcu_head *rcu)
 }
 
 /**
- * amd_sched_fence_release_scheduled - callback that fence can be freed
+ * drm_sched_fence_release_scheduled - callback that fence can be freed
  *
  * @fence: fence
  *
@@ -118,7 +118,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
 }
 
 /**
- * amd_sched_fence_release_finished - drop extra reference
+ * drm_sched_fence_release_finished - drop extra reference
  *
  * @f: fence
  *

From 6f0fd919471cf2477e86e2be9b53ecae37b0e815 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 17 May 2018 12:33:34 -0500
Subject: [PATCH 1216/1286] drm/amdgpu: count fences from all uvd instances in
 idle handler
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Current multi-UVD hardware uses a single clock and power source,
so handle all instances in the idle handler.

Reviewed-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 0772680371a1..be2917c6698e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1146,7 +1146,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
-	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);
+	unsigned fences = 0, i;
+
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
+	}
 
 	if (fences == 0) {
 		if (adev->pm.dpm_enabled) {

From 4bd2c5dd763866b827dd7e95b9ea71c47fa06126 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 17 May 2018 12:45:52 -0500
Subject: [PATCH 1217/1286] drm/amdgpu: Take uvd encode rings into account in
 idle work (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Take the encode rings into account in the idle work handler.

v2: fix typo: s/num_uvd_inst/num_enc_rings/

Reviewed-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index be2917c6698e..bcf68f80bbf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1146,10 +1146,13 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
-	unsigned fences = 0, i;
+	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
+		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
+			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
+		}
 	}
 
 	if (fences == 0) {

From 646e906d1d64fdc6bb1a27dac45144dfd8996071 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 17 May 2018 13:03:05 -0500
Subject: [PATCH 1218/1286] drm/amdgpu: Take vcn encode rings into account in
 idle work
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Take the encode rings into account in the idle work handler.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e5d234cf804f..60468385e6b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -205,6 +205,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, vcn.idle_work.work);
 	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+	unsigned i;
+
+	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+	}
 
 	if (fences == 0) {
 		if (adev->pm.dpm_enabled) {

From b79655c37b209315d3b533f6d63a3d6f5fcb6f84 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 12 Apr 2018 22:40:02 -0400
Subject: [PATCH 1219/1286] drm/amd/display: Cleanup unused SetPlaneConfig

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../display/dc/dce110/dce110_hw_sequencer.c   | 69 -------------------
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 10 ---
 .../gpu/drm/amd/display/dc/inc/hw_sequencer.h |  5 --
 3 files changed, 84 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 2288d0aa773b..ae500421edb6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2269,74 +2269,6 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
 
 	pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
 }
-
-/**
- * TODO REMOVE, USE UPDATE INSTEAD
- */
-static void set_plane_config(
-	const struct dc *dc,
-	struct pipe_ctx *pipe_ctx,
-	struct resource_context *res_ctx)
-{
-	struct mem_input *mi = pipe_ctx->plane_res.mi;
-	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
-	struct xfm_grph_csc_adjustment adjust;
-	struct out_csc_color_matrix tbl_entry;
-	unsigned int i;
-
-	memset(&adjust, 0, sizeof(adjust));
-	memset(&tbl_entry, 0, sizeof(tbl_entry));
-	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
-
-	dce_enable_fe_clock(dc->hwseq, mi->inst, true);
-
-	set_default_colors(pipe_ctx);
-	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
-		tbl_entry.color_space =
-			pipe_ctx->stream->output_color_space;
-
-		for (i = 0; i < 12; i++)
-			tbl_entry.regval[i] =
-			pipe_ctx->stream->csc_color_matrix.matrix[i];
-
-		pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
-				(pipe_ctx->plane_res.xfm, &tbl_entry);
-	}
-
-	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
-		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
-
-		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
-			adjust.temperature_matrix[i] =
-				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
-	}
-
-	pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
-
-	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
-	program_scaler(dc, pipe_ctx);
-
-	program_surface_visibility(dc, pipe_ctx);
-
-	mi->funcs->mem_input_program_surface_config(
-			mi,
-			plane_state->format,
-			&plane_state->tiling_info,
-			&plane_state->plane_size,
-			plane_state->rotation,
-			NULL,
-			false);
-	if (mi->funcs->set_blank)
-		mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
-
-	if (dc->config.gpu_vm_support)
-		mi->funcs->mem_input_program_pte_vm(
-				pipe_ctx->plane_res.mi,
-				plane_state->format,
-				&plane_state->tiling_info,
-				plane_state->rotation);
-}
-
 static void update_plane_addr(const struct dc *dc,
 		struct pipe_ctx *pipe_ctx)
 {
@@ -3023,7 +2955,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.init_hw = init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
-	.set_plane_config = set_plane_config,
 	.update_plane_addr = update_plane_addr,
 	.update_pending_status = dce110_update_pending_status,
 	.set_input_transfer_func = dce110_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 572fa601a0eb..8adb8dc44af5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2487,15 +2487,6 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
 			set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
 }
 
-static void set_plane_config(
-	const struct dc *dc,
-	struct pipe_ctx *pipe_ctx,
-	struct resource_context *res_ctx)
-{
-	/* TODO */
-	program_gamut_remap(pipe_ctx);
-}
-
 static void dcn10_config_stereo_parameters(
 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
 {
@@ -2673,7 +2664,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.init_hw = dcn10_init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
-	.set_plane_config = set_plane_config,
 	.update_plane_addr = dcn10_update_plane_addr,
 	.update_dchub = dcn10_update_dchub,
 	.update_pending_status = dcn10_update_pending_status,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 29abf3ecb39c..63fc6c499789 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -83,11 +83,6 @@ struct hw_sequencer_funcs {
 			int num_planes,
 			struct dc_state *context);
 
-	void (*set_plane_config)(
-			const struct dc *dc,
-			struct pipe_ctx *pipe_ctx,
-			struct resource_context *res_ctx);
-
 	void (*program_gamut_remap)(
 			struct pipe_ctx *pipe_ctx);
 

From eb0e515464e4a1be730c7ac7a01c3ba04c98ea97 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Wed, 18 Apr 2018 11:37:53 -0400
Subject: [PATCH 1220/1286] drm/amd/display: get rid of 32.32 unsigned fixed
 point

32.32 is redundant; 31.32 does everything we use 32.32 for.
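
A minimal sketch of the 31.32 representation that is kept (illustrative
names, not the dc_fixpt API; no rounding or overflow handling): the value
is a signed 64-bit integer with 32 fractional bits, so an integer a is
stored as a << 32 and a fraction num/den as (num << 32) / den.

	#include <stdint.h>

	struct q31_32 {
		int64_t value;	/* sign + 31 integer bits + 32 fractional bits */
	};

	static struct q31_32 q31_32_from_int(int32_t a)
	{
		struct q31_32 r = { (int64_t)a << 32 };
		return r;
	}

	/* assumes small, non-negative inputs; the real helper rounds the
	 * result and handles sign and overflow */
	static struct q31_32 q31_32_from_fraction(int64_t num, int64_t den)
	{
		struct q31_32 r = { (num << 32) / den };
		return r;
	}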

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/amdgpu_dm/amdgpu_dm_color.c   |  14 +-
 .../gpu/drm/amd/display/dc/basics/Makefile    |   2 +-
 .../drm/amd/display/dc/basics/conversion.c    |  28 +-
 .../drm/amd/display/dc/basics/fixpt31_32.c    | 176 +++----
 .../drm/amd/display/dc/basics/fixpt32_32.c    | 161 -------
 .../drm/amd/display/dc/calcs/custom_float.c   |  46 +-
 .../gpu/drm/amd/display/dc/calcs/dcn_calcs.c  |   8 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |  12 +-
 .../gpu/drm/amd/display/dc/core/dc_resource.c |  68 +--
 drivers/gpu/drm/amd/display/dc/dc_dp_types.h  |   2 +
 drivers/gpu/drm/amd/display/dc/dc_types.h     |   2 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_abm.c  |   2 +-
 .../drm/amd/display/dc/dce/dce_clock_source.c |  60 +--
 .../gpu/drm/amd/display/dc/dce/dce_clocks.c   |  26 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c |   2 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c  |   6 +-
 .../drm/amd/display/dc/dce/dce_scl_filters.c  |  48 +-
 .../amd/display/dc/dce/dce_stream_encoder.c   |   8 +-
 .../drm/amd/display/dc/dce/dce_transform.c    |  26 +-
 .../display/dc/dce110/dce110_hw_sequencer.c   |  36 +-
 .../display/dc/dce110/dce110_transform_v.c    |   8 +-
 .../amd/display/dc/dcn10/dcn10_cm_common.c    |  86 ++--
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c  |   2 +-
 .../drm/amd/display/dc/dcn10/dcn10_dpp_cm.c   |   6 +-
 .../drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c |  38 +-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c |   4 +-
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  28 +-
 .../display/dc/dcn10/dcn10_stream_encoder.c   |   8 +-
 drivers/gpu/drm/amd/display/dc/irq_types.h    |   2 +
 .../gpu/drm/amd/display/include/fixed31_32.h  | 118 ++---
 .../gpu/drm/amd/display/include/fixed32_32.h  | 129 -----
 .../amd/display/modules/color/color_gamma.c   | 446 +++++++++---------
 32 files changed, 661 insertions(+), 947 deletions(-)
 delete mode 100644 drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
 delete mode 100644 drivers/gpu/drm/amd/display/include/fixed32_32.h

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index e3d90e918d1b..b329393307e5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -88,9 +88,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
 			g = drm_color_lut_extract(lut[i].green, 16);
 			b = drm_color_lut_extract(lut[i].blue, 16);
 
-			gamma->entries.red[i] = dal_fixed31_32_from_int(r);
-			gamma->entries.green[i] = dal_fixed31_32_from_int(g);
-			gamma->entries.blue[i] = dal_fixed31_32_from_int(b);
+			gamma->entries.red[i] = dc_fixpt_from_int(r);
+			gamma->entries.green[i] = dc_fixpt_from_int(g);
+			gamma->entries.blue[i] = dc_fixpt_from_int(b);
 		}
 		return;
 	}
@@ -101,9 +101,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
 		g = drm_color_lut_extract(lut[i].green, 16);
 		b = drm_color_lut_extract(lut[i].blue, 16);
 
-		gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE);
-		gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE);
-		gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE);
+		gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE);
+		gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE);
+		gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE);
 	}
 }
 
@@ -208,7 +208,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
 	for (i = 0; i < 12; i++) {
 		/* Skip 4th element */
 		if (i % 4 == 3) {
-			stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
+			stream->gamut_remap_matrix.matrix[i] = dc_fixpt_zero;
 			continue;
 		}
 
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index bca33bd9a0d2..b49ea96b5dae 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -24,7 +24,7 @@
 # It provides the general basic services required by other DAL
 # subcomponents.
 
-BASICS = conversion.o fixpt31_32.o fixpt32_32.o \
+BASICS = conversion.o fixpt31_32.o \
 	logger.o log_helpers.o vector.o
 
 AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 310964915a83..50b47f11875c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -41,22 +41,22 @@ uint16_t fixed_point_to_int_frac(
 
 	uint16_t result;
 
-	uint16_t d = (uint16_t)dal_fixed31_32_floor(
-		dal_fixed31_32_abs(
+	uint16_t d = (uint16_t)dc_fixpt_floor(
+		dc_fixpt_abs(
 			arg));
 
 	if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
-		numerator = (uint16_t)dal_fixed31_32_round(
-			dal_fixed31_32_mul_int(
+		numerator = (uint16_t)dc_fixpt_round(
+			dc_fixpt_mul_int(
 				arg,
 				divisor));
 	else {
-		numerator = dal_fixed31_32_floor(
-			dal_fixed31_32_sub(
-				dal_fixed31_32_from_int(
+		numerator = dc_fixpt_floor(
+			dc_fixpt_sub(
+				dc_fixpt_from_int(
 					1LL << integer_bits),
-				dal_fixed31_32_recip(
-					dal_fixed31_32_from_int(
+				dc_fixpt_recip(
+					dc_fixpt_from_int(
 						divisor))));
 	}
 
@@ -66,8 +66,8 @@ uint16_t fixed_point_to_int_frac(
 		result = (uint16_t)(
 		(1 << (integer_bits + fractional_bits + 1)) + numerator);
 
-	if ((result != 0) && dal_fixed31_32_lt(
-		arg, dal_fixed31_32_zero))
+	if ((result != 0) && dc_fixpt_lt(
+		arg, dc_fixpt_zero))
 		result |= 1 << (integer_bits + fractional_bits);
 
 	return result;
@@ -84,15 +84,15 @@ void convert_float_matrix(
 	uint32_t buffer_size)
 {
 	const struct fixed31_32 min_2_13 =
-		dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
+		dc_fixpt_from_fraction(S2D13_MIN, DIVIDER);
 	const struct fixed31_32 max_2_13 =
-		dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
+		dc_fixpt_from_fraction(S2D13_MAX, DIVIDER);
 	uint32_t i;
 
 	for (i = 0; i < buffer_size; ++i) {
 		uint32_t reg_value =
 				fixed_point_to_int_frac(
-					dal_fixed31_32_clamp(
+					dc_fixpt_clamp(
 						flt[i],
 						min_2_13,
 						max_2_13),
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 7191c3213743..e398ecdf742c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -64,7 +64,7 @@ static inline unsigned long long complete_integer_division_u64(
 #define GET_FRACTIONAL_PART(x) \
 	(FRACTIONAL_PART_MASK & (x))
 
-struct fixed31_32 dal_fixed31_32_from_fraction(
+struct fixed31_32 dc_fixpt_from_fraction(
 	long long numerator,
 	long long denominator)
 {
@@ -118,7 +118,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_from_int_nonconst(
+struct fixed31_32 dc_fixpt_from_int_nonconst(
 	long long arg)
 {
 	struct fixed31_32 res;
@@ -130,7 +130,7 @@ struct fixed31_32 dal_fixed31_32_from_int_nonconst(
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_shl(
+struct fixed31_32 dc_fixpt_shl(
 	struct fixed31_32 arg,
 	unsigned char shift)
 {
@@ -144,7 +144,7 @@ struct fixed31_32 dal_fixed31_32_shl(
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_add(
+struct fixed31_32 dc_fixpt_add(
 	struct fixed31_32 arg1,
 	struct fixed31_32 arg2)
 {
@@ -158,7 +158,7 @@ struct fixed31_32 dal_fixed31_32_add(
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_sub(
+struct fixed31_32 dc_fixpt_sub(
 	struct fixed31_32 arg1,
 	struct fixed31_32 arg2)
 {
@@ -172,7 +172,7 @@ struct fixed31_32 dal_fixed31_32_sub(
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_mul(
+struct fixed31_32 dc_fixpt_mul(
 	struct fixed31_32 arg1,
 	struct fixed31_32 arg2)
 {
@@ -213,7 +213,7 @@ struct fixed31_32 dal_fixed31_32_mul(
 	tmp = arg1_fra * arg2_fra;
 
 	tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
-		(tmp >= (unsigned long long)dal_fixed31_32_half.value);
+		(tmp >= (unsigned long long)dc_fixpt_half.value);
 
 	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
 
@@ -225,7 +225,7 @@ struct fixed31_32 dal_fixed31_32_mul(
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_sqr(
+struct fixed31_32 dc_fixpt_sqr(
 	struct fixed31_32 arg)
 {
 	struct fixed31_32 res;
@@ -257,7 +257,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
 	tmp = arg_fra * arg_fra;
 
 	tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
-		(tmp >= (unsigned long long)dal_fixed31_32_half.value);
+		(tmp >= (unsigned long long)dc_fixpt_half.value);
 
 	ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
 
@@ -266,7 +266,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_recip(
+struct fixed31_32 dc_fixpt_recip(
 	struct fixed31_32 arg)
 {
 	/*
@@ -276,41 +276,41 @@ struct fixed31_32 dal_fixed31_32_recip(
 
 	ASSERT(arg.value);
 
-	return dal_fixed31_32_from_fraction(
-		dal_fixed31_32_one.value,
+	return dc_fixpt_from_fraction(
+		dc_fixpt_one.value,
 		arg.value);
 }
 
-struct fixed31_32 dal_fixed31_32_sinc(
+struct fixed31_32 dc_fixpt_sinc(
 	struct fixed31_32 arg)
 {
 	struct fixed31_32 square;
 
-	struct fixed31_32 res = dal_fixed31_32_one;
+	struct fixed31_32 res = dc_fixpt_one;
 
 	int n = 27;
 
 	struct fixed31_32 arg_norm = arg;
 
-	if (dal_fixed31_32_le(
-		dal_fixed31_32_two_pi,
-		dal_fixed31_32_abs(arg))) {
-		arg_norm = dal_fixed31_32_sub(
+	if (dc_fixpt_le(
+		dc_fixpt_two_pi,
+		dc_fixpt_abs(arg))) {
+		arg_norm = dc_fixpt_sub(
 			arg_norm,
-			dal_fixed31_32_mul_int(
-				dal_fixed31_32_two_pi,
+			dc_fixpt_mul_int(
+				dc_fixpt_two_pi,
 				(int)div64_s64(
 					arg_norm.value,
-					dal_fixed31_32_two_pi.value)));
+					dc_fixpt_two_pi.value)));
 	}
 
-	square = dal_fixed31_32_sqr(arg_norm);
+	square = dc_fixpt_sqr(arg_norm);
 
 	do {
-		res = dal_fixed31_32_sub(
-			dal_fixed31_32_one,
-			dal_fixed31_32_div_int(
-				dal_fixed31_32_mul(
+		res = dc_fixpt_sub(
+			dc_fixpt_one,
+			dc_fixpt_div_int(
+				dc_fixpt_mul(
 					square,
 					res),
 				n * (n - 1)));
@@ -319,37 +319,37 @@ struct fixed31_32 dal_fixed31_32_sinc(
 	} while (n > 2);
 
 	if (arg.value != arg_norm.value)
-		res = dal_fixed31_32_div(
-			dal_fixed31_32_mul(res, arg_norm),
+		res = dc_fixpt_div(
+			dc_fixpt_mul(res, arg_norm),
 			arg);
 
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_sin(
+struct fixed31_32 dc_fixpt_sin(
 	struct fixed31_32 arg)
 {
-	return dal_fixed31_32_mul(
+	return dc_fixpt_mul(
 		arg,
-		dal_fixed31_32_sinc(arg));
+		dc_fixpt_sinc(arg));
 }
 
-struct fixed31_32 dal_fixed31_32_cos(
+struct fixed31_32 dc_fixpt_cos(
 	struct fixed31_32 arg)
 {
 	/* TODO implement argument normalization */
 
-	const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
+	const struct fixed31_32 square = dc_fixpt_sqr(arg);
 
-	struct fixed31_32 res = dal_fixed31_32_one;
+	struct fixed31_32 res = dc_fixpt_one;
 
 	int n = 26;
 
 	do {
-		res = dal_fixed31_32_sub(
-			dal_fixed31_32_one,
-			dal_fixed31_32_div_int(
-				dal_fixed31_32_mul(
+		res = dc_fixpt_sub(
+			dc_fixpt_one,
+			dc_fixpt_div_int(
+				dc_fixpt_mul(
 					square,
 					res),
 				n * (n - 1)));
@@ -372,31 +372,31 @@ static struct fixed31_32 fixed31_32_exp_from_taylor_series(
 {
 	unsigned int n = 9;
 
-	struct fixed31_32 res = dal_fixed31_32_from_fraction(
+	struct fixed31_32 res = dc_fixpt_from_fraction(
 		n + 2,
 		n + 1);
 	/* TODO find correct res */
 
-	ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
+	ASSERT(dc_fixpt_lt(arg, dc_fixpt_one));
 
 	do
-		res = dal_fixed31_32_add(
-			dal_fixed31_32_one,
-			dal_fixed31_32_div_int(
-				dal_fixed31_32_mul(
+		res = dc_fixpt_add(
+			dc_fixpt_one,
+			dc_fixpt_div_int(
+				dc_fixpt_mul(
 					arg,
 					res),
 				n));
 	while (--n != 1);
 
-	return dal_fixed31_32_add(
-		dal_fixed31_32_one,
-		dal_fixed31_32_mul(
+	return dc_fixpt_add(
+		dc_fixpt_one,
+		dc_fixpt_mul(
 			arg,
 			res));
 }
 
-struct fixed31_32 dal_fixed31_32_exp(
+struct fixed31_32 dc_fixpt_exp(
 	struct fixed31_32 arg)
 {
 	/*
@@ -406,44 +406,44 @@ struct fixed31_32 dal_fixed31_32_exp(
 	 * where m = round(x / ln(2)), r = x - m * ln(2)
 	 */
 
-	if (dal_fixed31_32_le(
-		dal_fixed31_32_ln2_div_2,
-		dal_fixed31_32_abs(arg))) {
-		int m = dal_fixed31_32_round(
-			dal_fixed31_32_div(
+	if (dc_fixpt_le(
+		dc_fixpt_ln2_div_2,
+		dc_fixpt_abs(arg))) {
+		int m = dc_fixpt_round(
+			dc_fixpt_div(
 				arg,
-				dal_fixed31_32_ln2));
+				dc_fixpt_ln2));
 
-		struct fixed31_32 r = dal_fixed31_32_sub(
+		struct fixed31_32 r = dc_fixpt_sub(
 			arg,
-			dal_fixed31_32_mul_int(
-				dal_fixed31_32_ln2,
+			dc_fixpt_mul_int(
+				dc_fixpt_ln2,
 				m));
 
 		ASSERT(m != 0);
 
-		ASSERT(dal_fixed31_32_lt(
-			dal_fixed31_32_abs(r),
-			dal_fixed31_32_one));
+		ASSERT(dc_fixpt_lt(
+			dc_fixpt_abs(r),
+			dc_fixpt_one));
 
 		if (m > 0)
-			return dal_fixed31_32_shl(
+			return dc_fixpt_shl(
 				fixed31_32_exp_from_taylor_series(r),
 				(unsigned char)m);
 		else
-			return dal_fixed31_32_div_int(
+			return dc_fixpt_div_int(
 				fixed31_32_exp_from_taylor_series(r),
 				1LL << -m);
 	} else if (arg.value != 0)
 		return fixed31_32_exp_from_taylor_series(arg);
 	else
-		return dal_fixed31_32_one;
+		return dc_fixpt_one;
 }
 
-struct fixed31_32 dal_fixed31_32_log(
+struct fixed31_32 dc_fixpt_log(
 	struct fixed31_32 arg)
 {
-	struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
+	struct fixed31_32 res = dc_fixpt_neg(dc_fixpt_one);
 	/* TODO improve 1st estimation */
 
 	struct fixed31_32 error;
@@ -453,15 +453,15 @@ struct fixed31_32 dal_fixed31_32_log(
 	/* TODO if arg is zero, return -INF */
 
 	do {
-		struct fixed31_32 res1 = dal_fixed31_32_add(
-			dal_fixed31_32_sub(
+		struct fixed31_32 res1 = dc_fixpt_add(
+			dc_fixpt_sub(
 				res,
-				dal_fixed31_32_one),
-			dal_fixed31_32_div(
+				dc_fixpt_one),
+			dc_fixpt_div(
 				arg,
-				dal_fixed31_32_exp(res)));
+				dc_fixpt_exp(res)));
 
-		error = dal_fixed31_32_sub(
+		error = dc_fixpt_sub(
 			res,
 			res1);
 
@@ -472,17 +472,17 @@ struct fixed31_32 dal_fixed31_32_log(
 	return res;
 }
 
-struct fixed31_32 dal_fixed31_32_pow(
+struct fixed31_32 dc_fixpt_pow(
 	struct fixed31_32 arg1,
 	struct fixed31_32 arg2)
 {
-	return dal_fixed31_32_exp(
-		dal_fixed31_32_mul(
-			dal_fixed31_32_log(arg1),
+	return dc_fixpt_exp(
+		dc_fixpt_mul(
+			dc_fixpt_log(arg1),
 			arg2));
 }
 
-int dal_fixed31_32_floor(
+int dc_fixpt_floor(
 	struct fixed31_32 arg)
 {
 	unsigned long long arg_value = abs_i64(arg.value);
@@ -493,12 +493,12 @@ int dal_fixed31_32_floor(
 		return -(int)GET_INTEGER_PART(arg_value);
 }
 
-int dal_fixed31_32_round(
+int dc_fixpt_round(
 	struct fixed31_32 arg)
 {
 	unsigned long long arg_value = abs_i64(arg.value);
 
-	const long long summand = dal_fixed31_32_half.value;
+	const long long summand = dc_fixpt_half.value;
 
 	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
 
@@ -510,13 +510,13 @@ int dal_fixed31_32_round(
 		return -(int)GET_INTEGER_PART(arg_value);
 }
 
-int dal_fixed31_32_ceil(
+int dc_fixpt_ceil(
 	struct fixed31_32 arg)
 {
 	unsigned long long arg_value = abs_i64(arg.value);
 
-	const long long summand = dal_fixed31_32_one.value -
-		dal_fixed31_32_epsilon.value;
+	const long long summand = dc_fixpt_one.value -
+		dc_fixpt_epsilon.value;
 
 	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
 
@@ -531,7 +531,7 @@ int dal_fixed31_32_ceil(
 /* this function is a generic helper to translate fixed point value to
  * specified integer format that will consist of integer_bits integer part and
  * fractional_bits fractional part. For example it is used in
- * dal_fixed31_32_u2d19 to receive 2 bits integer part and 19 bits fractional
+ * dc_fixpt_u2d19 to receive 2 bits integer part and 19 bits fractional
  * part in 32 bits. It is used in hw programming (scaler)
  */
 
@@ -570,35 +570,35 @@ static inline unsigned int clamp_ux_dy(
 		return min_clamp;
 }
 
-unsigned int dal_fixed31_32_u2d19(
+unsigned int dc_fixpt_u2d19(
 	struct fixed31_32 arg)
 {
 	return ux_dy(arg.value, 2, 19);
 }
 
-unsigned int dal_fixed31_32_u0d19(
+unsigned int dc_fixpt_u0d19(
 	struct fixed31_32 arg)
 {
 	return ux_dy(arg.value, 0, 19);
 }
 
-unsigned int dal_fixed31_32_clamp_u0d14(
+unsigned int dc_fixpt_clamp_u0d14(
 	struct fixed31_32 arg)
 {
 	return clamp_ux_dy(arg.value, 0, 14, 1);
 }
 
-unsigned int dal_fixed31_32_clamp_u0d10(
+unsigned int dc_fixpt_clamp_u0d10(
 	struct fixed31_32 arg)
 {
 	return clamp_ux_dy(arg.value, 0, 10, 1);
 }
 
-int dal_fixed31_32_s4d19(
+int dc_fixpt_s4d19(
 	struct fixed31_32 arg)
 {
 	if (arg.value < 0)
-		return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
+		return -(int)ux_dy(dc_fixpt_abs(arg).value, 4, 19);
 	else
 		return ux_dy(arg.value, 4, 19);
 }
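
Illustration (hypothetical caller, not part of the applied diff): the file above only renames the Q31.32 helpers from the dal_fixed31_32_ prefix to dc_fixpt_, so their contracts are unchanged. A minimal sketch of the renamed API using only calls that appear in this series; example_identity_check() and its parameters are made-up names:

#include "include/fixed31_32.h"	/* struct fixed31_32 and the dc_fixpt_* helpers */

/*
 * Returns nonzero when the src:dst ratio is exactly 1:1.  dc_fixpt_u2d19()
 * packs the value as unsigned 2.19 fixed point, so 1.0 comes back as
 * 1 << 19 -- the same test the IDENTITY_RATIO() macro in dce_transform.c
 * below performs.
 */
static int example_identity_check(int src_width, int dst_width)
{
	struct fixed31_32 ratio = dc_fixpt_from_fraction(src_width, dst_width);

	return dc_fixpt_u2d19(ratio) == (1 << 19);
}
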
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
deleted file mode 100644
index 4d3aaa82a07b..000000000000
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dm_services.h"
-#include "include/fixed32_32.h"
-
-static uint64_t u64_div(uint64_t n, uint64_t d)
-{
-	uint32_t i = 0;
-	uint64_t r;
-	uint64_t q = div64_u64_rem(n, d, &r);
-
-	for (i = 0; i < 32; ++i) {
-		uint64_t sbit = q & (1ULL<<63);
-
-		r <<= 1;
-		r |= sbit ? 1 : 0;
-		q <<= 1;
-		if (r >= d) {
-			r -= d;
-			q |= 1;
-		}
-	}
-
-	if (2*r >= d)
-		q += 1;
-	return q;
-}
-
-struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
-{
-	struct fixed32_32 fx;
-
-	fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
-	return fx;
-}
-
-struct fixed32_32 dal_fixed32_32_add(
-	struct fixed32_32 lhs,
-	struct fixed32_32 rhs)
-{
-	struct fixed32_32 fx = {lhs.value + rhs.value};
-
-	ASSERT(fx.value >= rhs.value);
-	return fx;
-}
-
-struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
-{
-	struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
-
-	ASSERT(fx.value >= (uint64_t)rhs << 32);
-	return fx;
-
-}
-struct fixed32_32 dal_fixed32_32_sub(
-	struct fixed32_32 lhs,
-	struct fixed32_32 rhs)
-{
-	struct fixed32_32 fx;
-
-	ASSERT(lhs.value >= rhs.value);
-	fx.value = lhs.value - rhs.value;
-	return fx;
-}
-
-struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
-{
-	struct fixed32_32 fx;
-
-	ASSERT(lhs.value >= ((uint64_t)rhs<<32));
-	fx.value = lhs.value - ((uint64_t)rhs<<32);
-	return fx;
-}
-
-struct fixed32_32 dal_fixed32_32_mul(
-	struct fixed32_32 lhs,
-	struct fixed32_32 rhs)
-{
-	struct fixed32_32 fx;
-	uint64_t lhs_int = lhs.value>>32;
-	uint64_t lhs_frac = (uint32_t)lhs.value;
-	uint64_t rhs_int = rhs.value>>32;
-	uint64_t rhs_frac = (uint32_t)rhs.value;
-	uint64_t ahbh = lhs_int * rhs_int;
-	uint64_t ahbl = lhs_int * rhs_frac;
-	uint64_t albh = lhs_frac * rhs_int;
-	uint64_t albl = lhs_frac * rhs_frac;
-
-	ASSERT((ahbh>>32) == 0);
-
-	fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
-	return fx;
-
-}
-
-struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
-{
-	struct fixed32_32 fx;
-	uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
-	uint64_t lhsf;
-
-	ASSERT((lhsi>>32) == 0);
-	lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
-	ASSERT((lhsi<<32) + lhsf >= lhsf);
-	fx.value = (lhsi<<32) + lhsf;
-	return fx;
-}
-
-struct fixed32_32 dal_fixed32_32_div(
-	struct fixed32_32 lhs,
-	struct fixed32_32 rhs)
-{
-	struct fixed32_32 fx;
-
-	fx.value = u64_div(lhs.value, rhs.value);
-	return fx;
-}
-
-struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
-{
-	struct fixed32_32 fx;
-
-	fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
-	return fx;
-}
-
-uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
-{
-	ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
-	return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
-}
-
-uint32_t dal_fixed32_32_round(struct fixed32_32 v)
-{
-	ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
-	return (v.value + (1ULL<<31))>>32;
-}
-
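
Illustration (hypothetical helper, not from the tree): the unsigned 32.32 helpers deleted above map one-for-one onto the signed Q31.32 ones, which is how the remaining callers are converted later in this series (see calculate_ss() in dce_clock_source.c). A self-contained sketch of that shape; example_feedback_divider() and its parameter names are invented:

#include "include/fixed31_32.h"

/*
 * Feedback divider with a 6-decimal fractional part, e.g.
 * example_feedback_divider(40, 500000) == 40.5.  The pre-patch version of
 * this pattern used dal_fixed32_32_from_fraction()/dal_fixed32_32_add_int()
 * on struct fixed32_32.
 */
static struct fixed31_32 example_feedback_divider(unsigned int integer_part,
						  unsigned int fraction_ppm)
{
	struct fixed31_32 fb_div = dc_fixpt_from_fraction(fraction_ppm, 1000000);

	return dc_fixpt_add_int(fb_div, integer_part);
}
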
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
index 7243c37f569e..31d167bc548f 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
@@ -36,41 +36,41 @@ static bool build_custom_float(
 	uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
 
 	const struct fixed31_32 mantissa_constant_plus_max_fraction =
-		dal_fixed31_32_from_fraction(
+		dc_fixpt_from_fraction(
 			(1LL << (format->mantissa_bits + 1)) - 1,
 			1LL << format->mantissa_bits);
 
 	struct fixed31_32 mantiss;
 
-	if (dal_fixed31_32_eq(
+	if (dc_fixpt_eq(
 		value,
-		dal_fixed31_32_zero)) {
+		dc_fixpt_zero)) {
 		*negative = false;
 		*mantissa = 0;
 		*exponenta = 0;
 		return true;
 	}
 
-	if (dal_fixed31_32_lt(
+	if (dc_fixpt_lt(
 		value,
-		dal_fixed31_32_zero)) {
+		dc_fixpt_zero)) {
 		*negative = format->sign;
-		value = dal_fixed31_32_neg(value);
+		value = dc_fixpt_neg(value);
 	} else {
 		*negative = false;
 	}
 
-	if (dal_fixed31_32_lt(
+	if (dc_fixpt_lt(
 		value,
-		dal_fixed31_32_one)) {
+		dc_fixpt_one)) {
 		uint32_t i = 1;
 
 		do {
-			value = dal_fixed31_32_shl(value, 1);
+			value = dc_fixpt_shl(value, 1);
 			++i;
-		} while (dal_fixed31_32_lt(
+		} while (dc_fixpt_lt(
 			value,
-			dal_fixed31_32_one));
+			dc_fixpt_one));
 
 		--i;
 
@@ -81,15 +81,15 @@ static bool build_custom_float(
 		}
 
 		*exponenta = exp_offset - i;
-	} else if (dal_fixed31_32_le(
+	} else if (dc_fixpt_le(
 		mantissa_constant_plus_max_fraction,
 		value)) {
 		uint32_t i = 1;
 
 		do {
-			value = dal_fixed31_32_shr(value, 1);
+			value = dc_fixpt_shr(value, 1);
 			++i;
-		} while (dal_fixed31_32_lt(
+		} while (dc_fixpt_lt(
 			mantissa_constant_plus_max_fraction,
 			value));
 
@@ -98,23 +98,23 @@ static bool build_custom_float(
 		*exponenta = exp_offset;
 	}
 
-	mantiss = dal_fixed31_32_sub(
+	mantiss = dc_fixpt_sub(
 		value,
-		dal_fixed31_32_one);
+		dc_fixpt_one);
 
-	if (dal_fixed31_32_lt(
+	if (dc_fixpt_lt(
 			mantiss,
-			dal_fixed31_32_zero) ||
-		dal_fixed31_32_lt(
-			dal_fixed31_32_one,
+			dc_fixpt_zero) ||
+		dc_fixpt_lt(
+			dc_fixpt_one,
 			mantiss))
-		mantiss = dal_fixed31_32_zero;
+		mantiss = dc_fixpt_zero;
 	else
-		mantiss = dal_fixed31_32_shl(
+		mantiss = dc_fixpt_shl(
 			mantiss,
 			format->mantissa_bits);
 
-	*mantissa = dal_fixed31_32_floor(mantiss);
+	*mantissa = dc_fixpt_floor(mantiss);
 
 	return true;
 }
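
A worked instance of the normalisation in build_custom_float() above; the format parameters (10 mantissa bits, 5 exponent bits) are picked purely for illustration:

/*
 * exp_offset = (1 << (5 - 1)) - 1 = 15
 * value = 0.75: one left shift reaches 1.5, so i = 1
 *   exponenta = exp_offset - i              = 14
 *   mantissa  = floor((1.5 - 1.0) * 2^10)   = 512
 * which happens to match the exponent/mantissa fields of the IEEE
 * binary16 encoding of 0.75.
 */
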
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index a102c192328d..49a4ea45466d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -873,14 +873,14 @@ bool dcn_validate_bandwidth(
 			}
 
 			if (pipe->plane_state->rotation % 2 == 0) {
-				ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
+				ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value
 					|| v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]);
-				ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
+				ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
 					|| v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]);
 			} else {
-				ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
+				ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value
 					|| v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]);
-				ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
+				ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
 					|| v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
 			}
 			v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index b44cf52090a5..ea5d5ffd5522 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -631,7 +631,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 				/* Need to setup mst link_cap struct here
 				 * otherwise dc_link_detect() will leave mst link_cap
 				 * empty which leads to allocate_mst_payload() has "0"
-				 * pbn_per_slot value leading to exception on dal_fixed31_32_div()
+				 * pbn_per_slot value leading to exception on dc_fixpt_div()
 				 */
 				link->verified_link_cap = link->reported_link_cap;
 				return false;
@@ -2059,10 +2059,10 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
 			&stream->sink->link->cur_link_settings;
 	uint32_t link_rate_in_mbps =
 			link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
-	struct fixed31_32 mbps = dal_fixed31_32_from_int(
+	struct fixed31_32 mbps = dc_fixpt_from_int(
 			link_rate_in_mbps * link_settings->lane_count);
 
-	return dal_fixed31_32_div_int(mbps, 54);
+	return dc_fixpt_div_int(mbps, 54);
 }
 
 static int get_color_depth(enum dc_color_depth color_depth)
@@ -2103,7 +2103,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
 	numerator = 64 * PEAK_FACTOR_X1000;
 	denominator = 54 * 8 * 1000 * 1000;
 	kbps *= numerator;
-	peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
+	peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
 
 	return peak_kbps;
 }
@@ -2230,7 +2230,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
 	/* slot X.Y for only current stream */
 	pbn_per_slot = get_pbn_per_slot(stream);
 	pbn = get_pbn_from_timing(pipe_ctx);
-	avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
+	avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
 
 	stream_encoder->funcs->set_mst_bandwidth(
 		stream_encoder,
@@ -2247,7 +2247,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
 	struct link_encoder *link_encoder = link->link_enc;
 	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
 	struct dp_mst_stream_allocation_table proposed_table = {0};
-	struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
+	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
 	uint8_t i;
 	bool mst_mode = (link->type == dc_connection_mst_branch);
 	DC_LOGGER_INIT(link->ctx->logger);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 345835ff58d1..082458f2097c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -496,9 +496,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 	data->viewport_c.x = data->viewport.x / vpc_div;
 	data->viewport_c.y = data->viewport.y / vpc_div;
 	data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
-			dal_fixed31_32_half : dal_fixed31_32_zero;
+			dc_fixpt_half : dc_fixpt_zero;
 	data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
-			dal_fixed31_32_half : dal_fixed31_32_zero;
+			dc_fixpt_half : dc_fixpt_zero;
 	/* Round up, assume original video size always even dimensions */
 	data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
 	data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
@@ -627,10 +627,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
 			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
 		rect_swap_helper(&surf_src);
 
-	pipe_ctx->plane_res.scl_data.ratios.horz = dal_fixed31_32_from_fraction(
+	pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
 					surf_src.width,
 					plane_state->dst_rect.width);
-	pipe_ctx->plane_res.scl_data.ratios.vert = dal_fixed31_32_from_fraction(
+	pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_from_fraction(
 					surf_src.height,
 					plane_state->dst_rect.height);
 
@@ -688,32 +688,32 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 	 * 	init_bot = init + scaling_ratio
 	 * 	init_c = init + truncated_vp_c_offset(from calculate viewport)
 	 */
-	data->inits.h = dal_fixed31_32_div_int(
-			dal_fixed31_32_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
+	data->inits.h = dc_fixpt_div_int(
+			dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
 
-	data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_div_int(
-			dal_fixed31_32_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
+	data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
+			dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
 
-	data->inits.v = dal_fixed31_32_div_int(
-			dal_fixed31_32_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
+	data->inits.v = dc_fixpt_div_int(
+			dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
 
-	data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_div_int(
-			dal_fixed31_32_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
+	data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
+			dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
 
 
 	/* Adjust for viewport end clip-off */
 	if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
 		int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
-		int int_part = dal_fixed31_32_floor(
-				dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
+		int int_part = dc_fixpt_floor(
+				dc_fixpt_sub(data->inits.h, data->ratios.horz));
 
 		int_part = int_part > 0 ? int_part : 0;
 		data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
 	}
 	if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
 		int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
-		int int_part = dal_fixed31_32_floor(
-				dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
+		int int_part = dc_fixpt_floor(
+				dc_fixpt_sub(data->inits.v, data->ratios.vert));
 
 		int_part = int_part > 0 ? int_part : 0;
 		data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
@@ -721,8 +721,8 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 	if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
 		int vp_clip = (src.x + src.width) / vpc_div -
 				data->viewport_c.width - data->viewport_c.x;
-		int int_part = dal_fixed31_32_floor(
-				dal_fixed31_32_sub(data->inits.h_c, data->ratios.horz_c));
+		int int_part = dc_fixpt_floor(
+				dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
 
 		int_part = int_part > 0 ? int_part : 0;
 		data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
@@ -730,8 +730,8 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 	if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
 		int vp_clip = (src.y + src.height) / vpc_div -
 				data->viewport_c.height - data->viewport_c.y;
-		int int_part = dal_fixed31_32_floor(
-				dal_fixed31_32_sub(data->inits.v_c, data->ratios.vert_c));
+		int int_part = dc_fixpt_floor(
+				dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
 
 		int_part = int_part > 0 ? int_part : 0;
 		data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
@@ -741,9 +741,9 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 	if (data->viewport.x && !flip_horz_scan_dir) {
 		int int_part;
 
-		data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
+		data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
 				data->ratios.horz, recout_skip->width));
-		int_part = dal_fixed31_32_floor(data->inits.h) - data->viewport.x;
+		int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
 		if (int_part < data->taps.h_taps) {
 			int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
 						(data->taps.h_taps - int_part) : data->viewport.x;
@@ -756,15 +756,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 			int_part = data->taps.h_taps;
 		}
 		data->inits.h.value &= 0xffffffff;
-		data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
+		data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
 	}
 
 	if (data->viewport_c.x && !flip_horz_scan_dir) {
 		int int_part;
 
-		data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
+		data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
 				data->ratios.horz_c, recout_skip->width));
-		int_part = dal_fixed31_32_floor(data->inits.h_c) - data->viewport_c.x;
+		int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
 		if (int_part < data->taps.h_taps_c) {
 			int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
 					(data->taps.h_taps_c - int_part) : data->viewport_c.x;
@@ -777,15 +777,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 			int_part = data->taps.h_taps_c;
 		}
 		data->inits.h_c.value &= 0xffffffff;
-		data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
+		data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
 	}
 
 	if (data->viewport.y && !flip_vert_scan_dir) {
 		int int_part;
 
-		data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
+		data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
 				data->ratios.vert, recout_skip->height));
-		int_part = dal_fixed31_32_floor(data->inits.v) - data->viewport.y;
+		int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
 		if (int_part < data->taps.v_taps) {
 			int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
 						(data->taps.v_taps - int_part) : data->viewport.y;
@@ -798,15 +798,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 			int_part = data->taps.v_taps;
 		}
 		data->inits.v.value &= 0xffffffff;
-		data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
+		data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
 	}
 
 	if (data->viewport_c.y && !flip_vert_scan_dir) {
 		int int_part;
 
-		data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
+		data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
 				data->ratios.vert_c, recout_skip->height));
-		int_part = dal_fixed31_32_floor(data->inits.v_c) - data->viewport_c.y;
+		int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
 		if (int_part < data->taps.v_taps_c) {
 			int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
 					(data->taps.v_taps_c - int_part) : data->viewport_c.y;
@@ -819,12 +819,12 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 			int_part = data->taps.v_taps_c;
 		}
 		data->inits.v_c.value &= 0xffffffff;
-		data->inits.v_c = dal_fixed31_32_add_int(data->inits.v_c, int_part);
+		data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
 	}
 
 	/* Interlaced inits based on final vert inits */
-	data->inits.v_bot = dal_fixed31_32_add(data->inits.v, data->ratios.vert);
-	data->inits.v_c_bot = dal_fixed31_32_add(data->inits.v_c, data->ratios.vert_c);
+	data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
+	data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
 
 	if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
 			pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
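
A small worked case of the init formula quoted in calculate_inits_and_adj_vp() above (numbers chosen for illustration):

/*
 * 4-tap vertical filter at a 1:1 scaling ratio:
 *   init     = (ratio + taps + 1) / 2 = (1 + 4 + 1) / 2 = 3
 *   init_bot = init + ratio           = 4
 * The chroma variants additionally start from the 0.5 offset set in
 * calculate_viewport() when the viewport is odd-aligned, as the
 * dc_fixpt_add() calls above show.
 */
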
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 2726b02e006b..90bccd5ccaa2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -26,6 +26,8 @@
 #ifndef DC_DP_TYPES_H
 #define DC_DP_TYPES_H
 
+#include "os_types.h"
+
 enum dc_lane_count {
 	LANE_COUNT_UNKNOWN = 0,
 	LANE_COUNT_ONE = 1,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9defe3b17617..76df2534c4a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -25,7 +25,7 @@
 #ifndef DC_TYPES_H_
 #define DC_TYPES_H_
 
-#include "fixed32_32.h"
+#include "os_types.h"
 #include "fixed31_32.h"
 #include "irq_types.h"
 #include "dc_dp_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index fe92a1222803..29294db1a96b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -26,7 +26,7 @@
 #include "dce_abm.h"
 #include "dm_services.h"
 #include "reg_helper.h"
-#include "fixed32_32.h"
+#include "fixed31_32.h"
 #include "dc.h"
 
 #include "atom.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 0570e7e4d0a0..599c7ab6befe 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -657,12 +657,12 @@ static uint32_t dce110_get_d_to_pixel_rate_in_hz(
 			return 0;
 		}
 
-		pix_rate = dal_fixed31_32_from_int(clk_src->ref_freq_khz);
-		pix_rate = dal_fixed31_32_mul_int(pix_rate, 1000);
-		pix_rate = dal_fixed31_32_mul_int(pix_rate, phase);
-		pix_rate = dal_fixed31_32_div_int(pix_rate, modulo);
+		pix_rate = dc_fixpt_from_int(clk_src->ref_freq_khz);
+		pix_rate = dc_fixpt_mul_int(pix_rate, 1000);
+		pix_rate = dc_fixpt_mul_int(pix_rate, phase);
+		pix_rate = dc_fixpt_div_int(pix_rate, modulo);
 
-		return dal_fixed31_32_round(pix_rate);
+		return dc_fixpt_round(pix_rate);
 	} else {
 		return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings);
 	}
@@ -711,12 +711,12 @@ static bool calculate_ss(
 		const struct spread_spectrum_data *ss_data,
 		struct delta_sigma_data *ds_data)
 {
-	struct fixed32_32 fb_div;
-	struct fixed32_32 ss_amount;
-	struct fixed32_32 ss_nslip_amount;
-	struct fixed32_32 ss_ds_frac_amount;
-	struct fixed32_32 ss_step_size;
-	struct fixed32_32 modulation_time;
+	struct fixed31_32 fb_div;
+	struct fixed31_32 ss_amount;
+	struct fixed31_32 ss_nslip_amount;
+	struct fixed31_32 ss_ds_frac_amount;
+	struct fixed31_32 ss_step_size;
+	struct fixed31_32 modulation_time;
 
 	if (ds_data == NULL)
 		return false;
@@ -731,42 +731,42 @@ static bool calculate_ss(
 
 	/* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/
 	/* 6 decimal point support in fractional feedback divider */
-	fb_div  = dal_fixed32_32_from_fraction(
+	fb_div  = dc_fixpt_from_fraction(
 		pll_settings->fract_feedback_divider, 1000000);
-	fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider);
+	fb_div = dc_fixpt_add_int(fb_div, pll_settings->feedback_divider);
 
 	ds_data->ds_frac_amount = 0;
 	/*spreadSpectrumPercentage is in the unit of .01%,
 	 * so have to divided by 100 * 100*/
-	ss_amount = dal_fixed32_32_mul(
-		fb_div, dal_fixed32_32_from_fraction(ss_data->percentage,
+	ss_amount = dc_fixpt_mul(
+		fb_div, dc_fixpt_from_fraction(ss_data->percentage,
 					100 * ss_data->percentage_divider));
-	ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount);
+	ds_data->feedback_amount = dc_fixpt_floor(ss_amount);
 
-	ss_nslip_amount = dal_fixed32_32_sub(ss_amount,
-		dal_fixed32_32_from_int(ds_data->feedback_amount));
-	ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10);
-	ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount);
+	ss_nslip_amount = dc_fixpt_sub(ss_amount,
+		dc_fixpt_from_int(ds_data->feedback_amount));
+	ss_nslip_amount = dc_fixpt_mul_int(ss_nslip_amount, 10);
+	ds_data->nfrac_amount = dc_fixpt_floor(ss_nslip_amount);
 
-	ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount,
-		dal_fixed32_32_from_int(ds_data->nfrac_amount));
-	ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536);
-	ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount);
+	ss_ds_frac_amount = dc_fixpt_sub(ss_nslip_amount,
+		dc_fixpt_from_int(ds_data->nfrac_amount));
+	ss_ds_frac_amount = dc_fixpt_mul_int(ss_ds_frac_amount, 65536);
+	ds_data->ds_frac_amount = dc_fixpt_floor(ss_ds_frac_amount);
 
 	/* compute SS_STEP_SIZE_DSFRAC */
-	modulation_time = dal_fixed32_32_from_fraction(
+	modulation_time = dc_fixpt_from_fraction(
 		pll_settings->reference_freq * 1000,
 		pll_settings->reference_divider * ss_data->modulation_freq_hz);
 
 	if (ss_data->flags.CENTER_SPREAD)
-		modulation_time = dal_fixed32_32_div_int(modulation_time, 4);
+		modulation_time = dc_fixpt_div_int(modulation_time, 4);
 	else
-		modulation_time = dal_fixed32_32_div_int(modulation_time, 2);
+		modulation_time = dc_fixpt_div_int(modulation_time, 2);
 
-	ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time);
+	ss_step_size = dc_fixpt_div(ss_amount, modulation_time);
 	/* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/
-	ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10);
-	ds_data->ds_frac_size =  dal_fixed32_32_floor(ss_step_size);
+	ss_step_size = dc_fixpt_mul_int(ss_step_size, 65536 * 10);
+	ds_data->ds_frac_size =  dc_fixpt_floor(ss_step_size);
 
 	return true;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index f043e5ea412c..8a581c67bf2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -26,7 +26,7 @@
 #include "dce_clocks.h"
 #include "dm_services.h"
 #include "reg_helper.h"
-#include "fixed32_32.h"
+#include "fixed31_32.h"
 #include "bios_parser_interface.h"
 #include "dc.h"
 #include "dmcu.h"
@@ -228,19 +228,19 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
 	 generated according to average value (case as with previous ASICs)
 	  */
 	if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
-		struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
-				dal_fixed32_32_from_fraction(
+		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+				dc_fixpt_from_fraction(
 						clk_dce->dprefclk_ss_percentage,
 						clk_dce->dprefclk_ss_divider), 200);
-		struct fixed32_32 adj_dp_ref_clk_khz;
+		struct fixed31_32 adj_dp_ref_clk_khz;
 
-		ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
+		ss_percentage = dc_fixpt_sub(dc_fixpt_one,
 								ss_percentage);
 		adj_dp_ref_clk_khz =
-			dal_fixed32_32_mul_int(
+			dc_fixpt_mul_int(
 				ss_percentage,
 				dp_ref_clk_khz);
-		dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
+		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
 	}
 
 	return dp_ref_clk_khz;
@@ -256,19 +256,19 @@ static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
 	int dp_ref_clk_khz = 600000;
 
 	if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
-		struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
-				dal_fixed32_32_from_fraction(
+		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+				dc_fixpt_from_fraction(
 						clk_dce->dprefclk_ss_percentage,
 						clk_dce->dprefclk_ss_divider), 200);
-		struct fixed32_32 adj_dp_ref_clk_khz;
+		struct fixed31_32 adj_dp_ref_clk_khz;
 
-		ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
+		ss_percentage = dc_fixpt_sub(dc_fixpt_one,
 								ss_percentage);
 		adj_dp_ref_clk_khz =
-			dal_fixed32_32_mul_int(
+			dc_fixpt_mul_int(
 				ss_percentage,
 				dp_ref_clk_khz);
-		dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
+		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
 	}
 
 	return dp_ref_clk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 2ee3d9bf1062..a576b8bbb3cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -28,7 +28,7 @@
 #include "dce_dmcu.h"
 #include "dm_services.h"
 #include "reg_helper.h"
-#include "fixed32_32.h"
+#include "fixed31_32.h"
 #include "dc.h"
 
 #define TO_DCE_DMCU(dmcu)\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
index d737e911971b..5d9506b3d46b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
@@ -195,13 +195,13 @@ static void dce_ipp_program_input_lut(
 
 	for (i = 0; i < gamma->num_entries; i++) {
 		REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
-				dal_fixed31_32_round(
+				dc_fixpt_round(
 					gamma->entries.red[i]));
 		REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
-				dal_fixed31_32_round(
+				dc_fixpt_round(
 					gamma->entries.green[i]));
 		REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
-				dal_fixed31_32_round(
+				dc_fixpt_round(
 					gamma->entries.blue[i]));
 	}
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
index 6243450b41b7..48862bebf29e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -1014,11 +1014,11 @@ static const uint16_t filter_8tap_64p_183[264] = {
 
 const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
 {
-	if (ratio.value < dal_fixed31_32_one.value)
+	if (ratio.value < dc_fixpt_one.value)
 		return filter_3tap_16p_upscale;
-	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
 		return filter_3tap_16p_117;
-	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
 		return filter_3tap_16p_150;
 	else
 		return filter_3tap_16p_183;
@@ -1026,11 +1026,11 @@ const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
 
 const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
 {
-	if (ratio.value < dal_fixed31_32_one.value)
+	if (ratio.value < dc_fixpt_one.value)
 		return filter_3tap_64p_upscale;
-	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
 		return filter_3tap_64p_117;
-	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
 		return filter_3tap_64p_150;
 	else
 		return filter_3tap_64p_183;
@@ -1038,11 +1038,11 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
 
 const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
 {
-	if (ratio.value < dal_fixed31_32_one.value)
+	if (ratio.value < dc_fixpt_one.value)
 		return filter_4tap_16p_upscale;
-	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
 		return filter_4tap_16p_117;
-	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
 		return filter_4tap_16p_150;
 	else
 		return filter_4tap_16p_183;
@@ -1050,11 +1050,11 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
 
 const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
 {
-	if (ratio.value < dal_fixed31_32_one.value)
+	if (ratio.value < dc_fixpt_one.value)
 		return filter_4tap_64p_upscale;
-	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
 		return filter_4tap_64p_117;
-	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
 		return filter_4tap_64p_150;
 	else
 		return filter_4tap_64p_183;
@@ -1062,11 +1062,11 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
 
 const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
 {
-	if (ratio.value < dal_fixed31_32_one.value)
+	if (ratio.value < dc_fixpt_one.value)
 		return filter_5tap_64p_upscale;
-	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
 		return filter_5tap_64p_117;
-	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
 		return filter_5tap_64p_150;
 	else
 		return filter_5tap_64p_183;
@@ -1074,11 +1074,11 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
 
 const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
 {
-	if (ratio.value < dal_fixed31_32_one.value)
+	if (ratio.value < dc_fixpt_one.value)
 		return filter_6tap_64p_upscale;
-	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
 		return filter_6tap_64p_117;
-	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
 		return filter_6tap_64p_150;
 	else
 		return filter_6tap_64p_183;
@@ -1086,11 +1086,11 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
 
 const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
 {
-	if (ratio.value < dal_fixed31_32_one.value)
+	if (ratio.value < dc_fixpt_one.value)
 		return filter_7tap_64p_upscale;
-	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
 		return filter_7tap_64p_117;
-	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
 		return filter_7tap_64p_150;
 	else
 		return filter_7tap_64p_183;
@@ -1098,11 +1098,11 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
 
 const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
 {
-	if (ratio.value < dal_fixed31_32_one.value)
+	if (ratio.value < dc_fixpt_one.value)
 		return filter_8tap_64p_upscale;
-	else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
 		return filter_8tap_64p_117;
-	else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
 		return filter_8tap_64p_150;
 	else
 		return filter_8tap_64p_183;
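
The selectors above pick a coefficient bank by comparing the raw Q31.32 .value against 1, 4/3 and 5/3. A hypothetical call, assuming the selector's declaration is in scope:

	/* a 3:2 downscale sits in the [4/3, 5/3) band, so this resolves to
	 * filter_3tap_16p_150 */
	const uint16_t *coeffs = get_filter_3tap_16p(dc_fixpt_from_fraction(3, 2));
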
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index e265a0abe361..0a6d483dc046 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -683,11 +683,11 @@ static void dce110_stream_encoder_set_mst_bandwidth(
 	struct fixed31_32 avg_time_slots_per_mtp)
 {
 	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
-	uint32_t x = dal_fixed31_32_floor(
+	uint32_t x = dc_fixpt_floor(
 		avg_time_slots_per_mtp);
-	uint32_t y = dal_fixed31_32_ceil(
-		dal_fixed31_32_shl(
-			dal_fixed31_32_sub_int(
+	uint32_t y = dc_fixpt_ceil(
+		dc_fixpt_shl(
+			dc_fixpt_sub_int(
 				avg_time_slots_per_mtp,
 				x),
 			26));
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 832c5daada35..a02e719d7794 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -41,7 +41,7 @@
 #define DC_LOGGER \
 	xfm_dce->base.ctx->logger
 
-#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
 #define GAMUT_MATRIX_SIZE 12
 #define SCL_PHASES 16
 
@@ -256,27 +256,27 @@ static void calculate_inits(
 	struct fixed31_32 v_init;
 
 	inits->h_int_scale_ratio =
-		dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+		dc_fixpt_u2d19(data->ratios.horz) << 5;
 	inits->v_int_scale_ratio =
-		dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+		dc_fixpt_u2d19(data->ratios.vert) << 5;
 
 	h_init =
-		dal_fixed31_32_div_int(
-			dal_fixed31_32_add(
+		dc_fixpt_div_int(
+			dc_fixpt_add(
 				data->ratios.horz,
-				dal_fixed31_32_from_int(data->taps.h_taps + 1)),
+				dc_fixpt_from_int(data->taps.h_taps + 1)),
 				2);
-	inits->h_init.integer = dal_fixed31_32_floor(h_init);
-	inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5;
+	inits->h_init.integer = dc_fixpt_floor(h_init);
+	inits->h_init.fraction = dc_fixpt_u0d19(h_init) << 5;
 
 	v_init =
-		dal_fixed31_32_div_int(
-			dal_fixed31_32_add(
+		dc_fixpt_div_int(
+			dc_fixpt_add(
 				data->ratios.vert,
-				dal_fixed31_32_from_int(data->taps.v_taps + 1)),
+				dc_fixpt_from_int(data->taps.v_taps + 1)),
 				2);
-	inits->v_init.integer = dal_fixed31_32_floor(v_init);
-	inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5;
+	inits->v_init.integer = dc_fixpt_floor(v_init);
+	inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5;
 }
 
 static void program_scl_ratios_inits(
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index ae500421edb6..a92fb0aa2ff3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -509,19 +509,19 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-	arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
-					     dal_fixed31_32_from_int(region_start));
-	arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
-					     dal_fixed31_32_from_int(region_end));
+	arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+					     dc_fixpt_from_int(region_start));
+	arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+					     dc_fixpt_from_int(region_end));
 
 	y_r = rgb_resulted[0].red;
 	y_g = rgb_resulted[0].green;
 	y_b = rgb_resulted[0].blue;
 
-	y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+	y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
 
 	arr_points[0].y = y1_min;
-	arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y,
+	arr_points[0].slope = dc_fixpt_div(arr_points[0].y,
 						 arr_points[0].x);
 
 	y_r = rgb_resulted[hw_points - 1].red;
@@ -531,21 +531,21 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 	/* see comment above, m_arrPoints[1].y should be the Y value for the
 	 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
 	 */
-	y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+	y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
 
 	arr_points[1].y = y3_max;
 
-	arr_points[1].slope = dal_fixed31_32_zero;
+	arr_points[1].slope = dc_fixpt_zero;
 
 	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
 		/* for PQ, we want to have a straight line from last HW X point,
 		 * and the slope to be such that we hit 1.0 at 10000 nits.
 		 */
-		const struct fixed31_32 end_value = dal_fixed31_32_from_int(125);
+		const struct fixed31_32 end_value = dc_fixpt_from_int(125);
 
-		arr_points[1].slope = dal_fixed31_32_div(
-				dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
-				dal_fixed31_32_sub(end_value, arr_points[1].x));
+		arr_points[1].slope = dc_fixpt_div(
+				dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
+				dc_fixpt_sub(end_value, arr_points[1].x));
 	}
 
 	regamma_params->hw_points_num = hw_points;
@@ -569,16 +569,16 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
 	i = 1;
 
 	while (i != hw_points + 1) {
-		if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+		if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
 			rgb_plus_1->red = rgb->red;
-		if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+		if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
 			rgb_plus_1->green = rgb->green;
-		if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+		if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
 			rgb_plus_1->blue = rgb->blue;
 
-		rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
-		rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
-		rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
+		rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
+		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+		rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
 
 		++rgb_plus_1;
 		++rgb;
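
The PQ special case above is a point-slope construction; restated for clarity (no new behaviour):

/*
 * With (x_end, y_end) = arr_points[1] and end_value = 125:
 *     slope = (1 - y_end) / (125 - x_end)
 * so the extrapolated line y(x) = y_end + slope * (x - x_end) reaches
 * y(125) = 1.0 -- the "hit 1.0 at 10000 nits" behaviour described in the
 * comment.
 */
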
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index 8ba3c12fc608..a7dce060204f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -373,13 +373,13 @@ static void calculate_inits(
 	struct rect *chroma_viewport)
 {
 	inits->h_int_scale_ratio_luma =
-		dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+		dc_fixpt_u2d19(data->ratios.horz) << 5;
 	inits->v_int_scale_ratio_luma =
-		dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+		dc_fixpt_u2d19(data->ratios.vert) << 5;
 	inits->h_int_scale_ratio_chroma =
-		dal_fixed31_32_u2d19(data->ratios.horz_c) << 5;
+		dc_fixpt_u2d19(data->ratios.horz_c) << 5;
 	inits->v_int_scale_ratio_chroma =
-		dal_fixed31_32_u2d19(data->ratios.vert_c) << 5;
+		dc_fixpt_u2d19(data->ratios.vert_c) << 5;
 
 	inits->h_init_luma.integer = 1;
 	inits->v_init_luma.integer = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 96d5878e9ccd..5d95a997fd9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -169,7 +169,7 @@ bool cm_helper_convert_to_custom_float(
 	}
 
 	if (fixpoint == true)
-		arr_points[1].custom_float_y = dal_fixed31_32_clamp_u0d14(arr_points[1].y);
+		arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y);
 	else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
 		&arr_points[1].custom_float_y)) {
 		BREAK_TO_DEBUGGER();
@@ -327,19 +327,19 @@ bool cm_helper_translate_curve_to_hw_format(
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-	arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
-					     dal_fixed31_32_from_int(region_start));
-	arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
-					     dal_fixed31_32_from_int(region_end));
+	arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+					     dc_fixpt_from_int(region_start));
+	arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+					     dc_fixpt_from_int(region_end));
 
 	y_r = rgb_resulted[0].red;
 	y_g = rgb_resulted[0].green;
 	y_b = rgb_resulted[0].blue;
 
-	y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+	y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
 
 	arr_points[0].y = y1_min;
-	arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
+	arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
 	y_r = rgb_resulted[hw_points - 1].red;
 	y_g = rgb_resulted[hw_points - 1].green;
 	y_b = rgb_resulted[hw_points - 1].blue;
@@ -347,22 +347,22 @@ bool cm_helper_translate_curve_to_hw_format(
 	/* see comment above, m_arrPoints[1].y should be the Y value for the
 	 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
 	 */
-	y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+	y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
 
 	arr_points[1].y = y3_max;
 
-	arr_points[1].slope = dal_fixed31_32_zero;
+	arr_points[1].slope = dc_fixpt_zero;
 
 	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
 		/* for PQ, we want to have a straight line from last HW X point,
 		 * and the slope to be such that we hit 1.0 at 10000 nits.
 		 */
 		const struct fixed31_32 end_value =
-				dal_fixed31_32_from_int(125);
+				dc_fixpt_from_int(125);
 
-		arr_points[1].slope = dal_fixed31_32_div(
-			dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
-			dal_fixed31_32_sub(end_value, arr_points[1].x));
+		arr_points[1].slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
+			dc_fixpt_sub(end_value, arr_points[1].x));
 	}
 
 	lut_params->hw_points_num = hw_points;
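
A note on the PQ end-slope arithmetic in the branch above: the end_value of 125 is 10000 nits divided by the 80-nit SDR white level assumed elsewhere in this code (see the dc_fixpt_from_fraction(80, 10000) scaling in color_gamma.c), so

	slope = (1.0 - arr_points[1].y) / (125 - arr_points[1].x)

is exactly the straight-line extension that reaches 1.0 at a 10000-nit input, as the comment says. (The 80-nit assumption is inferred from the surrounding code, not stated in this hunk.)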
@@ -386,24 +386,24 @@ bool cm_helper_translate_curve_to_hw_format(
 
 	i = 1;
 	while (i != hw_points + 1) {
-		if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+		if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
 			rgb_plus_1->red = rgb->red;
-		if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+		if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
 			rgb_plus_1->green = rgb->green;
-		if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+		if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
 			rgb_plus_1->blue = rgb->blue;
 
-		rgb->delta_red   = dal_fixed31_32_sub(rgb_plus_1->red,   rgb->red);
-		rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
-		rgb->delta_blue  = dal_fixed31_32_sub(rgb_plus_1->blue,  rgb->blue);
+		rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
+		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+		rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
 
 		if (fixpoint == true) {
-			rgb->delta_red_reg   = dal_fixed31_32_clamp_u0d10(rgb->delta_red);
-			rgb->delta_green_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_green);
-			rgb->delta_blue_reg  = dal_fixed31_32_clamp_u0d10(rgb->delta_blue);
-			rgb->red_reg         = dal_fixed31_32_clamp_u0d14(rgb->red);
-			rgb->green_reg       = dal_fixed31_32_clamp_u0d14(rgb->green);
-			rgb->blue_reg        = dal_fixed31_32_clamp_u0d14(rgb->blue);
+			rgb->delta_red_reg   = dc_fixpt_clamp_u0d10(rgb->delta_red);
+			rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
+			rgb->delta_blue_reg  = dc_fixpt_clamp_u0d10(rgb->delta_blue);
+			rgb->red_reg         = dc_fixpt_clamp_u0d14(rgb->red);
+			rgb->green_reg       = dc_fixpt_clamp_u0d14(rgb->green);
+			rgb->blue_reg        = dc_fixpt_clamp_u0d14(rgb->blue);
 		}
 
 		++rgb_plus_1;
@@ -489,19 +489,19 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
-	arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
-					     dal_fixed31_32_from_int(region_start));
-	arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
-					     dal_fixed31_32_from_int(region_end));
+	arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+					     dc_fixpt_from_int(region_start));
+	arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+					     dc_fixpt_from_int(region_end));
 
 	y_r = rgb_resulted[0].red;
 	y_g = rgb_resulted[0].green;
 	y_b = rgb_resulted[0].blue;
 
-	y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+	y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
 
 	arr_points[0].y = y1_min;
-	arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
+	arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
 	y_r = rgb_resulted[hw_points - 1].red;
 	y_g = rgb_resulted[hw_points - 1].green;
 	y_b = rgb_resulted[hw_points - 1].blue;
@@ -509,22 +509,22 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 	/* see comment above, m_arrPoints[1].y should be the Y value for the
 	 * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
 	 */
-	y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+	y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
 
 	arr_points[1].y = y3_max;
 
-	arr_points[1].slope = dal_fixed31_32_zero;
+	arr_points[1].slope = dc_fixpt_zero;
 
 	if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
 		/* for PQ, we want to have a straight line from last HW X point,
 		 * and the slope to be such that we hit 1.0 at 10000 nits.
 		 */
 		const struct fixed31_32 end_value =
-				dal_fixed31_32_from_int(125);
+				dc_fixpt_from_int(125);
 
-		arr_points[1].slope = dal_fixed31_32_div(
-			dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
-			dal_fixed31_32_sub(end_value, arr_points[1].x));
+		arr_points[1].slope = dc_fixpt_div(
+			dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
+			dc_fixpt_sub(end_value, arr_points[1].x));
 	}
 
 	lut_params->hw_points_num = hw_points;
@@ -548,16 +548,16 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
 	i = 1;
 	while (i != hw_points + 1) {
-		if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+		if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
 			rgb_plus_1->red = rgb->red;
-		if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+		if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
 			rgb_plus_1->green = rgb->green;
-		if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+		if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
 			rgb_plus_1->blue = rgb->blue;
 
-		rgb->delta_red   = dal_fixed31_32_sub(rgb_plus_1->red,   rgb->red);
-		rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
-		rgb->delta_blue  = dal_fixed31_32_sub(rgb_plus_1->blue,  rgb->blue);
+		rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
+		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+		rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
 
 		++rgb_plus_1;
 		++rgb;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 8c4d9e523331..20796da36de4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -130,7 +130,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
 	/* Gamut remap in bypass */
 }
 
-#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
 
 
 bool dpp_get_optimal_number_of_taps(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 4f373c97804f..116977eb24e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -811,13 +811,13 @@ void dpp1_program_input_lut(
 	REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
 	for (i = 0; i < gamma->num_entries; i++) {
 		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
-				dal_fixed31_32_round(
+				dc_fixpt_round(
 					gamma->entries.red[i]));
 		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
-				dal_fixed31_32_round(
+				dc_fixpt_round(
 					gamma->entries.green[i]));
 		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
-				dal_fixed31_32_round(
+				dc_fixpt_round(
 					gamma->entries.blue[i]));
 	}
 	// Power off LUT memory
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 3eb824debf43..4ddd6273d5a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -169,7 +169,7 @@ static enum dscl_mode_sel dpp1_dscl_get_dscl_mode(
 		const struct scaler_data *data,
 		bool dbg_always_scale)
 {
-	const long long one = dal_fixed31_32_one.value;
+	const long long one = dc_fixpt_one.value;
 
 	if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
 		/* DSCL is processing data in fixed format */
@@ -464,8 +464,8 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
 	int num_part_y, num_part_c;
 	int vtaps = scl_data->taps.v_taps;
 	int vtaps_c = scl_data->taps.v_taps_c;
-	int ceil_vratio = dal_fixed31_32_ceil(scl_data->ratios.vert);
-	int ceil_vratio_c = dal_fixed31_32_ceil(scl_data->ratios.vert_c);
+	int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
+	int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
 	enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
 
 	if (dpp->base.ctx->dc->debug.use_max_lb)
@@ -565,52 +565,52 @@ static void dpp1_dscl_set_manual_ratio_init(
 	uint32_t init_int = 0;
 
 	REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
-			SCL_H_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.horz) << 5);
+			SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5);
 
 	REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
-			SCL_V_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.vert) << 5);
+			SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5);
 
 	REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
-			SCL_H_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.horz_c) << 5);
+			SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5);
 
 	REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
-			SCL_V_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.vert_c) << 5);
+			SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5);
 
 	/*
 	 * 0.24 format for fraction, first five bits zeroed
 	 */
-	init_frac = dal_fixed31_32_u0d19(data->inits.h) << 5;
-	init_int = dal_fixed31_32_floor(data->inits.h);
+	init_frac = dc_fixpt_u0d19(data->inits.h) << 5;
+	init_int = dc_fixpt_floor(data->inits.h);
 	REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
 		SCL_H_INIT_FRAC, init_frac,
 		SCL_H_INIT_INT, init_int);
 
-	init_frac = dal_fixed31_32_u0d19(data->inits.h_c) << 5;
-	init_int = dal_fixed31_32_floor(data->inits.h_c);
+	init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5;
+	init_int = dc_fixpt_floor(data->inits.h_c);
 	REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
 		SCL_H_INIT_FRAC_C, init_frac,
 		SCL_H_INIT_INT_C, init_int);
 
-	init_frac = dal_fixed31_32_u0d19(data->inits.v) << 5;
-	init_int = dal_fixed31_32_floor(data->inits.v);
+	init_frac = dc_fixpt_u0d19(data->inits.v) << 5;
+	init_int = dc_fixpt_floor(data->inits.v);
 	REG_SET_2(SCL_VERT_FILTER_INIT, 0,
 		SCL_V_INIT_FRAC, init_frac,
 		SCL_V_INIT_INT, init_int);
 
-	init_frac = dal_fixed31_32_u0d19(data->inits.v_bot) << 5;
-	init_int = dal_fixed31_32_floor(data->inits.v_bot);
+	init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5;
+	init_int = dc_fixpt_floor(data->inits.v_bot);
 	REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
 		SCL_V_INIT_FRAC_BOT, init_frac,
 		SCL_V_INIT_INT_BOT, init_int);
 
-	init_frac = dal_fixed31_32_u0d19(data->inits.v_c) << 5;
-	init_int = dal_fixed31_32_floor(data->inits.v_c);
+	init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5;
+	init_int = dc_fixpt_floor(data->inits.v_c);
 	REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
 		SCL_V_INIT_FRAC_C, init_frac,
 		SCL_V_INIT_INT_C, init_int);
 
-	init_frac = dal_fixed31_32_u0d19(data->inits.v_c_bot) << 5;
-	init_int = dal_fixed31_32_floor(data->inits.v_c_bot);
+	init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5;
+	init_int = dc_fixpt_floor(data->inits.v_c_bot);
 	REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
 		SCL_V_INIT_FRAC_BOT_C, init_frac,
 		SCL_V_INIT_INT_BOT_C, init_int);
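
The dc_fixpt_u0d19()/<< 5 pattern in this hunk builds the 0.24-format fraction fields described by the in-code comment: a 0.19 fraction shifted left by five bits, with the low five bits zeroed. A minimal standalone sketch of that packing, assuming u0d19 simply keeps the top 19 of the 32 fractional bits (consistent with the 31.32 layout, though the helper's body is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 0.5 in 31.32 fixed point: the value lives entirely in the fractional bits */
	uint64_t fixpt = 0x80000000ULL;
	/* u0d19: keep the top 19 fractional bits */
	uint32_t u0d19 = (uint32_t)(fixpt >> (32 - 19)) & ((1u << 19) - 1);
	/* 0.24 register field: shift left by 5, so the low five bits stay zero */
	uint32_t init_frac = u0d19 << 5;

	printf("u0d19=0x%05x init_frac=0x%06x\n", u0d19, init_frac);
	return 0;
}

Here 0.5 gives u0d19 = 0x40000 and init_frac = 0x800000.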
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 0cbc83edd37f..185f93bda41b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1054,8 +1054,8 @@ void hubp1_cursor_set_position(
 	ASSERT(param->h_scale_ratio.value);
 
 	if (param->h_scale_ratio.value)
-		dst_x_offset = dal_fixed31_32_floor(dal_fixed31_32_div(
-				dal_fixed31_32_from_int(dst_x_offset),
+		dst_x_offset = dc_fixpt_floor(dc_fixpt_div(
+				dc_fixpt_from_int(dst_x_offset),
 				param->h_scale_ratio));
 
 	if (src_x_offset >= (int)param->viewport_width)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8adb8dc44af5..50bd7548e230 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1685,22 +1685,22 @@ static uint16_t fixed_point_to_int_frac(
 
 	uint16_t result;
 
-	uint16_t d = (uint16_t)dal_fixed31_32_floor(
-		dal_fixed31_32_abs(
+	uint16_t d = (uint16_t)dc_fixpt_floor(
+		dc_fixpt_abs(
 			arg));
 
 	if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
-		numerator = (uint16_t)dal_fixed31_32_floor(
-			dal_fixed31_32_mul_int(
+		numerator = (uint16_t)dc_fixpt_floor(
+			dc_fixpt_mul_int(
 				arg,
 				divisor));
 	else {
-		numerator = dal_fixed31_32_floor(
-			dal_fixed31_32_sub(
-				dal_fixed31_32_from_int(
+		numerator = dc_fixpt_floor(
+			dc_fixpt_sub(
+				dc_fixpt_from_int(
 					1LL << integer_bits),
-				dal_fixed31_32_recip(
-					dal_fixed31_32_from_int(
+				dc_fixpt_recip(
+					dc_fixpt_from_int(
 						divisor))));
 	}
 
@@ -1710,8 +1710,8 @@ static uint16_t fixed_point_to_int_frac(
 		result = (uint16_t)(
 		(1 << (integer_bits + fractional_bits + 1)) + numerator);
 
-	if ((result != 0) && dal_fixed31_32_lt(
-		arg, dal_fixed31_32_zero))
+	if ((result != 0) && dc_fixpt_lt(
+		arg, dc_fixpt_zero))
 		result |= 1 << (integer_bits + fractional_bits);
 
 	return result;
@@ -1725,8 +1725,8 @@ void build_prescale_params(struct  dc_bias_and_scale *bias_and_scale,
 			&& plane_state->input_csc_color_matrix.enable_adjustment
 			&& plane_state->coeff_reduction_factor.value != 0) {
 		bias_and_scale->scale_blue = fixed_point_to_int_frac(
-			dal_fixed31_32_mul(plane_state->coeff_reduction_factor,
-					dal_fixed31_32_from_fraction(256, 255)),
+			dc_fixpt_mul(plane_state->coeff_reduction_factor,
+					dc_fixpt_from_fraction(256, 255)),
 				2,
 				13);
 		bias_and_scale->scale_red = bias_and_scale->scale_blue;
@@ -1995,7 +1995,7 @@ static void dcn10_blank_pixel_data(
 
 static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
 {
-	struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
+	struct fixed31_32 multiplier = dc_fixpt_from_fraction(
 			pipe_ctx->plane_state->sdr_white_level, 80);
 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
 	struct custom_float_format fmt;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index befd8639ad55..653b7b2efe2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -603,11 +603,11 @@ void enc1_stream_encoder_set_mst_bandwidth(
 	struct fixed31_32 avg_time_slots_per_mtp)
 {
 	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-	uint32_t x = dal_fixed31_32_floor(
+	uint32_t x = dc_fixpt_floor(
 		avg_time_slots_per_mtp);
-	uint32_t y = dal_fixed31_32_ceil(
-		dal_fixed31_32_shl(
-			dal_fixed31_32_sub_int(
+	uint32_t y = dc_fixpt_ceil(
+		dc_fixpt_shl(
+			dc_fixpt_sub_int(
 				avg_time_slots_per_mtp,
 				x),
 			26));
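
In the MST bandwidth hunk above, x and y are simply the integer and fractional parts of avg_time_slots_per_mtp: x = floor(value), and y is the remaining fraction shifted into a 26-bit field and rounded up with ceil. For example, 3.25 average time slots per MTP yields x = 3 and y = 0.25 * 2^26 = 0x1000000 (the 26-bit field width is implied by the shift, not stated in the hunk).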
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index cc3b1bc6cedd..0b5f3a278c22 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -26,6 +26,8 @@
 #ifndef __DAL_IRQ_TYPES_H__
 #define __DAL_IRQ_TYPES_H__
 
+#include "os_types.h"
+
 struct dc_context;
 
 typedef void (*interrupt_handler)(void *);
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 16cbdb43d856..b5b8d7dea373 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -50,16 +50,16 @@ struct fixed31_32 {
  * Useful constants
  */
 
-static const struct fixed31_32 dal_fixed31_32_zero = { 0 };
-static const struct fixed31_32 dal_fixed31_32_epsilon = { 1LL };
-static const struct fixed31_32 dal_fixed31_32_half = { 0x80000000LL };
-static const struct fixed31_32 dal_fixed31_32_one = { 0x100000000LL };
+static const struct fixed31_32 dc_fixpt_zero = { 0 };
+static const struct fixed31_32 dc_fixpt_epsilon = { 1LL };
+static const struct fixed31_32 dc_fixpt_half = { 0x80000000LL };
+static const struct fixed31_32 dc_fixpt_one = { 0x100000000LL };
 
-static const struct fixed31_32 dal_fixed31_32_pi = { 13493037705LL };
-static const struct fixed31_32 dal_fixed31_32_two_pi = { 26986075409LL };
-static const struct fixed31_32 dal_fixed31_32_e = { 11674931555LL };
-static const struct fixed31_32 dal_fixed31_32_ln2 = { 2977044471LL };
-static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
+static const struct fixed31_32 dc_fixpt_pi = { 13493037705LL };
+static const struct fixed31_32 dc_fixpt_two_pi = { 26986075409LL };
+static const struct fixed31_32 dc_fixpt_e = { 11674931555LL };
+static const struct fixed31_32 dc_fixpt_ln2 = { 2977044471LL };
+static const struct fixed31_32 dc_fixpt_ln2_div_2 = { 1488522236LL };
 
 /*
  * @brief
@@ -70,7 +70,7 @@ static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
  * @brief
  * result = numerator / denominator
  */
-struct fixed31_32 dal_fixed31_32_from_fraction(
+struct fixed31_32 dc_fixpt_from_fraction(
 	long long numerator,
 	long long denominator);
 
@@ -78,8 +78,8 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
  * @brief
  * result = arg
  */
-struct fixed31_32 dal_fixed31_32_from_int_nonconst(long long arg);
-static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
+struct fixed31_32 dc_fixpt_from_int_nonconst(long long arg);
+static inline struct fixed31_32 dc_fixpt_from_int(long long arg)
 {
 	if (__builtin_constant_p(arg)) {
 		struct fixed31_32 res;
@@ -87,7 +87,7 @@ static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
 		res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
 		return res;
 	} else
-		return dal_fixed31_32_from_int_nonconst(arg);
+		return dc_fixpt_from_int_nonconst(arg);
 }
 
 /*
@@ -99,7 +99,7 @@ static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
  * @brief
  * result = -arg
  */
-static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg)
+static inline struct fixed31_32 dc_fixpt_neg(struct fixed31_32 arg)
 {
 	struct fixed31_32 res;
 
@@ -112,10 +112,10 @@ static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg)
  * @brief
  * result = abs(arg) := (arg >= 0) ? arg : -arg
  */
-static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg)
+static inline struct fixed31_32 dc_fixpt_abs(struct fixed31_32 arg)
 {
 	if (arg.value < 0)
-		return dal_fixed31_32_neg(arg);
+		return dc_fixpt_neg(arg);
 	else
 		return arg;
 }
@@ -129,7 +129,7 @@ static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg)
  * @brief
  * result = arg1 < arg2
  */
-static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1,
+static inline bool dc_fixpt_lt(struct fixed31_32 arg1,
 				     struct fixed31_32 arg2)
 {
 	return arg1.value < arg2.value;
@@ -139,7 +139,7 @@ static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1,
  * @brief
  * result = arg1 <= arg2
  */
-static inline bool dal_fixed31_32_le(struct fixed31_32 arg1,
+static inline bool dc_fixpt_le(struct fixed31_32 arg1,
 				     struct fixed31_32 arg2)
 {
 	return arg1.value <= arg2.value;
@@ -149,7 +149,7 @@ static inline bool dal_fixed31_32_le(struct fixed31_32 arg1,
  * @brief
  * result = arg1 == arg2
  */
-static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1,
+static inline bool dc_fixpt_eq(struct fixed31_32 arg1,
 				     struct fixed31_32 arg2)
 {
 	return arg1.value == arg2.value;
@@ -159,7 +159,7 @@ static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1,
  * @brief
  * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
  */
-static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1,
+static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1,
 						   struct fixed31_32 arg2)
 {
 	if (arg1.value <= arg2.value)
@@ -172,7 +172,7 @@ static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1,
  * @brief
  * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
  */
-static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1,
+static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1,
 						   struct fixed31_32 arg2)
 {
 	if (arg1.value <= arg2.value)
@@ -187,14 +187,14 @@ static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1,
  * result = | arg, when min_value < arg < max_value
  *          | max_value, when arg >= max_value
  */
-static inline struct fixed31_32 dal_fixed31_32_clamp(
+static inline struct fixed31_32 dc_fixpt_clamp(
 	struct fixed31_32 arg,
 	struct fixed31_32 min_value,
 	struct fixed31_32 max_value)
 {
-	if (dal_fixed31_32_le(arg, min_value))
+	if (dc_fixpt_le(arg, min_value))
 		return min_value;
-	else if (dal_fixed31_32_le(max_value, arg))
+	else if (dc_fixpt_le(max_value, arg))
 		return max_value;
 	else
 		return arg;
@@ -209,7 +209,7 @@ static inline struct fixed31_32 dal_fixed31_32_clamp(
  * @brief
  * result = arg << shift
  */
-struct fixed31_32 dal_fixed31_32_shl(
+struct fixed31_32 dc_fixpt_shl(
 	struct fixed31_32 arg,
 	unsigned char shift);
 
@@ -217,7 +217,7 @@ struct fixed31_32 dal_fixed31_32_shl(
  * @brief
  * result = arg >> shift
  */
-static inline struct fixed31_32 dal_fixed31_32_shr(
+static inline struct fixed31_32 dc_fixpt_shr(
 	struct fixed31_32 arg,
 	unsigned char shift)
 {
@@ -235,7 +235,7 @@ static inline struct fixed31_32 dal_fixed31_32_shr(
  * @brief
  * result = arg1 + arg2
  */
-struct fixed31_32 dal_fixed31_32_add(
+struct fixed31_32 dc_fixpt_add(
 	struct fixed31_32 arg1,
 	struct fixed31_32 arg2);
 
@@ -243,18 +243,18 @@ struct fixed31_32 dal_fixed31_32_add(
  * @brief
  * result = arg1 + arg2
  */
-static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1,
+static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1,
 						       int arg2)
 {
-	return dal_fixed31_32_add(arg1,
-				  dal_fixed31_32_from_int(arg2));
+	return dc_fixpt_add(arg1,
+				  dc_fixpt_from_int(arg2));
 }
 
 /*
  * @brief
  * result = arg1 - arg2
  */
-struct fixed31_32 dal_fixed31_32_sub(
+struct fixed31_32 dc_fixpt_sub(
 	struct fixed31_32 arg1,
 	struct fixed31_32 arg2);
 
@@ -262,11 +262,11 @@ struct fixed31_32 dal_fixed31_32_sub(
  * @brief
  * result = arg1 - arg2
  */
-static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
+static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1,
 						       int arg2)
 {
-	return dal_fixed31_32_sub(arg1,
-				  dal_fixed31_32_from_int(arg2));
+	return dc_fixpt_sub(arg1,
+				  dc_fixpt_from_int(arg2));
 }
 
 
@@ -279,7 +279,7 @@ static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
  * @brief
  * result = arg1 * arg2
  */
-struct fixed31_32 dal_fixed31_32_mul(
+struct fixed31_32 dc_fixpt_mul(
 	struct fixed31_32 arg1,
 	struct fixed31_32 arg2);
 
@@ -288,39 +288,39 @@ struct fixed31_32 dal_fixed31_32_mul(
  * @brief
  * result = arg1 * arg2
  */
-static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1,
+static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1,
 						       int arg2)
 {
-	return dal_fixed31_32_mul(arg1,
-				  dal_fixed31_32_from_int(arg2));
+	return dc_fixpt_mul(arg1,
+				  dc_fixpt_from_int(arg2));
 }
 
 /*
  * @brief
  * result = square(arg) := arg * arg
  */
-struct fixed31_32 dal_fixed31_32_sqr(
+struct fixed31_32 dc_fixpt_sqr(
 	struct fixed31_32 arg);
 
 /*
  * @brief
  * result = arg1 / arg2
  */
-static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1,
+static inline struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1,
 						       long long arg2)
 {
-	return dal_fixed31_32_from_fraction(arg1.value,
-					    dal_fixed31_32_from_int(arg2).value);
+	return dc_fixpt_from_fraction(arg1.value,
+					    dc_fixpt_from_int(arg2).value);
 }
 
 /*
  * @brief
  * result = arg1 / arg2
  */
-static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1,
+static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1,
 						   struct fixed31_32 arg2)
 {
-	return dal_fixed31_32_from_fraction(arg1.value,
+	return dc_fixpt_from_fraction(arg1.value,
 					    arg2.value);
 }
 
@@ -336,7 +336,7 @@ static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1,
  * @note
  * No special actions taken in case argument is zero.
  */
-struct fixed31_32 dal_fixed31_32_recip(
+struct fixed31_32 dc_fixpt_recip(
 	struct fixed31_32 arg);
 
 /*
@@ -352,7 +352,7 @@ struct fixed31_32 dal_fixed31_32_recip(
  * Argument specified in radians,
  * internally it's normalized to [-2pi...2pi] range.
  */
-struct fixed31_32 dal_fixed31_32_sinc(
+struct fixed31_32 dc_fixpt_sinc(
 	struct fixed31_32 arg);
 
 /*
@@ -363,7 +363,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
  * Argument specified in radians,
  * internally it's normalized to [-2pi...2pi] range.
  */
-struct fixed31_32 dal_fixed31_32_sin(
+struct fixed31_32 dc_fixpt_sin(
 	struct fixed31_32 arg);
 
 /*
@@ -376,7 +376,7 @@ struct fixed31_32 dal_fixed31_32_sin(
  * passing arguments outside that range
  * will cause incorrect result!
  */
-struct fixed31_32 dal_fixed31_32_cos(
+struct fixed31_32 dc_fixpt_cos(
 	struct fixed31_32 arg);
 
 /*
@@ -391,7 +391,7 @@ struct fixed31_32 dal_fixed31_32_cos(
  * @note
  * Currently, function is verified for abs(arg) <= 1.
  */
-struct fixed31_32 dal_fixed31_32_exp(
+struct fixed31_32 dc_fixpt_exp(
 	struct fixed31_32 arg);
 
 /*
@@ -404,7 +404,7 @@ struct fixed31_32 dal_fixed31_32_exp(
  * Currently, no special actions taken
  * in case of invalid argument(s). Take care!
  */
-struct fixed31_32 dal_fixed31_32_log(
+struct fixed31_32 dc_fixpt_log(
 	struct fixed31_32 arg);
 
 /*
@@ -419,7 +419,7 @@ struct fixed31_32 dal_fixed31_32_log(
  * @note
  * Currently, abs(arg1) should be less than 1. Take care!
  */
-struct fixed31_32 dal_fixed31_32_pow(
+struct fixed31_32 dc_fixpt_pow(
 	struct fixed31_32 arg1,
 	struct fixed31_32 arg2);
 
@@ -432,21 +432,21 @@ struct fixed31_32 dal_fixed31_32_pow(
  * @brief
  * result = floor(arg) := greatest integer lower than or equal to arg
  */
-int dal_fixed31_32_floor(
+int dc_fixpt_floor(
 	struct fixed31_32 arg);
 
 /*
  * @brief
  * result = round(arg) := integer nearest to arg
  */
-int dal_fixed31_32_round(
+int dc_fixpt_round(
 	struct fixed31_32 arg);
 
 /*
  * @brief
  * result = ceil(arg) := lowest integer greater than or equal to arg
  */
-int dal_fixed31_32_ceil(
+int dc_fixpt_ceil(
 	struct fixed31_32 arg);
 
 /* the following two function are used in scaler hw programming to convert fixed
@@ -455,20 +455,20 @@ int dal_fixed31_32_ceil(
  * fractional
  */
 
-unsigned int dal_fixed31_32_u2d19(
+unsigned int dc_fixpt_u2d19(
 	struct fixed31_32 arg);
 
-unsigned int dal_fixed31_32_u0d19(
+unsigned int dc_fixpt_u0d19(
 	struct fixed31_32 arg);
 
 
-unsigned int dal_fixed31_32_clamp_u0d14(
+unsigned int dc_fixpt_clamp_u0d14(
 	struct fixed31_32 arg);
 
-unsigned int dal_fixed31_32_clamp_u0d10(
+unsigned int dc_fixpt_clamp_u0d10(
 	struct fixed31_32 arg);
 
-int dal_fixed31_32_s4d19(
+int dc_fixpt_s4d19(
 	struct fixed31_32 arg);
 
 #endif
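
For reference while reviewing the rename, a hypothetical caller of the dc_fixpt_* names declared above (not part of the patch; it assumes the normal DC build environment so that fixed31_32.h and its os_types.h dependency resolve):

#include "fixed31_32.h"

/* hypothetical helper: average two fixed31_32 values and return the floor */
static int dc_fixpt_avg_floor(struct fixed31_32 a, struct fixed31_32 b)
{
	struct fixed31_32 sum = dc_fixpt_add(a, b);
	struct fixed31_32 avg = dc_fixpt_div_int(sum, 2);

	return dc_fixpt_floor(avg);
}

For example, dc_fixpt_avg_floor(dc_fixpt_one, dc_fixpt_from_int(2)) evaluates to 1.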
diff --git a/drivers/gpu/drm/amd/display/include/fixed32_32.h b/drivers/gpu/drm/amd/display/include/fixed32_32.h
deleted file mode 100644
index 9c70341fe026..000000000000
--- a/drivers/gpu/drm/amd/display/include/fixed32_32.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-
-#ifndef __DAL_FIXED32_32_H__
-#define __DAL_FIXED32_32_H__
-
-#include "os_types.h"
-
-struct fixed32_32 {
-	uint64_t value;
-};
-
-static const struct fixed32_32 dal_fixed32_32_zero = { 0 };
-static const struct fixed32_32 dal_fixed32_32_one = { 0x100000000LL };
-static const struct fixed32_32 dal_fixed32_32_half = { 0x80000000LL };
-
-struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d);
-static inline struct fixed32_32 dal_fixed32_32_from_int(uint32_t value)
-{
-	struct fixed32_32 fx;
-
-	fx.value = (uint64_t)value<<32;
-	return fx;
-}
-
-struct fixed32_32 dal_fixed32_32_add(
-	struct fixed32_32 lhs,
-	struct fixed32_32 rhs);
-struct fixed32_32 dal_fixed32_32_add_int(
-	struct fixed32_32 lhs,
-	uint32_t rhs);
-struct fixed32_32 dal_fixed32_32_sub(
-	struct fixed32_32 lhs,
-	struct fixed32_32 rhs);
-struct fixed32_32 dal_fixed32_32_sub_int(
-	struct fixed32_32 lhs,
-	uint32_t rhs);
-struct fixed32_32 dal_fixed32_32_mul(
-	struct fixed32_32 lhs,
-	struct fixed32_32 rhs);
-struct fixed32_32 dal_fixed32_32_mul_int(
-	struct fixed32_32 lhs,
-	uint32_t rhs);
-struct fixed32_32 dal_fixed32_32_div(
-	struct fixed32_32 lhs,
-	struct fixed32_32 rhs);
-struct fixed32_32 dal_fixed32_32_div_int(
-	struct fixed32_32 lhs,
-	uint32_t rhs);
-
-static inline struct fixed32_32 dal_fixed32_32_min(struct fixed32_32 lhs,
-						   struct fixed32_32 rhs)
-{
-	return (lhs.value < rhs.value) ? lhs : rhs;
-}
-
-static inline struct fixed32_32 dal_fixed32_32_max(struct fixed32_32 lhs,
-						   struct fixed32_32 rhs)
-{
-	return (lhs.value > rhs.value) ? lhs : rhs;
-}
-
-static inline bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs)
-{
-	return lhs.value > rhs.value;
-}
-
-static inline bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs)
-{
-	return lhs.value > ((uint64_t)rhs<<32);
-}
-
-static inline bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs)
-{
-	return lhs.value < rhs.value;
-}
-
-static inline bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs)
-{
-	return lhs.value < ((uint64_t)rhs<<32);
-}
-
-static inline bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs)
-{
-	return lhs.value <= rhs.value;
-}
-
-static inline bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs)
-{
-	return lhs.value <= ((uint64_t)rhs<<32);
-}
-
-static inline bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs)
-{
-	return lhs.value == rhs.value;
-}
-
-uint32_t dal_fixed32_32_ceil(struct fixed32_32 value);
-static inline uint32_t dal_fixed32_32_floor(struct fixed32_32 value)
-{
-	return value.value>>32;
-}
-
-uint32_t dal_fixed32_32_round(struct fixed32_32 value);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 15e5b72e6e00..29d2ec82b924 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -43,7 +43,7 @@ static bool de_pq_initialized; /* = false; */
 /* one-time setup of X points */
 void setup_x_points_distribution(void)
 {
-	struct fixed31_32 region_size = dal_fixed31_32_from_int(128);
+	struct fixed31_32 region_size = dc_fixpt_from_int(128);
 	int32_t segment;
 	uint32_t seg_offset;
 	uint32_t index;
@@ -53,8 +53,8 @@ void setup_x_points_distribution(void)
 	coordinates_x[MAX_HW_POINTS + 1].x = region_size;
 
 	for (segment = 6; segment > (6 - NUM_REGIONS); segment--) {
-		region_size = dal_fixed31_32_div_int(region_size, 2);
-		increment = dal_fixed31_32_div_int(region_size,
+		region_size = dc_fixpt_div_int(region_size, 2);
+		increment = dc_fixpt_div_int(region_size,
 						NUM_PTS_IN_REGION);
 		seg_offset = (segment + (NUM_REGIONS - 7)) * NUM_PTS_IN_REGION;
 		coordinates_x[seg_offset].x = region_size;
@@ -62,7 +62,7 @@ void setup_x_points_distribution(void)
 		for (index = seg_offset + 1;
 				index < seg_offset + NUM_PTS_IN_REGION;
 				index++) {
-			coordinates_x[index].x = dal_fixed31_32_add
+			coordinates_x[index].x = dc_fixpt_add
 					(coordinates_x[index-1].x, increment);
 		}
 	}
@@ -72,63 +72,63 @@ static void compute_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
 {
 	/* consts for PQ gamma formula. */
 	const struct fixed31_32 m1 =
-		dal_fixed31_32_from_fraction(159301758, 1000000000);
+		dc_fixpt_from_fraction(159301758, 1000000000);
 	const struct fixed31_32 m2 =
-		dal_fixed31_32_from_fraction(7884375, 100000);
+		dc_fixpt_from_fraction(7884375, 100000);
 	const struct fixed31_32 c1 =
-		dal_fixed31_32_from_fraction(8359375, 10000000);
+		dc_fixpt_from_fraction(8359375, 10000000);
 	const struct fixed31_32 c2 =
-		dal_fixed31_32_from_fraction(188515625, 10000000);
+		dc_fixpt_from_fraction(188515625, 10000000);
 	const struct fixed31_32 c3 =
-		dal_fixed31_32_from_fraction(186875, 10000);
+		dc_fixpt_from_fraction(186875, 10000);
 
 	struct fixed31_32 l_pow_m1;
 	struct fixed31_32 base;
 
-	if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero))
-		in_x = dal_fixed31_32_zero;
+	if (dc_fixpt_lt(in_x, dc_fixpt_zero))
+		in_x = dc_fixpt_zero;
 
-	l_pow_m1 = dal_fixed31_32_pow(in_x, m1);
-	base = dal_fixed31_32_div(
-			dal_fixed31_32_add(c1,
-					(dal_fixed31_32_mul(c2, l_pow_m1))),
-			dal_fixed31_32_add(dal_fixed31_32_one,
-					(dal_fixed31_32_mul(c3, l_pow_m1))));
-	*out_y = dal_fixed31_32_pow(base, m2);
+	l_pow_m1 = dc_fixpt_pow(in_x, m1);
+	base = dc_fixpt_div(
+			dc_fixpt_add(c1,
+					(dc_fixpt_mul(c2, l_pow_m1))),
+			dc_fixpt_add(dc_fixpt_one,
+					(dc_fixpt_mul(c3, l_pow_m1))));
+	*out_y = dc_fixpt_pow(base, m2);
 }
 
 static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
 {
 	/* consts for dePQ gamma formula. */
 	const struct fixed31_32 m1 =
-		dal_fixed31_32_from_fraction(159301758, 1000000000);
+		dc_fixpt_from_fraction(159301758, 1000000000);
 	const struct fixed31_32 m2 =
-		dal_fixed31_32_from_fraction(7884375, 100000);
+		dc_fixpt_from_fraction(7884375, 100000);
 	const struct fixed31_32 c1 =
-		dal_fixed31_32_from_fraction(8359375, 10000000);
+		dc_fixpt_from_fraction(8359375, 10000000);
 	const struct fixed31_32 c2 =
-		dal_fixed31_32_from_fraction(188515625, 10000000);
+		dc_fixpt_from_fraction(188515625, 10000000);
 	const struct fixed31_32 c3 =
-		dal_fixed31_32_from_fraction(186875, 10000);
+		dc_fixpt_from_fraction(186875, 10000);
 
 	struct fixed31_32 l_pow_m1;
 	struct fixed31_32 base, div;
 
 
-	if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero))
-		in_x = dal_fixed31_32_zero;
+	if (dc_fixpt_lt(in_x, dc_fixpt_zero))
+		in_x = dc_fixpt_zero;
 
-	l_pow_m1 = dal_fixed31_32_pow(in_x,
-			dal_fixed31_32_div(dal_fixed31_32_one, m2));
-	base = dal_fixed31_32_sub(l_pow_m1, c1);
+	l_pow_m1 = dc_fixpt_pow(in_x,
+			dc_fixpt_div(dc_fixpt_one, m2));
+	base = dc_fixpt_sub(l_pow_m1, c1);
 
-	if (dal_fixed31_32_lt(base, dal_fixed31_32_zero))
-		base = dal_fixed31_32_zero;
+	if (dc_fixpt_lt(base, dc_fixpt_zero))
+		base = dc_fixpt_zero;
 
-	div = dal_fixed31_32_sub(c2, dal_fixed31_32_mul(c3, l_pow_m1));
+	div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1));
 
-	*out_y = dal_fixed31_32_pow(dal_fixed31_32_div(base, div),
-			dal_fixed31_32_div(dal_fixed31_32_one, m1));
+	*out_y = dc_fixpt_pow(dc_fixpt_div(base, div),
+			dc_fixpt_div(dc_fixpt_one, m1));
 
 }
 /* one-time pre-compute PQ values - only for sdr_white_level 80 */
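
Written out, compute_pq() above evaluates

	out_y = ((c1 + c2 * L^m1) / (1 + c3 * L^m1))^m2,  where L = max(in_x, 0)

with m1 = 0.159301758, m2 = 78.84375, c1 = 0.8359375, c2 = 18.8515625 and c3 = 18.6875 taken from the dc_fixpt_from_fraction() constants; these are the standard SMPTE ST 2084 (PQ) constants to the precision used here, and compute_de_pq() applies the same constants in the inverse direction.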
@@ -138,14 +138,14 @@ void precompute_pq(void)
 	struct fixed31_32 x;
 	const struct hw_x_point *coord_x = coordinates_x + 32;
 	struct fixed31_32 scaling_factor =
-			dal_fixed31_32_from_fraction(80, 10000);
+			dc_fixpt_from_fraction(80, 10000);
 
 	/* pow function has problems with arguments too small */
 	for (i = 0; i < 32; i++)
-		pq_table[i] = dal_fixed31_32_zero;
+		pq_table[i] = dc_fixpt_zero;
 
 	for (i = 32; i <= MAX_HW_POINTS; i++) {
-		x = dal_fixed31_32_mul(coord_x->x, scaling_factor);
+		x = dc_fixpt_mul(coord_x->x, scaling_factor);
 		compute_pq(x, &pq_table[i]);
 		++coord_x;
 	}
@@ -158,7 +158,7 @@ void precompute_de_pq(void)
 	struct fixed31_32  y;
 	uint32_t begin_index, end_index;
 
-	struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125);
+	struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
 
 	/* X points is 2^-25 to 2^7
 	 * De-gamma X is 2^-12 to 2^0 – we are skipping first -12-(-25) = 13 regions
@@ -167,11 +167,11 @@ void precompute_de_pq(void)
 	end_index = begin_index + 12 * NUM_PTS_IN_REGION;
 
 	for (i = 0; i <= begin_index; i++)
-		de_pq_table[i] = dal_fixed31_32_zero;
+		de_pq_table[i] = dc_fixpt_zero;
 
 	for (; i <= end_index; i++) {
 		compute_de_pq(coordinates_x[i].x, &y);
-		de_pq_table[i] = dal_fixed31_32_mul(y, scaling_factor);
+		de_pq_table[i] = dc_fixpt_mul(y, scaling_factor);
 	}
 
 	for (; i <= MAX_HW_POINTS; i++)
@@ -195,15 +195,15 @@ static void build_coefficients(struct gamma_coefficients *coefficients, bool is_
 	uint32_t index = is_2_4 == true ? 0:1;
 
 	do {
-		coefficients->a0[i] = dal_fixed31_32_from_fraction(
+		coefficients->a0[i] = dc_fixpt_from_fraction(
 			numerator01[index], 10000000);
-		coefficients->a1[i] = dal_fixed31_32_from_fraction(
+		coefficients->a1[i] = dc_fixpt_from_fraction(
 			numerator02[index], 1000);
-		coefficients->a2[i] = dal_fixed31_32_from_fraction(
+		coefficients->a2[i] = dc_fixpt_from_fraction(
 			numerator03[index], 1000);
-		coefficients->a3[i] = dal_fixed31_32_from_fraction(
+		coefficients->a3[i] = dc_fixpt_from_fraction(
 			numerator04[index], 1000);
-		coefficients->user_gamma[i] = dal_fixed31_32_from_fraction(
+		coefficients->user_gamma[i] = dc_fixpt_from_fraction(
 			numerator05[index], 1000);
 
 		++i;
@@ -218,33 +218,33 @@ static struct fixed31_32 translate_from_linear_space(
 	struct fixed31_32 a3,
 	struct fixed31_32 gamma)
 {
-	const struct fixed31_32 one = dal_fixed31_32_from_int(1);
+	const struct fixed31_32 one = dc_fixpt_from_int(1);
 
-	if (dal_fixed31_32_lt(one, arg))
+	if (dc_fixpt_lt(one, arg))
 		return one;
 
-	if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
-		return dal_fixed31_32_sub(
+	if (dc_fixpt_le(arg, dc_fixpt_neg(a0)))
+		return dc_fixpt_sub(
 			a2,
-			dal_fixed31_32_mul(
-				dal_fixed31_32_add(
+			dc_fixpt_mul(
+				dc_fixpt_add(
 					one,
 					a3),
-				dal_fixed31_32_pow(
-					dal_fixed31_32_neg(arg),
-					dal_fixed31_32_recip(gamma))));
-	else if (dal_fixed31_32_le(a0, arg))
-		return dal_fixed31_32_sub(
-			dal_fixed31_32_mul(
-				dal_fixed31_32_add(
+				dc_fixpt_pow(
+					dc_fixpt_neg(arg),
+					dc_fixpt_recip(gamma))));
+	else if (dc_fixpt_le(a0, arg))
+		return dc_fixpt_sub(
+			dc_fixpt_mul(
+				dc_fixpt_add(
 					one,
 					a3),
-				dal_fixed31_32_pow(
+				dc_fixpt_pow(
 					arg,
-					dal_fixed31_32_recip(gamma))),
+					dc_fixpt_recip(gamma))),
 			a2);
 	else
-		return dal_fixed31_32_mul(
+		return dc_fixpt_mul(
 			arg,
 			a1);
 }
@@ -259,25 +259,25 @@ static struct fixed31_32 translate_to_linear_space(
 {
 	struct fixed31_32 linear;
 
-	a0 = dal_fixed31_32_mul(a0, a1);
-	if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
+	a0 = dc_fixpt_mul(a0, a1);
+	if (dc_fixpt_le(arg, dc_fixpt_neg(a0)))
 
-		linear = dal_fixed31_32_neg(
-				 dal_fixed31_32_pow(
-				 dal_fixed31_32_div(
-				 dal_fixed31_32_sub(a2, arg),
-				 dal_fixed31_32_add(
-				 dal_fixed31_32_one, a3)), gamma));
+		linear = dc_fixpt_neg(
+				 dc_fixpt_pow(
+				 dc_fixpt_div(
+				 dc_fixpt_sub(a2, arg),
+				 dc_fixpt_add(
+				 dc_fixpt_one, a3)), gamma));
 
-	else if (dal_fixed31_32_le(dal_fixed31_32_neg(a0), arg) &&
-			 dal_fixed31_32_le(arg, a0))
-		linear = dal_fixed31_32_div(arg, a1);
+	else if (dc_fixpt_le(dc_fixpt_neg(a0), arg) &&
+			 dc_fixpt_le(arg, a0))
+		linear = dc_fixpt_div(arg, a1);
 	else
-		linear =  dal_fixed31_32_pow(
-					dal_fixed31_32_div(
-					dal_fixed31_32_add(a2, arg),
-					dal_fixed31_32_add(
-					dal_fixed31_32_one, a3)), gamma);
+		linear =  dc_fixpt_pow(
+					dc_fixpt_div(
+					dc_fixpt_add(a2, arg),
+					dc_fixpt_add(
+					dc_fixpt_one, a3)), gamma);
 
 	return linear;
 }
@@ -352,8 +352,8 @@ static bool find_software_points(
 				right = axis_x[max_number - 1].b;
 		}
 
-		if (dal_fixed31_32_le(left, hw_point) &&
-			dal_fixed31_32_le(hw_point, right)) {
+		if (dc_fixpt_le(left, hw_point) &&
+			dc_fixpt_le(hw_point, right)) {
 			*index_to_start = i;
 			*index_left = i;
 
@@ -366,7 +366,7 @@ static bool find_software_points(
 
 			return true;
 		} else if ((i == *index_to_start) &&
-			dal_fixed31_32_le(hw_point, left)) {
+			dc_fixpt_le(hw_point, left)) {
 			*index_to_start = i;
 			*index_left = i;
 			*index_right = i;
@@ -375,7 +375,7 @@ static bool find_software_points(
 
 			return true;
 		} else if ((i == max_number - 1) &&
-			dal_fixed31_32_le(right, hw_point)) {
+			dc_fixpt_le(right, hw_point)) {
 			*index_to_start = i;
 			*index_left = i;
 			*index_right = i;
@@ -457,17 +457,17 @@ static bool build_custom_gamma_mapping_coefficients_worker(
 		}
 
 		if (hw_pos == HW_POINT_POSITION_MIDDLE)
-			point->coeff = dal_fixed31_32_div(
-				dal_fixed31_32_sub(
+			point->coeff = dc_fixpt_div(
+				dc_fixpt_sub(
 					coord_x,
 					left_pos),
-				dal_fixed31_32_sub(
+				dc_fixpt_sub(
 					right_pos,
 					left_pos));
 		else if (hw_pos == HW_POINT_POSITION_LEFT)
-			point->coeff = dal_fixed31_32_zero;
+			point->coeff = dc_fixpt_zero;
 		else if (hw_pos == HW_POINT_POSITION_RIGHT)
-			point->coeff = dal_fixed31_32_from_int(2);
+			point->coeff = dc_fixpt_from_int(2);
 		else {
 			BREAK_TO_DEBUGGER();
 			return false;
@@ -502,45 +502,45 @@ static struct fixed31_32 calculate_mapped_value(
 
 	if ((point->left_index < 0) || (point->left_index > max_index)) {
 		BREAK_TO_DEBUGGER();
-		return dal_fixed31_32_zero;
+		return dc_fixpt_zero;
 	}
 
 	if ((point->right_index < 0) || (point->right_index > max_index)) {
 		BREAK_TO_DEBUGGER();
-		return dal_fixed31_32_zero;
+		return dc_fixpt_zero;
 	}
 
 	if (point->pos == HW_POINT_POSITION_MIDDLE)
 		if (channel == CHANNEL_NAME_RED)
-			result = dal_fixed31_32_add(
-				dal_fixed31_32_mul(
+			result = dc_fixpt_add(
+				dc_fixpt_mul(
 					point->coeff,
-					dal_fixed31_32_sub(
+					dc_fixpt_sub(
 						rgb[point->right_index].r,
 						rgb[point->left_index].r)),
 				rgb[point->left_index].r);
 		else if (channel == CHANNEL_NAME_GREEN)
-			result = dal_fixed31_32_add(
-				dal_fixed31_32_mul(
+			result = dc_fixpt_add(
+				dc_fixpt_mul(
 					point->coeff,
-					dal_fixed31_32_sub(
+					dc_fixpt_sub(
 						rgb[point->right_index].g,
 						rgb[point->left_index].g)),
 				rgb[point->left_index].g);
 		else
-			result = dal_fixed31_32_add(
-				dal_fixed31_32_mul(
+			result = dc_fixpt_add(
+				dc_fixpt_mul(
 					point->coeff,
-					dal_fixed31_32_sub(
+					dc_fixpt_sub(
 						rgb[point->right_index].b,
 						rgb[point->left_index].b)),
 				rgb[point->left_index].b);
 	else if (point->pos == HW_POINT_POSITION_LEFT) {
 		BREAK_TO_DEBUGGER();
-		result = dal_fixed31_32_zero;
+		result = dc_fixpt_zero;
 	} else {
 		BREAK_TO_DEBUGGER();
-		result = dal_fixed31_32_one;
+		result = dc_fixpt_one;
 	}
 
 	return result;
@@ -558,7 +558,7 @@ static void build_pq(struct pwl_float_data_ex *rgb_regamma,
 	struct fixed31_32 x;
 	struct fixed31_32 output;
 	struct fixed31_32 scaling_factor =
-			dal_fixed31_32_from_fraction(sdr_white_level, 10000);
+			dc_fixpt_from_fraction(sdr_white_level, 10000);
 
 	if (!pq_initialized && sdr_white_level == 80) {
 		precompute_pq();
@@ -579,15 +579,15 @@ static void build_pq(struct pwl_float_data_ex *rgb_regamma,
 		if (sdr_white_level == 80) {
 			output = pq_table[i];
 		} else {
-			x = dal_fixed31_32_mul(coord_x->x, scaling_factor);
+			x = dc_fixpt_mul(coord_x->x, scaling_factor);
 			compute_pq(x, &output);
 		}
 
 		/* should really not happen? */
-		if (dal_fixed31_32_lt(output, dal_fixed31_32_zero))
-			output = dal_fixed31_32_zero;
-		else if (dal_fixed31_32_lt(dal_fixed31_32_one, output))
-			output = dal_fixed31_32_one;
+		if (dc_fixpt_lt(output, dc_fixpt_zero))
+			output = dc_fixpt_zero;
+		else if (dc_fixpt_lt(dc_fixpt_one, output))
+			output = dc_fixpt_one;
 
 		rgb->r = output;
 		rgb->g = output;
@@ -605,7 +605,7 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
 	uint32_t i;
 	struct fixed31_32 output;
 
-	struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125);
+	struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
 
 	if (!de_pq_initialized) {
 		precompute_de_pq();
@@ -616,9 +616,9 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
 	for (i = 0; i <= hw_points_num; i++) {
 		output = de_pq_table[i];
 		/* should really not happen? */
-		if (dal_fixed31_32_lt(output, dal_fixed31_32_zero))
-			output = dal_fixed31_32_zero;
-		else if (dal_fixed31_32_lt(scaling_factor, output))
+		if (dc_fixpt_lt(output, dc_fixpt_zero))
+			output = dc_fixpt_zero;
+		else if (dc_fixpt_lt(scaling_factor, output))
 			output = scaling_factor;
 		de_pq[i].r = output;
 		de_pq[i].g = output;
@@ -670,9 +670,9 @@ static void build_degamma(struct pwl_float_data_ex *curve,
 	end_index = begin_index + 12 * NUM_PTS_IN_REGION;
 
 	while (i != begin_index) {
-		curve[i].r = dal_fixed31_32_zero;
-		curve[i].g = dal_fixed31_32_zero;
-		curve[i].b = dal_fixed31_32_zero;
+		curve[i].r = dc_fixpt_zero;
+		curve[i].g = dc_fixpt_zero;
+		curve[i].b = dc_fixpt_zero;
 		i++;
 	}
 
@@ -684,9 +684,9 @@ static void build_degamma(struct pwl_float_data_ex *curve,
 		i++;
 	}
 	while (i != hw_points_num + 1) {
-		curve[i].r = dal_fixed31_32_one;
-		curve[i].g = dal_fixed31_32_one;
-		curve[i].b = dal_fixed31_32_one;
+		curve[i].r = dc_fixpt_one;
+		curve[i].g = dc_fixpt_one;
+		curve[i].b = dc_fixpt_one;
 		i++;
 	}
 }
@@ -695,8 +695,8 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb,
 		const struct dc_gamma *ramp,
 		struct dividers dividers)
 {
-	const struct fixed31_32 max_driver = dal_fixed31_32_from_int(0xFFFF);
-	const struct fixed31_32 max_os = dal_fixed31_32_from_int(0xFF00);
+	const struct fixed31_32 max_driver = dc_fixpt_from_int(0xFFFF);
+	const struct fixed31_32 max_os = dc_fixpt_from_int(0xFF00);
 	struct fixed31_32 scaler = max_os;
 	uint32_t i;
 	struct pwl_float_data *rgb = pwl_rgb;
@@ -705,9 +705,9 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb,
 	i = 0;
 
 	do {
-		if (dal_fixed31_32_lt(max_os, ramp->entries.red[i]) ||
-			dal_fixed31_32_lt(max_os, ramp->entries.green[i]) ||
-			dal_fixed31_32_lt(max_os, ramp->entries.blue[i])) {
+		if (dc_fixpt_lt(max_os, ramp->entries.red[i]) ||
+			dc_fixpt_lt(max_os, ramp->entries.green[i]) ||
+			dc_fixpt_lt(max_os, ramp->entries.blue[i])) {
 			scaler = max_driver;
 			break;
 		}
@@ -717,40 +717,40 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb,
 	i = 0;
 
 	do {
-		rgb->r = dal_fixed31_32_div(
+		rgb->r = dc_fixpt_div(
 			ramp->entries.red[i], scaler);
-		rgb->g = dal_fixed31_32_div(
+		rgb->g = dc_fixpt_div(
 			ramp->entries.green[i], scaler);
-		rgb->b = dal_fixed31_32_div(
+		rgb->b = dc_fixpt_div(
 			ramp->entries.blue[i], scaler);
 
 		++rgb;
 		++i;
 	} while (i != ramp->num_entries);
 
-	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+	rgb->r = dc_fixpt_mul(rgb_last->r,
 			dividers.divider1);
-	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+	rgb->g = dc_fixpt_mul(rgb_last->g,
 			dividers.divider1);
-	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+	rgb->b = dc_fixpt_mul(rgb_last->b,
 			dividers.divider1);
 
 	++rgb;
 
-	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+	rgb->r = dc_fixpt_mul(rgb_last->r,
 			dividers.divider2);
-	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+	rgb->g = dc_fixpt_mul(rgb_last->g,
 			dividers.divider2);
-	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+	rgb->b = dc_fixpt_mul(rgb_last->b,
 			dividers.divider2);
 
 	++rgb;
 
-	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+	rgb->r = dc_fixpt_mul(rgb_last->r,
 			dividers.divider3);
-	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+	rgb->g = dc_fixpt_mul(rgb_last->g,
 			dividers.divider3);
-	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+	rgb->b = dc_fixpt_mul(rgb_last->b,
 			dividers.divider3);
 }
 
@@ -759,62 +759,62 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
 		struct dividers dividers)
 {
 	uint32_t i;
-	struct fixed31_32 min = dal_fixed31_32_zero;
-	struct fixed31_32 max = dal_fixed31_32_one;
+	struct fixed31_32 min = dc_fixpt_zero;
+	struct fixed31_32 max = dc_fixpt_one;
 
-	struct fixed31_32 delta = dal_fixed31_32_zero;
-	struct fixed31_32 offset = dal_fixed31_32_zero;
+	struct fixed31_32 delta = dc_fixpt_zero;
+	struct fixed31_32 offset = dc_fixpt_zero;
 
 	for (i = 0 ; i < ramp->num_entries; i++) {
-		if (dal_fixed31_32_lt(ramp->entries.red[i], min))
+		if (dc_fixpt_lt(ramp->entries.red[i], min))
 			min = ramp->entries.red[i];
 
-		if (dal_fixed31_32_lt(ramp->entries.green[i], min))
+		if (dc_fixpt_lt(ramp->entries.green[i], min))
 			min = ramp->entries.green[i];
 
-		if (dal_fixed31_32_lt(ramp->entries.blue[i], min))
+		if (dc_fixpt_lt(ramp->entries.blue[i], min))
 			min = ramp->entries.blue[i];
 
-		if (dal_fixed31_32_lt(max, ramp->entries.red[i]))
+		if (dc_fixpt_lt(max, ramp->entries.red[i]))
 			max = ramp->entries.red[i];
 
-		if (dal_fixed31_32_lt(max, ramp->entries.green[i]))
+		if (dc_fixpt_lt(max, ramp->entries.green[i]))
 			max = ramp->entries.green[i];
 
-		if (dal_fixed31_32_lt(max, ramp->entries.blue[i]))
+		if (dc_fixpt_lt(max, ramp->entries.blue[i]))
 			max = ramp->entries.blue[i];
 	}
 
-	if (dal_fixed31_32_lt(min, dal_fixed31_32_zero))
-		delta = dal_fixed31_32_neg(min);
+	if (dc_fixpt_lt(min, dc_fixpt_zero))
+		delta = dc_fixpt_neg(min);
 
-	offset = dal_fixed31_32_add(min, max);
+	offset = dc_fixpt_add(min, max);
 
 	for (i = 0 ; i < ramp->num_entries; i++) {
-		pwl_rgb[i].r = dal_fixed31_32_div(
-			dal_fixed31_32_add(
+		pwl_rgb[i].r = dc_fixpt_div(
+			dc_fixpt_add(
 				ramp->entries.red[i], delta), offset);
-		pwl_rgb[i].g = dal_fixed31_32_div(
-			dal_fixed31_32_add(
+		pwl_rgb[i].g = dc_fixpt_div(
+			dc_fixpt_add(
 				ramp->entries.green[i], delta), offset);
-		pwl_rgb[i].b = dal_fixed31_32_div(
-			dal_fixed31_32_add(
+		pwl_rgb[i].b = dc_fixpt_div(
+			dc_fixpt_add(
 				ramp->entries.blue[i], delta), offset);
 
 	}
 
-	pwl_rgb[i].r =  dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+	pwl_rgb[i].r =  dc_fixpt_sub(dc_fixpt_mul_int(
 				pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
-	pwl_rgb[i].g =  dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+	pwl_rgb[i].g =  dc_fixpt_sub(dc_fixpt_mul_int(
 				pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
-	pwl_rgb[i].b =  dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+	pwl_rgb[i].b =  dc_fixpt_sub(dc_fixpt_mul_int(
 				pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
 	++i;
-	pwl_rgb[i].r =  dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+	pwl_rgb[i].r =  dc_fixpt_sub(dc_fixpt_mul_int(
 				pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
-	pwl_rgb[i].g =  dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+	pwl_rgb[i].g =  dc_fixpt_sub(dc_fixpt_mul_int(
 				pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
-	pwl_rgb[i].b =  dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+	pwl_rgb[i].b =  dc_fixpt_sub(dc_fixpt_mul_int(
 				pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
 }
 
@@ -846,40 +846,40 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
 
 	i = 0;
 	do {
-		rgb->r = dal_fixed31_32_from_fraction(
+		rgb->r = dc_fixpt_from_fraction(
 				ramp->gamma[i], scaler);
-		rgb->g = dal_fixed31_32_from_fraction(
+		rgb->g = dc_fixpt_from_fraction(
 				ramp->gamma[i + 256], scaler);
-		rgb->b = dal_fixed31_32_from_fraction(
+		rgb->b = dc_fixpt_from_fraction(
 				ramp->gamma[i + 512], scaler);
 
 		++rgb;
 		++i;
 	} while (i != GAMMA_RGB_256_ENTRIES);
 
-	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+	rgb->r = dc_fixpt_mul(rgb_last->r,
 			dividers.divider1);
-	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+	rgb->g = dc_fixpt_mul(rgb_last->g,
 			dividers.divider1);
-	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+	rgb->b = dc_fixpt_mul(rgb_last->b,
 			dividers.divider1);
 
 	++rgb;
 
-	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+	rgb->r = dc_fixpt_mul(rgb_last->r,
 			dividers.divider2);
-	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+	rgb->g = dc_fixpt_mul(rgb_last->g,
 			dividers.divider2);
-	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+	rgb->b = dc_fixpt_mul(rgb_last->b,
 			dividers.divider2);
 
 	++rgb;
 
-	rgb->r = dal_fixed31_32_mul(rgb_last->r,
+	rgb->r = dc_fixpt_mul(rgb_last->r,
 			dividers.divider3);
-	rgb->g = dal_fixed31_32_mul(rgb_last->g,
+	rgb->g = dc_fixpt_mul(rgb_last->g,
 			dividers.divider3);
-	rgb->b = dal_fixed31_32_mul(rgb_last->b,
+	rgb->b = dc_fixpt_mul(rgb_last->b,
 			dividers.divider3);
 }
 
@@ -913,7 +913,7 @@ static void apply_lut_1d(
 	struct fixed31_32 lut2;
 	const int max_lut_index = 4095;
 	const struct fixed31_32 max_lut_index_f =
-			dal_fixed31_32_from_int_nonconst(max_lut_index);
+			dc_fixpt_from_int_nonconst(max_lut_index);
 	int32_t index = 0, index_next = 0;
 	struct fixed31_32 index_f;
 	struct fixed31_32 delta_lut;
@@ -931,10 +931,10 @@ static void apply_lut_1d(
 			else
 				regamma_y = &tf_pts->blue[i];
 
-			norm_y = dal_fixed31_32_mul(max_lut_index_f,
+			norm_y = dc_fixpt_mul(max_lut_index_f,
 						   *regamma_y);
-			index = dal_fixed31_32_floor(norm_y);
-			index_f = dal_fixed31_32_from_int_nonconst(index);
+			index = dc_fixpt_floor(norm_y);
+			index_f = dc_fixpt_from_int_nonconst(index);
 
 			if (index < 0 || index > max_lut_index)
 				continue;
@@ -953,11 +953,11 @@ static void apply_lut_1d(
 			}
 
 			// we have everything now, so interpolate
-			delta_lut = dal_fixed31_32_sub(lut2, lut1);
-			delta_index = dal_fixed31_32_sub(norm_y, index_f);
+			delta_lut = dc_fixpt_sub(lut2, lut1);
+			delta_index = dc_fixpt_sub(norm_y, index_f);
 
-			*regamma_y = dal_fixed31_32_add(lut1,
-				dal_fixed31_32_mul(delta_index, delta_lut));
+			*regamma_y = dc_fixpt_add(lut1,
+				dc_fixpt_mul(delta_index, delta_lut));
 		}
 	}
 }
@@ -973,7 +973,7 @@ static void build_evenly_distributed_points(
 	uint32_t i = 0;
 
 	do {
-		struct fixed31_32 value = dal_fixed31_32_from_fraction(i,
+		struct fixed31_32 value = dc_fixpt_from_fraction(i,
 			numberof_points - 1);
 
 		p->r = value;
@@ -984,21 +984,21 @@ static void build_evenly_distributed_points(
 		++i;
 	} while (i != numberof_points);
 
-	p->r = dal_fixed31_32_div(p_last->r, dividers.divider1);
-	p->g = dal_fixed31_32_div(p_last->g, dividers.divider1);
-	p->b = dal_fixed31_32_div(p_last->b, dividers.divider1);
+	p->r = dc_fixpt_div(p_last->r, dividers.divider1);
+	p->g = dc_fixpt_div(p_last->g, dividers.divider1);
+	p->b = dc_fixpt_div(p_last->b, dividers.divider1);
 
 	++p;
 
-	p->r = dal_fixed31_32_div(p_last->r, dividers.divider2);
-	p->g = dal_fixed31_32_div(p_last->g, dividers.divider2);
-	p->b = dal_fixed31_32_div(p_last->b, dividers.divider2);
+	p->r = dc_fixpt_div(p_last->r, dividers.divider2);
+	p->g = dc_fixpt_div(p_last->g, dividers.divider2);
+	p->b = dc_fixpt_div(p_last->b, dividers.divider2);
 
 	++p;
 
-	p->r = dal_fixed31_32_div(p_last->r, dividers.divider3);
-	p->g = dal_fixed31_32_div(p_last->g, dividers.divider3);
-	p->b = dal_fixed31_32_div(p_last->b, dividers.divider3);
+	p->r = dc_fixpt_div(p_last->r, dividers.divider3);
+	p->g = dc_fixpt_div(p_last->g, dividers.divider3);
+	p->b = dc_fixpt_div(p_last->b, dividers.divider3);
 }
 
 static inline void copy_rgb_regamma_to_coordinates_x(
@@ -1094,7 +1094,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
 	struct fixed31_32 *tf_point;
 	struct fixed31_32 hw_x;
 	struct fixed31_32 norm_factor =
-			dal_fixed31_32_from_int_nonconst(255);
+			dc_fixpt_from_int_nonconst(255);
 	struct fixed31_32 norm_x;
 	struct fixed31_32 index_f;
 	struct fixed31_32 lut1;
@@ -1105,9 +1105,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
 	i = 0;
 	/* fixed_pt library has problems handling too small values */
 	while (i != 32) {
-		tf_pts->red[i] = dal_fixed31_32_zero;
-		tf_pts->green[i] = dal_fixed31_32_zero;
-		tf_pts->blue[i] = dal_fixed31_32_zero;
+		tf_pts->red[i] = dc_fixpt_zero;
+		tf_pts->green[i] = dc_fixpt_zero;
+		tf_pts->blue[i] = dc_fixpt_zero;
 		++i;
 	}
 	while (i <= hw_points_num + 1) {
@@ -1129,12 +1129,12 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
 			} else
 				hw_x = coordinates_x[i].x;
 
-			norm_x = dal_fixed31_32_mul(norm_factor, hw_x);
-			index = dal_fixed31_32_floor(norm_x);
+			norm_x = dc_fixpt_mul(norm_factor, hw_x);
+			index = dc_fixpt_floor(norm_x);
 			if (index < 0 || index > 255)
 				continue;
 
-			index_f = dal_fixed31_32_from_int_nonconst(index);
+			index_f = dc_fixpt_from_int_nonconst(index);
 			index_next = (index == 255) ? index : index + 1;
 
 			if (color == 0) {
@@ -1149,11 +1149,11 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
 			}
 
 			// we have everything now, so interpolate
-			delta_lut = dal_fixed31_32_sub(lut2, lut1);
-			delta_index = dal_fixed31_32_sub(norm_x, index_f);
+			delta_lut = dc_fixpt_sub(lut2, lut1);
+			delta_index = dc_fixpt_sub(norm_x, index_f);
 
-			*tf_point = dal_fixed31_32_add(lut1,
-				dal_fixed31_32_mul(delta_index, delta_lut));
+			*tf_point = dc_fixpt_add(lut1,
+				dc_fixpt_mul(delta_index, delta_lut));
 		}
 		++i;
 	}
@@ -1168,15 +1168,15 @@ static void build_new_custom_resulted_curve(
 	i = 0;
 
 	while (i != hw_points_num + 1) {
-		tf_pts->red[i] = dal_fixed31_32_clamp(
-			tf_pts->red[i], dal_fixed31_32_zero,
-			dal_fixed31_32_one);
-		tf_pts->green[i] = dal_fixed31_32_clamp(
-			tf_pts->green[i], dal_fixed31_32_zero,
-			dal_fixed31_32_one);
-		tf_pts->blue[i] = dal_fixed31_32_clamp(
-			tf_pts->blue[i], dal_fixed31_32_zero,
-			dal_fixed31_32_one);
+		tf_pts->red[i] = dc_fixpt_clamp(
+			tf_pts->red[i], dc_fixpt_zero,
+			dc_fixpt_one);
+		tf_pts->green[i] = dc_fixpt_clamp(
+			tf_pts->green[i], dc_fixpt_zero,
+			dc_fixpt_one);
+		tf_pts->blue[i] = dc_fixpt_clamp(
+			tf_pts->blue[i], dc_fixpt_zero,
+			dc_fixpt_one);
 
 		++i;
 	}
@@ -1290,9 +1290,9 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 	if (!coeff)
 		goto coeff_alloc_fail;
 
-	dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
-	dividers.divider2 = dal_fixed31_32_from_int(2);
-	dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
+	dividers.divider1 = dc_fixpt_from_fraction(3, 2);
+	dividers.divider2 = dc_fixpt_from_int(2);
+	dividers.divider3 = dc_fixpt_from_fraction(5, 2);
 
 	tf = output_tf->tf;
 
@@ -1357,15 +1357,15 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
 	uint32_t i = 0;
 
 	do {
-		coeff.a0[i] = dal_fixed31_32_from_fraction(
+		coeff.a0[i] = dc_fixpt_from_fraction(
 				regamma->coeff.A0[i], 10000000);
-		coeff.a1[i] = dal_fixed31_32_from_fraction(
+		coeff.a1[i] = dc_fixpt_from_fraction(
 				regamma->coeff.A1[i], 1000);
-		coeff.a2[i] = dal_fixed31_32_from_fraction(
+		coeff.a2[i] = dc_fixpt_from_fraction(
 				regamma->coeff.A2[i], 1000);
-		coeff.a3[i] = dal_fixed31_32_from_fraction(
+		coeff.a3[i] = dc_fixpt_from_fraction(
 				regamma->coeff.A3[i], 1000);
-		coeff.user_gamma[i] = dal_fixed31_32_from_fraction(
+		coeff.user_gamma[i] = dc_fixpt_from_fraction(
 				regamma->coeff.gamma[i], 1000);
 
 		++i;
@@ -1374,9 +1374,9 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
 	i = 0;
 	/* fixed_pt library has problems handling too small values */
 	while (i != 32) {
-		output_tf->tf_pts.red[i] = dal_fixed31_32_zero;
-		output_tf->tf_pts.green[i] = dal_fixed31_32_zero;
-		output_tf->tf_pts.blue[i] = dal_fixed31_32_zero;
+		output_tf->tf_pts.red[i] = dc_fixpt_zero;
+		output_tf->tf_pts.green[i] = dc_fixpt_zero;
+		output_tf->tf_pts.blue[i] = dc_fixpt_zero;
 		++coord_x;
 		++i;
 	}
@@ -1423,9 +1423,9 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
 	if (!rgb_regamma)
 		goto rgb_regamma_alloc_fail;
 
-	dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
-	dividers.divider2 = dal_fixed31_32_from_int(2);
-	dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
+	dividers.divider1 = dc_fixpt_from_fraction(3, 2);
+	dividers.divider2 = dc_fixpt_from_int(2);
+	dividers.divider3 = dc_fixpt_from_fraction(5, 2);
 
 	scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
 
@@ -1496,9 +1496,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 	if (!coeff)
 		goto coeff_alloc_fail;
 
-	dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
-	dividers.divider2 = dal_fixed31_32_from_int(2);
-	dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
+	dividers.divider1 = dc_fixpt_from_fraction(3, 2);
+	dividers.divider2 = dc_fixpt_from_int(2);
+	dividers.divider3 = dc_fixpt_from_fraction(5, 2);
 
 	tf = input_tf->tf;
 

From f3ba7a2fd1ebffe7fc6a9c524754db05dcd0c0e4 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Wed, 18 Apr 2018 13:54:24 -0400
Subject: [PATCH 1221/1286] drm/amd/display: inline more of fixed point code

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/basics/fixpt31_32.c    | 156 ++-----------
 .../gpu/drm/amd/display/include/fixed31_32.h  | 207 ++++++++++--------
 .../amd/display/modules/color/color_gamma.c   |   8 +-
 3 files changed, 135 insertions(+), 236 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index e398ecdf742c..e61dd97d0928 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -64,9 +64,7 @@ static inline unsigned long long complete_integer_division_u64(
 #define GET_FRACTIONAL_PART(x) \
 	(FRACTIONAL_PART_MASK & (x))
 
-struct fixed31_32 dc_fixpt_from_fraction(
-	long long numerator,
-	long long denominator)
+struct fixed31_32 dc_fixpt_from_fraction(long long numerator, long long denominator)
 {
 	struct fixed31_32 res;
 
@@ -118,63 +116,7 @@ struct fixed31_32 dc_fixpt_from_fraction(
 	return res;
 }
 
-struct fixed31_32 dc_fixpt_from_int_nonconst(
-	long long arg)
-{
-	struct fixed31_32 res;
-
-	ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
-
-	res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
-
-	return res;
-}
-
-struct fixed31_32 dc_fixpt_shl(
-	struct fixed31_32 arg,
-	unsigned char shift)
-{
-	struct fixed31_32 res;
-
-	ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
-		((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
-
-	res.value = arg.value << shift;
-
-	return res;
-}
-
-struct fixed31_32 dc_fixpt_add(
-	struct fixed31_32 arg1,
-	struct fixed31_32 arg2)
-{
-	struct fixed31_32 res;
-
-	ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
-		((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
-
-	res.value = arg1.value + arg2.value;
-
-	return res;
-}
-
-struct fixed31_32 dc_fixpt_sub(
-	struct fixed31_32 arg1,
-	struct fixed31_32 arg2)
-{
-	struct fixed31_32 res;
-
-	ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
-		((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
-
-	res.value = arg1.value - arg2.value;
-
-	return res;
-}
-
-struct fixed31_32 dc_fixpt_mul(
-	struct fixed31_32 arg1,
-	struct fixed31_32 arg2)
+struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
 	struct fixed31_32 res;
 
@@ -225,8 +167,7 @@ struct fixed31_32 dc_fixpt_mul(
 	return res;
 }
 
-struct fixed31_32 dc_fixpt_sqr(
-	struct fixed31_32 arg)
+struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg)
 {
 	struct fixed31_32 res;
 
@@ -266,8 +207,7 @@ struct fixed31_32 dc_fixpt_sqr(
 	return res;
 }
 
-struct fixed31_32 dc_fixpt_recip(
-	struct fixed31_32 arg)
+struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg)
 {
 	/*
 	 * @note
@@ -281,8 +221,7 @@ struct fixed31_32 dc_fixpt_recip(
 		arg.value);
 }
 
-struct fixed31_32 dc_fixpt_sinc(
-	struct fixed31_32 arg)
+struct fixed31_32 dc_fixpt_sinc(struct fixed31_32 arg)
 {
 	struct fixed31_32 square;
 
@@ -326,16 +265,14 @@ struct fixed31_32 dc_fixpt_sinc(
 	return res;
 }
 
-struct fixed31_32 dc_fixpt_sin(
-	struct fixed31_32 arg)
+struct fixed31_32 dc_fixpt_sin(struct fixed31_32 arg)
 {
 	return dc_fixpt_mul(
 		arg,
 		dc_fixpt_sinc(arg));
 }
 
-struct fixed31_32 dc_fixpt_cos(
-	struct fixed31_32 arg)
+struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
 {
 	/* TODO implement argument normalization */
 
@@ -367,8 +304,7 @@ struct fixed31_32 dc_fixpt_cos(
  *
  * Calculated as Taylor series.
  */
-static struct fixed31_32 fixed31_32_exp_from_taylor_series(
-	struct fixed31_32 arg)
+static struct fixed31_32 fixed31_32_exp_from_taylor_series(struct fixed31_32 arg)
 {
 	unsigned int n = 9;
 
@@ -396,8 +332,7 @@ static struct fixed31_32 fixed31_32_exp_from_taylor_series(
 			res));
 }
 
-struct fixed31_32 dc_fixpt_exp(
-	struct fixed31_32 arg)
+struct fixed31_32 dc_fixpt_exp(struct fixed31_32 arg)
 {
 	/*
 	 * @brief
@@ -440,8 +375,7 @@ struct fixed31_32 dc_fixpt_exp(
 		return dc_fixpt_one;
 }
 
-struct fixed31_32 dc_fixpt_log(
-	struct fixed31_32 arg)
+struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg)
 {
 	struct fixed31_32 res = dc_fixpt_neg(dc_fixpt_one);
 	/* TODO improve 1st estimation */
@@ -472,61 +406,6 @@ struct fixed31_32 dc_fixpt_log(
 	return res;
 }
 
-struct fixed31_32 dc_fixpt_pow(
-	struct fixed31_32 arg1,
-	struct fixed31_32 arg2)
-{
-	return dc_fixpt_exp(
-		dc_fixpt_mul(
-			dc_fixpt_log(arg1),
-			arg2));
-}
-
-int dc_fixpt_floor(
-	struct fixed31_32 arg)
-{
-	unsigned long long arg_value = abs_i64(arg.value);
-
-	if (arg.value >= 0)
-		return (int)GET_INTEGER_PART(arg_value);
-	else
-		return -(int)GET_INTEGER_PART(arg_value);
-}
-
-int dc_fixpt_round(
-	struct fixed31_32 arg)
-{
-	unsigned long long arg_value = abs_i64(arg.value);
-
-	const long long summand = dc_fixpt_half.value;
-
-	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
-
-	arg_value += summand;
-
-	if (arg.value >= 0)
-		return (int)GET_INTEGER_PART(arg_value);
-	else
-		return -(int)GET_INTEGER_PART(arg_value);
-}
-
-int dc_fixpt_ceil(
-	struct fixed31_32 arg)
-{
-	unsigned long long arg_value = abs_i64(arg.value);
-
-	const long long summand = dc_fixpt_one.value -
-		dc_fixpt_epsilon.value;
-
-	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
-
-	arg_value += summand;
-
-	if (arg.value >= 0)
-		return (int)GET_INTEGER_PART(arg_value);
-	else
-		return -(int)GET_INTEGER_PART(arg_value);
-}
 
 /* this function is a generic helper to translate fixed point value to
  * specified integer format that will consist of integer_bits integer part and
@@ -570,32 +449,27 @@ static inline unsigned int clamp_ux_dy(
 		return min_clamp;
 }
 
-unsigned int dc_fixpt_u2d19(
-	struct fixed31_32 arg)
+unsigned int dc_fixpt_u2d19(struct fixed31_32 arg)
 {
 	return ux_dy(arg.value, 2, 19);
 }
 
-unsigned int dc_fixpt_u0d19(
-	struct fixed31_32 arg)
+unsigned int dc_fixpt_u0d19(struct fixed31_32 arg)
 {
 	return ux_dy(arg.value, 0, 19);
 }
 
-unsigned int dc_fixpt_clamp_u0d14(
-	struct fixed31_32 arg)
+unsigned int dc_fixpt_clamp_u0d14(struct fixed31_32 arg)
 {
 	return clamp_ux_dy(arg.value, 0, 14, 1);
 }
 
-unsigned int dc_fixpt_clamp_u0d10(
-	struct fixed31_32 arg)
+unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg)
 {
 	return clamp_ux_dy(arg.value, 0, 10, 1);
 }
 
-int dc_fixpt_s4d19(
-	struct fixed31_32 arg)
+int dc_fixpt_s4d19(struct fixed31_32 arg)
 {
 	if (arg.value < 0)
 		return -(int)ux_dy(dc_fixpt_abs(arg).value, 4, 19);
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index b5b8d7dea373..ebfd33e91ee8 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -70,24 +70,19 @@ static const struct fixed31_32 dc_fixpt_ln2_div_2 = { 1488522236LL };
  * @brief
  * result = numerator / denominator
  */
-struct fixed31_32 dc_fixpt_from_fraction(
-	long long numerator,
-	long long denominator);
+struct fixed31_32 dc_fixpt_from_fraction(long long numerator, long long denominator);
 
 /*
  * @brief
  * result = arg
  */
-struct fixed31_32 dc_fixpt_from_int_nonconst(long long arg);
-static inline struct fixed31_32 dc_fixpt_from_int(long long arg)
+static inline struct fixed31_32 dc_fixpt_from_int(int arg)
 {
-	if (__builtin_constant_p(arg)) {
-		struct fixed31_32 res;
-		BUILD_BUG_ON((LONG_MIN > arg) || (arg > LONG_MAX));
-		res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
-		return res;
-	} else
-		return dc_fixpt_from_int_nonconst(arg);
+	struct fixed31_32 res;
+
+	res.value = (long long) arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+	return res;
 }
 
 /*
@@ -129,8 +124,7 @@ static inline struct fixed31_32 dc_fixpt_abs(struct fixed31_32 arg)
  * @brief
  * result = arg1 < arg2
  */
-static inline bool dc_fixpt_lt(struct fixed31_32 arg1,
-				     struct fixed31_32 arg2)
+static inline bool dc_fixpt_lt(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
 	return arg1.value < arg2.value;
 }
@@ -139,8 +133,7 @@ static inline bool dc_fixpt_lt(struct fixed31_32 arg1,
  * @brief
  * result = arg1 <= arg2
  */
-static inline bool dc_fixpt_le(struct fixed31_32 arg1,
-				     struct fixed31_32 arg2)
+static inline bool dc_fixpt_le(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
 	return arg1.value <= arg2.value;
 }
@@ -149,8 +142,7 @@ static inline bool dc_fixpt_le(struct fixed31_32 arg1,
  * @brief
  * result = arg1 == arg2
  */
-static inline bool dc_fixpt_eq(struct fixed31_32 arg1,
-				     struct fixed31_32 arg2)
+static inline bool dc_fixpt_eq(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
 	return arg1.value == arg2.value;
 }
@@ -159,8 +151,7 @@ static inline bool dc_fixpt_eq(struct fixed31_32 arg1,
  * @brief
  * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
  */
-static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1,
-						   struct fixed31_32 arg2)
+static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
 	if (arg1.value <= arg2.value)
 		return arg1;
@@ -172,8 +163,7 @@ static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1,
  * @brief
  * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
  */
-static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1,
-						   struct fixed31_32 arg2)
+static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
 	if (arg1.value <= arg2.value)
 		return arg2;
@@ -209,17 +199,23 @@ static inline struct fixed31_32 dc_fixpt_clamp(
  * @brief
  * result = arg << shift
  */
-struct fixed31_32 dc_fixpt_shl(
-	struct fixed31_32 arg,
-	unsigned char shift);
+static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+		((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+
+	res.value = arg.value << shift;
+
+	return res;
+}
 
 /*
  * @brief
  * result = arg >> shift
  */
-static inline struct fixed31_32 dc_fixpt_shr(
-	struct fixed31_32 arg,
-	unsigned char shift)
+static inline struct fixed31_32 dc_fixpt_shr(struct fixed31_32 arg, unsigned char shift)
 {
 	struct fixed31_32 res;
 	res.value = arg.value >> shift;
@@ -235,38 +231,50 @@ static inline struct fixed31_32 dc_fixpt_shr(
  * @brief
  * result = arg1 + arg2
  */
-struct fixed31_32 dc_fixpt_add(
-	struct fixed31_32 arg1,
-	struct fixed31_32 arg2);
+static inline struct fixed31_32 dc_fixpt_add(struct fixed31_32 arg1, struct fixed31_32 arg2)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+		((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+	res.value = arg1.value + arg2.value;
+
+	return res;
+}
 
 /*
  * @brief
  * result = arg1 + arg2
  */
-static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1,
-						       int arg2)
+static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1, int arg2)
 {
-	return dc_fixpt_add(arg1,
-				  dc_fixpt_from_int(arg2));
+	return dc_fixpt_add(arg1, dc_fixpt_from_int(arg2));
 }
 
 /*
  * @brief
  * result = arg1 - arg2
  */
-struct fixed31_32 dc_fixpt_sub(
-	struct fixed31_32 arg1,
-	struct fixed31_32 arg2);
+static inline struct fixed31_32 dc_fixpt_sub(struct fixed31_32 arg1, struct fixed31_32 arg2)
+{
+	struct fixed31_32 res;
+
+	ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+		((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+	res.value = arg1.value - arg2.value;
+
+	return res;
+}
 
 /*
  * @brief
  * result = arg1 - arg2
  */
-static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1,
-						       int arg2)
+static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1, int arg2)
 {
-	return dc_fixpt_sub(arg1,
-				  dc_fixpt_from_int(arg2));
+	return dc_fixpt_sub(arg1, dc_fixpt_from_int(arg2));
 }
 
 
@@ -279,49 +287,40 @@ static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1,
  * @brief
  * result = arg1 * arg2
  */
-struct fixed31_32 dc_fixpt_mul(
-	struct fixed31_32 arg1,
-	struct fixed31_32 arg2);
+struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2);
 
 
 /*
  * @brief
  * result = arg1 * arg2
  */
-static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1,
-						       int arg2)
+static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1, int arg2)
 {
-	return dc_fixpt_mul(arg1,
-				  dc_fixpt_from_int(arg2));
+	return dc_fixpt_mul(arg1, dc_fixpt_from_int(arg2));
 }
 
 /*
  * @brief
  * result = square(arg) := arg * arg
  */
-struct fixed31_32 dc_fixpt_sqr(
-	struct fixed31_32 arg);
+struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg);
 
 /*
  * @brief
  * result = arg1 / arg2
  */
-static inline struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1,
-						       long long arg2)
+static inline struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1, long long arg2)
 {
-	return dc_fixpt_from_fraction(arg1.value,
-					    dc_fixpt_from_int(arg2).value);
+	return dc_fixpt_from_fraction(arg1.value, dc_fixpt_from_int(arg2).value);
 }
 
 /*
  * @brief
  * result = arg1 / arg2
  */
-static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1,
-						   struct fixed31_32 arg2)
+static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
-	return dc_fixpt_from_fraction(arg1.value,
-					    arg2.value);
+	return dc_fixpt_from_fraction(arg1.value, arg2.value);
 }
 
 /*
@@ -336,8 +335,7 @@ static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1,
  * @note
  * No special actions taken in case argument is zero.
  */
-struct fixed31_32 dc_fixpt_recip(
-	struct fixed31_32 arg);
+struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg);
 
 /*
  * @brief
@@ -352,8 +350,7 @@ struct fixed31_32 dc_fixpt_recip(
  * Argument specified in radians,
  * internally it's normalized to [-2pi...2pi] range.
  */
-struct fixed31_32 dc_fixpt_sinc(
-	struct fixed31_32 arg);
+struct fixed31_32 dc_fixpt_sinc(struct fixed31_32 arg);
 
 /*
  * @brief
@@ -363,8 +360,7 @@ struct fixed31_32 dc_fixpt_sinc(
  * Argument specified in radians,
  * internally it's normalized to [-2pi...2pi] range.
  */
-struct fixed31_32 dc_fixpt_sin(
-	struct fixed31_32 arg);
+struct fixed31_32 dc_fixpt_sin(struct fixed31_32 arg);
 
 /*
  * @brief
@@ -376,8 +372,7 @@ struct fixed31_32 dc_fixpt_sin(
  * passing arguments outside that range
  * will cause incorrect result!
  */
-struct fixed31_32 dc_fixpt_cos(
-	struct fixed31_32 arg);
+struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg);
 
 /*
  * @brief
@@ -391,8 +386,7 @@ struct fixed31_32 dc_fixpt_cos(
  * @note
  * Currently, function is verified for abs(arg) <= 1.
  */
-struct fixed31_32 dc_fixpt_exp(
-	struct fixed31_32 arg);
+struct fixed31_32 dc_fixpt_exp(struct fixed31_32 arg);
 
 /*
  * @brief
@@ -404,8 +398,7 @@ struct fixed31_32 dc_fixpt_exp(
  * Currently, no special actions taken
  * in case of invalid argument(s). Take care!
  */
-struct fixed31_32 dc_fixpt_log(
-	struct fixed31_32 arg);
+struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
 
 /*
  * @brief
@@ -419,9 +412,13 @@ struct fixed31_32 dc_fixpt_log(
  * @note
  * Currently, abs(arg1) should be less than 1. Take care!
  */
-struct fixed31_32 dc_fixpt_pow(
-	struct fixed31_32 arg1,
-	struct fixed31_32 arg2);
+static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
+{
+	return dc_fixpt_exp(
+		dc_fixpt_mul(
+			dc_fixpt_log(arg1),
+			arg2));
+}
 
 /*
  * @brief
@@ -432,22 +429,56 @@ struct fixed31_32 dc_fixpt_pow(
  * @brief
  * result = floor(arg) := greatest integer lower than or equal to arg
  */
-int dc_fixpt_floor(
-	struct fixed31_32 arg);
+static inline int dc_fixpt_floor(struct fixed31_32 arg)
+{
+	unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+	if (arg.value >= 0)
+		return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+	else
+		return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
 
 /*
  * @brief
  * result = round(arg) := integer nearest to arg
  */
-int dc_fixpt_round(
-	struct fixed31_32 arg);
+static inline int dc_fixpt_round(struct fixed31_32 arg)
+{
+	unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+	const long long summand = dc_fixpt_half.value;
+
+	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+	arg_value += summand;
+
+	if (arg.value >= 0)
+		return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+	else
+		return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
 
 /*
  * @brief
  * result = ceil(arg) := lowest integer greater than or equal to arg
  */
-int dc_fixpt_ceil(
-	struct fixed31_32 arg);
+static inline int dc_fixpt_ceil(struct fixed31_32 arg)
+{
+	unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+	const long long summand = dc_fixpt_one.value -
+		dc_fixpt_epsilon.value;
+
+	ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+	arg_value += summand;
+
+	if (arg.value >= 0)
+		return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+	else
+		return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
 
 /* the following two function are used in scaler hw programming to convert fixed
  * point value to format 2 bits from integer part and 19 bits from fractional
@@ -455,20 +486,14 @@ int dc_fixpt_ceil(
  * fractional
  */
 
-unsigned int dc_fixpt_u2d19(
-	struct fixed31_32 arg);
+unsigned int dc_fixpt_u2d19(struct fixed31_32 arg);
 
-unsigned int dc_fixpt_u0d19(
-	struct fixed31_32 arg);
+unsigned int dc_fixpt_u0d19(struct fixed31_32 arg);
 
+unsigned int dc_fixpt_clamp_u0d14(struct fixed31_32 arg);
 
-unsigned int dc_fixpt_clamp_u0d14(
-	struct fixed31_32 arg);
+unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg);
 
-unsigned int dc_fixpt_clamp_u0d10(
-	struct fixed31_32 arg);
-
-int dc_fixpt_s4d19(
-	struct fixed31_32 arg);
+int dc_fixpt_s4d19(struct fixed31_32 arg);
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 29d2ec82b924..e803b375e835 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -913,7 +913,7 @@ static void apply_lut_1d(
 	struct fixed31_32 lut2;
 	const int max_lut_index = 4095;
 	const struct fixed31_32 max_lut_index_f =
-			dc_fixpt_from_int_nonconst(max_lut_index);
+			dc_fixpt_from_int(max_lut_index);
 	int32_t index = 0, index_next = 0;
 	struct fixed31_32 index_f;
 	struct fixed31_32 delta_lut;
@@ -934,7 +934,7 @@ static void apply_lut_1d(
 			norm_y = dc_fixpt_mul(max_lut_index_f,
 						   *regamma_y);
 			index = dc_fixpt_floor(norm_y);
-			index_f = dc_fixpt_from_int_nonconst(index);
+			index_f = dc_fixpt_from_int(index);
 
 			if (index < 0 || index > max_lut_index)
 				continue;
@@ -1094,7 +1094,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
 	struct fixed31_32 *tf_point;
 	struct fixed31_32 hw_x;
 	struct fixed31_32 norm_factor =
-			dc_fixpt_from_int_nonconst(255);
+			dc_fixpt_from_int(255);
 	struct fixed31_32 norm_x;
 	struct fixed31_32 index_f;
 	struct fixed31_32 lut1;
@@ -1134,7 +1134,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
 			if (index < 0 || index > 255)
 				continue;
 
-			index_f = dc_fixpt_from_int_nonconst(index);
+			index_f = dc_fixpt_from_int(index);
 			index_next = (index == 255) ? index : index + 1;
 
 			if (color == 0) {

From e8838df1cb987fe690dfd069824ff08107327607 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 19 Apr 2018 10:05:22 -0400
Subject: [PATCH 1222/1286] drm/amd/display: Make DisplayStats work with just
 DC DisplayStats minor

Remove the dependency on the old FREESYNC_SW_STATS log mask used by DAL2.
Also rename the log type from profiling to displaystats.
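
The new dump only lines up because the header row ("%10s" fields) and the
data rows ("%10u" fields) use matching widths. A standalone illustration of
that idea, with plain printf and made-up numbers instead of dm_logger_write:

#include <stdio.h>

/* Illustration only: fixed-width specifiers keep the header ("%10s")
 * and the data rows ("%10u") aligned, which the new LOG_DISPLAYSTATS
 * dump relies on. Numbers are made up.
 */
int main(void)
{
	printf("%10s %10s %11s\n", "render", "avgRender", "vsyncToFlip");
	printf("%10u %10u %11u\n", 16666u, 16700u, 1200u);
	printf("%10u %10u %11u\n", 8333u, 8400u, 900u);
	return 0;
}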

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/basics/logger.c    |  2 +-
 .../drm/amd/display/include/logger_types.h    |  2 +-
 .../gpu/drm/amd/display/modules/stats/stats.c | 75 ++++++++++---------
 3 files changed, 43 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
index 31bee054f43a..0001a3c5b862 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -61,7 +61,7 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
 		{LOG_EVENT_UNDERFLOW,       "Underflow"},
 		{LOG_IF_TRACE,              "InterfaceTrace"},
 		{LOG_DTN,                   "DTN"},
-		{LOG_PROFILING,             "Profiling"}
+		{LOG_DISPLAYSTATS,          "DisplayStats"}
 };
 
 
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index b608a0830801..0a540b9897a6 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -98,7 +98,7 @@ enum dc_log_type {
 	LOG_EVENT_UNDERFLOW,
 	LOG_IF_TRACE,
 	LOG_PERF_TRACE,
-	LOG_PROFILING,
+	LOG_DISPLAYSTATS,
 
 	LOG_SECTION_TOTAL_COUNT
 };
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 48e02197919f..d16aac7b30b3 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -177,44 +177,51 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 	logger = dc->ctx->logger;
 	time = core_stats->time;
 
-	//LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
+	dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Caps==");
+	dm_logger_write(logger, LOG_DISPLAYSTATS, " ");
 
-	//if (!pLog->IsDummyEntry())
-	{
-		dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n");
-		dm_logger_write(logger, LOG_PROFILING, "\n");
-		dm_logger_write(logger, LOG_PROFILING, "\n");
+	dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Stats==");
+	dm_logger_write(logger, LOG_DISPLAYSTATS, " ");
 
-		dm_logger_write(logger, LOG_PROFILING, "==Stats==\n");
-		dm_logger_write(logger, LOG_PROFILING,
-			"render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n");
+	dm_logger_write(logger, LOG_DISPLAYSTATS,
+		"%10s %10s %10s %10s %10s"
+			" %11s %11s %17s %10s %14s"
+			" %10s %10s %10s %10s %10s"
+			" %10s %10s %10s %10s",
+		"render", "avgRender",
+		"minWindow", "midPoint", "maxWindow",
+		"vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
+		"numFrame", "insertDuration",
+		"vTotalMin", "vTotalMax", "eventTrigs",
+		"vSyncTime1", "vSyncTime2", "vSyncTime3",
+		"vSyncTime4", "vSyncTime5", "flags");
 
-		for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
-			dm_logger_write(logger, LOG_PROFILING,
-					"%u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u  %u",
-					time[i].render_time_in_us,
-					time[i].avg_render_time_in_us_last_ten,
-					time[i].min_window,
-					time[i].lfc_mid_point_in_us,
-					time[i].max_window,
-					time[i].vsync_to_flip_time_in_us,
-					time[i].flip_to_vsync_time_in_us,
-					time[i].num_vsync_between_flips,
-					time[i].num_frames_inserted,
-					time[i].inserted_duration_in_us,
-					time[i].v_total_min,
-					time[i].v_total_max,
-					time[i].event_triggers,
-					time[i].v_sync_time_in_us[0],
-					time[i].v_sync_time_in_us[1],
-					time[i].v_sync_time_in_us[2],
-					time[i].v_sync_time_in_us[3],
-					time[i].v_sync_time_in_us[4],
-					time[i].flags);
-		}
+	for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
+		dm_logger_write(logger, LOG_DISPLAYSTATS,
+			"%10u %10u %10u %10u %10u"
+				" %11u %11u %17u %10u %14u"
+				" %10u %10u %10u %10u %10u"
+				" %10u %10u %10u %10u",
+			time[i].render_time_in_us,
+			time[i].avg_render_time_in_us_last_ten,
+			time[i].min_window,
+			time[i].lfc_mid_point_in_us,
+			time[i].max_window,
+			time[i].vsync_to_flip_time_in_us,
+			time[i].flip_to_vsync_time_in_us,
+			time[i].num_vsync_between_flips,
+			time[i].num_frames_inserted,
+			time[i].inserted_duration_in_us,
+			time[i].v_total_min,
+			time[i].v_total_max,
+			time[i].event_triggers,
+			time[i].v_sync_time_in_us[0],
+			time[i].v_sync_time_in_us[1],
+			time[i].v_sync_time_in_us[2],
+			time[i].v_sync_time_in_us[3],
+			time[i].v_sync_time_in_us[4],
+			time[i].flags);
 	}
-	//GetLog()->Close(pLog);
-	//GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
 }
 
 void mod_stats_reset_data(struct mod_stats *mod_stats)

From ab9c2062d960df84d41c03efc49cb01071b398c6 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Wed, 18 Apr 2018 14:11:43 -0400
Subject: [PATCH 1223/1286] drm/amd/display: add fixed point fractional bit
 truncation function
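
As a rough, user-space illustration of the helper added below (local copy of
the type, made-up value, and 19 fractional bits chosen only as an example):

#include <assert.h>
#include <stdio.h>

#define FRAC_BITS 32

struct fixed31_32 {
	long long value;
};

/* Mirrors the masking in dc_fixpt_truncate(): keep only the top
 * frac_bits of the 32-bit fractional part, preserving the sign.
 */
static struct fixed31_32 truncate_frac(struct fixed31_32 arg, unsigned int frac_bits)
{
	int negative = arg.value < 0;

	if (frac_bits >= FRAC_BITS)
		return arg;
	if (negative)
		arg.value = -arg.value;
	arg.value &= (~0LL) << (FRAC_BITS - frac_bits);
	if (negative)
		arg.value = -arg.value;
	return arg;
}

int main(void)
{
	/* 1.75 plus some low-order fractional noise */
	struct fixed31_32 x = { (1LL << 32) + (3LL << 30) + 0x1234 };
	struct fixed31_32 t = truncate_frac(x, 19);

	/* Truncating to 19 fractional bits clears the low 13 bits. */
	printf("before: 0x%016llx\n", (unsigned long long)x.value);
	printf("after:  0x%016llx\n", (unsigned long long)t.value);
	assert(t.value == (1LL << 32) + (3LL << 30));
	return 0;
}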

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/include/fixed31_32.h    | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index ebfd33e91ee8..61f11e23bf70 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -496,4 +496,21 @@ unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg);
 
 int dc_fixpt_s4d19(struct fixed31_32 arg);
 
+static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigned int frac_bits)
+{
+	bool negative = arg.value < 0;
+
+	if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) {
+		ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
+		return arg;
+	}
+
+	if (negative)
+		arg.value = -arg.value;
+	arg.value &= (~0LL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits);
+	if (negative)
+		arg.value = -arg.value;
+	return arg;
+}
+
 #endif

From 0002d3ac8aadcb2850475557de32234b447ba502 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Wed, 18 Apr 2018 14:19:23 -0400
Subject: [PATCH 1224/1286] drm/amd/display: truncate scaling ratios and inits
 to 19 bit precision
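
The inits truncated by this patch follow init = (ratio + taps + 1) / 2, as in
calculate_inits_and_adj_vp() below. A standalone sketch of that arithmetic,
with plain long long values standing in for the dc_fixpt_* helpers (the ratio
and tap count are made up, positive values only):

#include <stdio.h>

#define FRAC_BITS 32

/* Sketch of the init calculation this patch truncates:
 *   init = (ratio + taps + 1) / 2, then kept to 19 fractional bits.
 * Plain long long arithmetic stands in for the dc_fixpt_* helpers.
 */
static long long from_int(long long i)
{
	return i << FRAC_BITS;
}

static long long trunc19(long long v)
{
	return v & ((~0LL) << (FRAC_BITS - 19));
}

int main(void)
{
	long long ratio = (1920LL << FRAC_BITS) / 1097;	/* ~1.75 downscale */
	int taps = 4;
	long long init = (ratio + from_int(taps + 1)) / 2;

	printf("init      = 0x%016llx\n", (unsigned long long)init);
	printf("truncated = 0x%016llx\n", (unsigned long long)trunc19(init));
	return 0;
}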

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 25 +++++++++++++------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 082458f2097c..751f3ac9d921 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -652,6 +652,14 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
 		pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2;
 		pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2;
 	}
+	pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_truncate(
+			pipe_ctx->plane_res.scl_data.ratios.horz, 19);
+	pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_truncate(
+			pipe_ctx->plane_res.scl_data.ratios.vert, 19);
+	pipe_ctx->plane_res.scl_data.ratios.horz_c = dc_fixpt_truncate(
+			pipe_ctx->plane_res.scl_data.ratios.horz_c, 19);
+	pipe_ctx->plane_res.scl_data.ratios.vert_c = dc_fixpt_truncate(
+			pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
 }
 
 static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
@@ -688,17 +696,18 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
 	 * 	init_bot = init + scaling_ratio
 	 * 	init_c = init + truncated_vp_c_offset(from calculate viewport)
 	 */
-	data->inits.h = dc_fixpt_div_int(
-			dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
+	data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
+			dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
 
-	data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
-			dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
+	data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
+			dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
 
-	data->inits.v = dc_fixpt_div_int(
-			dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
+	data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
+			dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
+
+	data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
+			dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
 
-	data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
-			dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
 
 
 	/* Adjust for viewport end clip-off */

From 3ba43a59927fbde07414393dfc2b6753cb233e00 Mon Sep 17 00:00:00 2001
From: Charlene Liu <charlene.liu@amd.com>
Date: Wed, 18 Apr 2018 14:31:41 -0400
Subject: [PATCH 1225/1286] drm/amd/display: underflow/blankscreen recovery

[Description]
If the driver detects a HUBP underflow for any reason, and the recovery
debug option is enabled, it kicks in a recovery sequence.
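
A toy model of the gating logic, with made-up types in place of the driver's
structures:

#include <stdbool.h>
#include <stdio.h>

/* Recovery is attempted only when the debug flag is set and at least
 * one pipe reports an underflow; the register sequence itself is in
 * dcn10_hw_wa_force_recovery() below.
 */
struct pipe {
	bool underflow;
};

static bool want_recovery(bool recovery_enabled, const struct pipe *pipes, int n)
{
	bool need = false;
	int i;

	if (!recovery_enabled)
		return false;

	for (i = 0; i < n; i++)
		if (pipes[i].underflow)
			need = true;

	return need;
}

int main(void)
{
	struct pipe pipes[4] = { {false}, {true}, {false}, {false} };

	printf("%d\n", want_recovery(true, pipes, 4));	/* 1 */
	printf("%d\n", want_recovery(false, pipes, 4));	/* 0 */
	return 0;
}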

Signed-off-by: Charlene Liu <charlene.liu@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h           |  2 +
 .../drm/amd/display/dc/dcn10/dcn10_hubbub.c   |  8 ++
 .../drm/amd/display/dc/dcn10/dcn10_hubbub.h   |  7 +-
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 24 +++++
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h |  3 +
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 90 ++++++++++++++++++-
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |  1 +
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  |  2 +
 8 files changed, 135 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index cd4f4341cb53..1c39c9996a04 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -239,6 +239,8 @@ struct dc_debug {
 	bool az_endpoint_mute_only;
 	bool always_use_regamma;
 	bool p010_mpo_support;
+	bool recovery_enabled;
+
 };
 struct dc_state;
 struct resource_pool;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index b9fb14a3224b..943143efbb82 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -476,6 +476,14 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
 }
 
+void hubbub1_soft_reset(struct hubbub *hubbub, bool reset)
+{
+	uint32_t reset_en = reset ? 1 : 0;
+
+	REG_UPDATE(DCHUBBUB_SOFT_RESET,
+			DCHUBBUB_GLOBAL_SOFT_RESET, reset_en);
+}
+
 static bool hubbub1_dcc_support_swizzle(
 		enum swizzle_mode_values swizzle,
 		unsigned int bytes_per_element,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index f479f54e5bb2..6315a0e6b0d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -48,7 +48,8 @@
 	SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
 	SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
 	SR(DCHUBBUB_TEST_DEBUG_INDEX), \
-	SR(DCHUBBUB_TEST_DEBUG_DATA)
+	SR(DCHUBBUB_TEST_DEBUG_DATA),\
+	SR(DCHUBBUB_SOFT_RESET)
 
 #define HUBBUB_SR_WATERMARK_REG_LIST()\
 	SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
@@ -105,6 +106,7 @@ struct dcn_hubbub_registers {
 	uint32_t DCHUBBUB_SDPIF_AGP_BOT;
 	uint32_t DCHUBBUB_SDPIF_AGP_TOP;
 	uint32_t DCHUBBUB_CRC_CTRL;
+	uint32_t DCHUBBUB_SOFT_RESET;
 };
 
 /* set field name */
@@ -114,6 +116,7 @@ struct dcn_hubbub_registers {
 
 #define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\
 		HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
+		HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
 		HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
 		HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
 		HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
@@ -143,6 +146,7 @@ struct dcn_hubbub_registers {
 		type DCHUBBUB_ARB_SAT_LEVEL;\
 		type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
 		type DCHUBBUB_GLOBAL_TIMER_REFDIV;\
+		type DCHUBBUB_GLOBAL_SOFT_RESET; \
 		type SDPIF_FB_TOP;\
 		type SDPIF_FB_BASE;\
 		type SDPIF_FB_OFFSET;\
@@ -201,6 +205,7 @@ void hubbub1_toggle_watermark_change_req(
 void hubbub1_wm_read_state(struct hubbub *hubbub,
 		struct dcn_hubbub_wm *wm);
 
+void hubbub1_soft_reset(struct hubbub *hubbub, bool reset);
 void hubbub1_construct(struct hubbub *hubbub,
 	struct dc_context *ctx,
 	const struct dcn_hubbub_registers *hubbub_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 185f93bda41b..d2ab78b35a7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -78,6 +78,27 @@ static void hubp1_disconnect(struct hubp *hubp)
 			CURSOR_ENABLE, 0);
 }
 
+static void hubp1_disable_control(struct hubp *hubp, bool disable_hubp)
+{
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+	uint32_t disable = disable_hubp ? 1 : 0;
+
+	REG_UPDATE(DCHUBP_CNTL,
+			HUBP_DISABLE, disable);
+}
+
+static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
+{
+	uint32_t hubp_underflow = 0;
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+	REG_GET(DCHUBP_CNTL,
+		HUBP_UNDERFLOW_STATUS,
+		&hubp_underflow);
+
+	return hubp_underflow;
+}
+
 static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
 {
 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -1117,6 +1138,9 @@ static struct hubp_funcs dcn10_hubp_funcs = {
 	.hubp_clk_cntl = hubp1_clk_cntl,
 	.hubp_vtg_sel = hubp1_vtg_sel,
 	.hubp_read_state = hubp1_read_state,
+	.hubp_disable_control = hubp1_disable_control,
+	.hubp_get_underflow_status = hubp1_get_underflow_status,
+
 };
 
 /*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index fe9b8c4a91ca..af384034398f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -253,6 +253,7 @@
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
 	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
+	HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
 	HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
 	HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
 	HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
@@ -421,6 +422,7 @@
 
 #define DCN_HUBP_REG_FIELD_LIST(type) \
 	type HUBP_BLANK_EN;\
+	type HUBP_DISABLE;\
 	type HUBP_TTU_DISABLE;\
 	type HUBP_NO_OUTSTANDING_REQ;\
 	type HUBP_VTG_SEL;\
@@ -723,4 +725,5 @@ void hubp1_read_state(struct hubp *hubp);
 
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
 
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 50bd7548e230..be8820d8a2e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -747,6 +747,90 @@ static void reset_back_end_for_pipe(
 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
 }
 
+static bool dcn10_hw_wa_force_recovery(struct dc *dc)
+{
+	struct hubp *hubp;
+	unsigned int i;
+	bool need_recover = false;
+
+	if (!dc->debug.recovery_enabled)
+		return false;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx =
+			&dc->current_state->res_ctx.pipe_ctx[i];
+		if (pipe_ctx != NULL) {
+			hubp = pipe_ctx->plane_res.hubp;
+			if (hubp != NULL) {
+				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
+					/* one pipe underflow, we will reset all the pipes*/
+					need_recover = true;
+				}
+			}
+		}
+	}
+	if (!need_recover)
+		return false;
+	/*
+	DCHUBP_CNTL:HUBP_BLANK_EN=1
+	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
+	DCHUBP_CNTL:HUBP_DISABLE=1
+	DCHUBP_CNTL:HUBP_DISABLE=0
+	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
+	DCSURF_PRIMARY_SURFACE_ADDRESS
+	DCHUBP_CNTL:HUBP_BLANK_EN=0
+	*/
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx =
+			&dc->current_state->res_ctx.pipe_ctx[i];
+		if (pipe_ctx != NULL) {
+			hubp = pipe_ctx->plane_res.hubp;
+			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
+			if (hubp != NULL)
+				hubp->funcs->set_hubp_blank_en(hubp, true);
+		}
+	}
+	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
+	hubbub1_soft_reset(dc->res_pool->hubbub, true);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx =
+			&dc->current_state->res_ctx.pipe_ctx[i];
+		if (pipe_ctx != NULL) {
+			hubp = pipe_ctx->plane_res.hubp;
+			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
+			if (hubp != NULL)
+				hubp->funcs->hubp_disable_control(hubp, true);
+		}
+	}
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx =
+			&dc->current_state->res_ctx.pipe_ctx[i];
+		if (pipe_ctx != NULL) {
+			hubp = pipe_ctx->plane_res.hubp;
+			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
+			if (hubp != NULL)
+				hubp->funcs->hubp_disable_control(hubp, false);
+		}
+	}
+	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
+	hubbub1_soft_reset(dc->res_pool->hubbub, false);
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx =
+			&dc->current_state->res_ctx.pipe_ctx[i];
+		if (pipe_ctx != NULL) {
+			hubp = pipe_ctx->plane_res.hubp;
+			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
+			if (hubp != NULL)
+				hubp->funcs->set_hubp_blank_en(hubp, false);
+		}
+	}
+	return true;
+
+}
+
+
 static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
 {
 	static bool should_log_hw_state; /* prevent hw state log by default */
@@ -755,8 +839,12 @@ static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
 		if (should_log_hw_state) {
 			dcn10_log_hw_state(dc);
 		}
-
 		BREAK_TO_DEBUGGER();
+		if (dcn10_hw_wa_force_recovery(dc)) {
+		/*check again*/
+			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
+				BREAK_TO_DEBUGGER();
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 2c0a3150bf2d..16c84e9ee33b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -446,6 +446,7 @@ static const struct dc_debug debug_defaults_drv = {
 		.vsr_support = true,
 		.performance_trace = false,
 		.az_endpoint_mute_only = true,
+		.recovery_enabled = false, /*enable this by default after testing.*/
 };
 
 static const struct dc_debug debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 331f8ff57ed7..97df82cddf82 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -121,6 +121,8 @@ struct hubp_funcs {
 	void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
 	void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
 	void (*hubp_read_state)(struct hubp *hubp);
+	void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
+	unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
 
 };
 

From 6b8e1eb7c6e059d8bb52f24b13081205242fded9 Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Tue, 17 Apr 2018 16:50:28 -0400
Subject: [PATCH 1226/1286] drm/amd/display: Update HW sequencer initialization

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  |  6 +++---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h  |  2 ++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c      | 10 +++++-----
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h      | 10 ++++++++++
 4 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index be8820d8a2e6..24bcc5e58720 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -849,7 +849,7 @@ static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
 }
 
 /* trigger HW to start disconnect plane from stream on the next vsync */
-static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
@@ -1032,7 +1032,7 @@ static void dcn10_init_hw(struct dc *dc)
 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
 
-		plane_atomic_disconnect(dc, pipe_ctx);
+		hwss1_plane_atomic_disconnect(dc, pipe_ctx);
 	}
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2267,7 +2267,7 @@ static void dcn10_apply_ctx_for_surface(
 			old_pipe_ctx->plane_state &&
 			old_pipe_ctx->stream_res.tg == tg) {
 
-			plane_atomic_disconnect(dc, old_pipe_ctx);
+			hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
 			removed_pipe[i] = true;
 
 			DC_LOG_DC(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 6c526b5095d9..44f734b73f9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -37,4 +37,6 @@ extern void fill_display_configs(
 
 bool is_rgb_cspace(enum dc_color_space output_color_space);
 
+void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+
 #endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index c734b7fa5835..f2fbce0e3fc5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -360,7 +360,7 @@ void optc1_program_timing(
 
 }
 
-static void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
+void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
@@ -1257,20 +1257,20 @@ void optc1_read_otg_state(struct optc *optc1,
 			OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
 }
 
-static void optc1_clear_optc_underflow(struct timing_generator *optc)
+void optc1_clear_optc_underflow(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
 	REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
 }
 
-static void optc1_tg_init(struct timing_generator *optc)
+void optc1_tg_init(struct timing_generator *optc)
 {
 	optc1_set_blank_data_double_buffer(optc, true);
 	optc1_clear_optc_underflow(optc);
 }
 
-static bool optc1_is_tg_enabled(struct timing_generator *optc)
+bool optc1_is_tg_enabled(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 	uint32_t otg_enabled = 0;
@@ -1281,7 +1281,7 @@ static bool optc1_is_tg_enabled(struct timing_generator *optc)
 
 }
 
-static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
+bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 	uint32_t underflow_occurred = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 89e09e5327a2..c62052f46460 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -497,4 +497,14 @@ void optc1_program_stereo(struct timing_generator *optc,
 
 bool optc1_is_stereo_left_eye(struct timing_generator *optc);
 
+void optc1_clear_optc_underflow(struct timing_generator *optc);
+
+void optc1_tg_init(struct timing_generator *optc);
+
+bool optc1_is_tg_enabled(struct timing_generator *optc);
+
+bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
+
+void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+
 #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */

From a21ddec61c5ed30b58eea3268ad3e0c69452ebfe Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Mon, 23 Apr 2018 12:41:34 -0400
Subject: [PATCH 1227/1286] drm/amd/display: fix 31_32_fixpt shift functions
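
The dc_fixpt_shr() half of this fix changes how negative values round: a raw
arithmetic shift rounds toward minus infinity, while shifting the magnitude
and restoring the sign rounds toward zero like the positive case. A standalone
comparison with an illustrative raw value:

#include <stdio.h>

/* Old behaviour: arithmetic shift on the raw two's-complement value,
 * which rounds negative results toward minus infinity.
 */
static long long shr_arith(long long v, unsigned char shift)
{
	return v >> shift;
}

/* New behaviour (as dc_fixpt_shr now does): shift the magnitude and
 * restore the sign, so negative and positive inputs round the same way.
 */
static long long shr_magnitude(long long v, unsigned char shift)
{
	int negative = v < 0;

	if (negative)
		v = -v;
	v >>= shift;
	return negative ? -v : v;
}

int main(void)
{
	long long v = -5;	/* raw fixed-point bits; only the sign matters here */

	printf("arith:     %lld\n", shr_arith(v, 1));     /* -3 */
	printf("magnitude: %lld\n", shr_magnitude(v, 1)); /* -2 */
	return 0;
}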

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Eric Yang <eric.yang2@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/include/fixed31_32.h  | 26 +++++++++++++------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 61f11e23bf70..bd8a30462258 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -27,6 +27,12 @@
 #define __DAL_FIXED31_32_H__
 
 #define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
+#ifndef LLONG_MIN
+#define LLONG_MIN (1LL<<63)
+#endif
+#ifndef LLONG_MAX
+#define LLONG_MAX (-1LL>>1)
+#endif
 
 /*
  * @brief
@@ -45,6 +51,7 @@ struct fixed31_32 {
 	long long value;
 };
 
+
 /*
  * @brief
  * Useful constants
@@ -201,14 +208,12 @@ static inline struct fixed31_32 dc_fixpt_clamp(
  */
 static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift)
 {
-	struct fixed31_32 res;
-
 	ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
-		((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+		((arg.value < 0) && (arg.value >= (LLONG_MIN / (1 << shift)))));
 
-	res.value = arg.value << shift;
+	arg.value = arg.value << shift;
 
-	return res;
+	return arg;
 }
 
 /*
@@ -217,9 +222,14 @@ static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned cha
  */
 static inline struct fixed31_32 dc_fixpt_shr(struct fixed31_32 arg, unsigned char shift)
 {
-	struct fixed31_32 res;
-	res.value = arg.value >> shift;
-	return res;
+	bool negative = arg.value < 0;
+
+	if (negative)
+		arg.value = -arg.value;
+	arg.value = arg.value >> shift;
+	if (negative)
+		arg.value = -arg.value;
+	return arg;
 }
 
 /*

From 7ea034ce8188eaf61ce2b7d4e747e1f6e3bb8aa3 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Mon, 23 Apr 2018 14:39:23 -0400
Subject: [PATCH 1228/1286] drm/amd/display: fix a 32 bit shift meant to be 64
 warning

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index bd8a30462258..76f64e910422 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -209,7 +209,7 @@ static inline struct fixed31_32 dc_fixpt_clamp(
 static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift)
 {
 	ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
-		((arg.value < 0) && (arg.value >= (LLONG_MIN / (1 << shift)))));
+		((arg.value < 0) && (arg.value >= (LLONG_MIN / (1LL << shift)))));
 
 	arg.value = arg.value << shift;
 

From 3f460907be1b53441526e644019bcf150c433f59 Mon Sep 17 00:00:00 2001
From: Xingyue Tao <xingyue.tao@amd.com>
Date: Thu, 19 Apr 2018 16:23:12 -0400
Subject: [PATCH 1229/1286] drm/amd/display: Add dc cap to restrict VSR
 downscaling src size

- Adds int max_downscale_src_width to the dc debug struct
- Rejects downscaling when the source width exceeds 4k (width > 3840)
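
A standalone sketch of the new check, with a made-up struct mirroring the
condition added to dpp_get_optimal_number_of_taps() (a cap of 0 keeps the old
unrestricted behaviour, matching the != 0 test in the patch):

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-in for the scaler data the real check looks at. */
struct scl_sizes {
	int viewport_width, viewport_height;
	int h_active, v_active;
};

/* Only when the mode actually scales is the source width compared
 * against the cap; a cap of 0 means "no limit".
 */
static bool downscale_supported(const struct scl_sizes *s, int max_downscale_src_width)
{
	if (s->viewport_width != s->h_active &&
	    s->viewport_height != s->v_active) {
		if (max_downscale_src_width != 0 &&
		    s->viewport_width > max_downscale_src_width)
			return false;
	}
	return true;
}

int main(void)
{
	struct scl_sizes s = { 4096, 2160, 1920, 1080 };	/* 4096-wide source to 1080p */

	printf("%d\n", downscale_supported(&s, 3840));	/* 0: rejected */
	printf("%d\n", downscale_supported(&s, 0));	/* 1: no cap */
	return 0;
}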

Signed-off-by: Xingyue Tao <xingyue.tao@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h              |  1 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 16 +++++++++++-----
 .../drm/amd/display/dc/dcn10/dcn10_resource.c    |  1 +
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1c39c9996a04..08b29a742921 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -203,6 +203,7 @@ struct dc_debug {
 	bool clock_trace;
 	bool validation_trace;
 	bool bandwidth_calcs_trace;
+	int max_downscale_src_width;
 
 	/* stutter efficiency related */
 	bool disable_stutter;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 20796da36de4..2da138904312 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -145,12 +145,18 @@ bool dpp_get_optimal_number_of_taps(
 	else
 		pixel_width = scl_data->viewport.width;
 
-	/* Some ASICs does not support  FP16 scaling, so we reject modes require this*/
 	if (scl_data->viewport.width  != scl_data->h_active &&
-		scl_data->viewport.height != scl_data->v_active &&
-		dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
-		scl_data->format == PIXEL_FORMAT_FP16)
-		return false;
+		scl_data->viewport.height != scl_data->v_active) {
+
+		/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
+		if (dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+			scl_data->format == PIXEL_FORMAT_FP16)
+			return false;
+
+		if (dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+			scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+			return false;
+	}
 
 	/* TODO: add lb check */
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 16c84e9ee33b..f69f3a54f001 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -447,6 +447,7 @@ static const struct dc_debug debug_defaults_drv = {
 		.performance_trace = false,
 		.az_endpoint_mute_only = true,
 		.recovery_enabled = false, /*enable this by default after testing.*/
+		.max_downscale_src_width = 3840,
 };
 
 static const struct dc_debug debug_defaults_diags = {

From 07049507fd1b5813f667bb34e6903369487f9e34 Mon Sep 17 00:00:00 2001
From: Yue Hin Lau <Yuehin.Lau@amd.com>
Date: Wed, 18 Apr 2018 16:07:04 -0400
Subject: [PATCH 1230/1286] drm/amd/display: disable mpo if brightness adjusted

Signed-off-by: Yue Hin Lau <Yuehin.Lau@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h                   | 1 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 08b29a742921..7a9f600662ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -75,6 +75,7 @@ struct dc_caps {
 	bool dynamic_audio;
 	bool is_apu;
 	bool dual_link_dvi;
+	bool post_blend_color_processing;
 };
 
 struct dc_dcc_surface_param {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index f69f3a54f001..ace2e03dced4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1023,6 +1023,7 @@ static bool construct(
 	dc->caps.max_cursor_size = 256;
 	dc->caps.max_slave_planes = 1;
 	dc->caps.is_apu = true;
+	dc->caps.post_blend_color_processing = false;
 
 	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
 		dc->debug = debug_defaults_drv;

From a3cb1c1c8e5e494f7630349fbebb79b1787128a1 Mon Sep 17 00:00:00 2001
From: Nikola Cornij <nikola.cornij@amd.com>
Date: Mon, 23 Apr 2018 15:55:36 -0400
Subject: [PATCH 1231/1286] drm/amd/display: Log DTN only after the atomic
 commit in Diag

Also print HUBP info only if the pipe is enabled. This prevents the DTN
logs from differing between test sequences.
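
A rough sketch of the per-pipe guard (illustrative only; the hunks below
apply it to the HUBP, RQ, DLG and TTU loops of dcn10_log_hubp_states()):

	if (!s->blank_en)
		/* only log state for pipes that are actually enabled */
		DTN_INFO("[%2d]: ...", pool->hubps[i]->inst);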

Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 119 +++++++++---------
 1 file changed, 62 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 24bcc5e58720..c452972bf1c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -127,24 +127,26 @@ static void dcn10_log_hubp_states(struct dc *dc)
 
 		hubp->funcs->hubp_read_state(hubp);
 
-		DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh"
-				"  %6d  %8d  %7d  %8xh",
-				hubp->inst,
-				s->pixel_format,
-				s->inuse_addr_hi,
-				s->viewport_width,
-				s->viewport_height,
-				s->rotation_angle,
-				s->h_mirror_en,
-				s->sw_mode,
-				s->dcc_en,
-				s->blank_en,
-				s->ttu_disable,
-				s->underflow_status);
-		DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
-		DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
-		DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
-		DTN_INFO("\n");
+		if (!s->blank_en) {
+			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh"
+					"  %6d  %8d  %7d  %8xh",
+					hubp->inst,
+					s->pixel_format,
+					s->inuse_addr_hi,
+					s->viewport_width,
+					s->viewport_height,
+					s->rotation_angle,
+					s->h_mirror_en,
+					s->sw_mode,
+					s->dcc_en,
+					s->blank_en,
+					s->ttu_disable,
+					s->underflow_status);
+			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
+			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
+			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
+			DTN_INFO("\n");
+		}
 	}
 
 	DTN_INFO("\n=========RQ========\n");
@@ -155,16 +157,17 @@ static void dcn10_log_hubp_states(struct dc *dc)
 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
 
-		DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
-			i, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
-			rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
-			rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
-			rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
-			rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
-			rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
-			rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
-			rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
-			rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
+		if (!s->blank_en)
+			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
+				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
+				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
+				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
+				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
+				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
+				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
+				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
+				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
+				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
 	}
 
 	DTN_INFO("========DLG========\n");
@@ -179,27 +182,28 @@ static void dcn10_log_hubp_states(struct dc *dc)
 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
 
-		DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
-			"%  8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
-			"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
-			i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
-			dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
-			dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
-			dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
-			dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
-			dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
-			dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
-			dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
-			dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
-			dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
-			dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
-			dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
-			dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
-			dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
-			dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
-			dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
-			dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
-			dlg_regs->xfc_reg_remote_surface_flip_latency);
+		if (!s->blank_en)
+			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
+				"%  8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
+				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
+				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
+				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
+				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
+				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
+				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
+				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
+				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
+				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
+				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
+				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
+				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
+				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
+				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
+				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
+				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
+				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
+				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
+				dlg_regs->xfc_reg_remote_surface_flip_latency);
 	}
 
 	DTN_INFO("========TTU========\n");
@@ -210,14 +214,15 @@ static void dcn10_log_hubp_states(struct dc *dc)
 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
 
-		DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
-			i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
-			ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
-			ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
-			ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
-			ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
-			ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
-			ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
+		if (!s->blank_en)
+			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
+				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
+				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
+				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
+				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
+				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
+				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
+				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
 	}
 	DTN_INFO("\n");
 }

From cba5e8708ee6123af14ab1f1196353dcda3eb533 Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Tue, 20 Mar 2018 08:25:16 -0400
Subject: [PATCH 1232/1286] drm/amd/display: update dml to allow sync with DV

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/dc/dml/display_mode_enums.h   |  13 +
 .../amd/display/dc/dml/display_mode_structs.h | 854 +++++++++---------
 .../drm/amd/display/dc/dml/dml_inline_defs.h  |  10 +
 3 files changed, 461 insertions(+), 416 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index b1ad3553f900..47c19f8fe7d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -108,4 +108,17 @@ enum output_standard {
 	dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt
 };
 
+enum mpc_combine_affinity {
+	dm_mpc_always_when_possible,
+	dm_mpc_reduce_voltage,
+	dm_mpc_reduce_voltage_and_clocks
+};
+
+enum self_refresh_affinity {
+	dm_try_to_allow_self_refresh_and_mclk_switch,
+	dm_allow_self_refresh_and_mclk_switch,
+	dm_allow_self_refresh,
+	dm_neither_self_refresh_nor_mclk_switch
+};
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index ce750edc1e5f..7fa0375939ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,39 +25,39 @@
 #ifndef __DISPLAY_MODE_STRUCTS_H__
 #define __DISPLAY_MODE_STRUCTS_H__
 
-typedef struct _vcs_dpi_voltage_scaling_st	voltage_scaling_st;
-typedef struct _vcs_dpi_soc_bounding_box_st	soc_bounding_box_st;
-typedef struct _vcs_dpi_ip_params_st	ip_params_st;
-typedef struct _vcs_dpi_display_pipe_source_params_st	display_pipe_source_params_st;
-typedef struct _vcs_dpi_display_output_params_st	display_output_params_st;
-typedef struct _vcs_dpi_display_bandwidth_st	display_bandwidth_st;
-typedef struct _vcs_dpi_scaler_ratio_depth_st	scaler_ratio_depth_st;
-typedef struct _vcs_dpi_scaler_taps_st	scaler_taps_st;
-typedef struct _vcs_dpi_display_pipe_dest_params_st	display_pipe_dest_params_st;
-typedef struct _vcs_dpi_display_pipe_params_st	display_pipe_params_st;
-typedef struct _vcs_dpi_display_clocks_and_cfg_st	display_clocks_and_cfg_st;
-typedef struct _vcs_dpi_display_e2e_pipe_params_st	display_e2e_pipe_params_st;
-typedef struct _vcs_dpi_dchub_buffer_sizing_st	dchub_buffer_sizing_st;
-typedef struct _vcs_dpi_watermarks_perf_st	watermarks_perf_st;
-typedef struct _vcs_dpi_cstate_pstate_watermarks_st	cstate_pstate_watermarks_st;
-typedef struct _vcs_dpi_wm_calc_pipe_params_st	wm_calc_pipe_params_st;
-typedef struct _vcs_dpi_vratio_pre_st	vratio_pre_st;
-typedef struct _vcs_dpi_display_data_rq_misc_params_st	display_data_rq_misc_params_st;
-typedef struct _vcs_dpi_display_data_rq_sizing_params_st	display_data_rq_sizing_params_st;
-typedef struct _vcs_dpi_display_data_rq_dlg_params_st	display_data_rq_dlg_params_st;
-typedef struct _vcs_dpi_display_cur_rq_dlg_params_st	display_cur_rq_dlg_params_st;
-typedef struct _vcs_dpi_display_rq_dlg_params_st	display_rq_dlg_params_st;
-typedef struct _vcs_dpi_display_rq_sizing_params_st	display_rq_sizing_params_st;
-typedef struct _vcs_dpi_display_rq_misc_params_st	display_rq_misc_params_st;
-typedef struct _vcs_dpi_display_rq_params_st	display_rq_params_st;
-typedef struct _vcs_dpi_display_dlg_regs_st	display_dlg_regs_st;
-typedef struct _vcs_dpi_display_ttu_regs_st	display_ttu_regs_st;
-typedef struct _vcs_dpi_display_data_rq_regs_st	display_data_rq_regs_st;
-typedef struct _vcs_dpi_display_rq_regs_st	display_rq_regs_st;
-typedef struct _vcs_dpi_display_dlg_sys_params_st	display_dlg_sys_params_st;
-typedef struct _vcs_dpi_display_dlg_prefetch_param_st	display_dlg_prefetch_param_st;
-typedef struct _vcs_dpi_display_pipe_clock_st	display_pipe_clock_st;
-typedef struct _vcs_dpi_display_arb_params_st	display_arb_params_st;
+typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
+typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
+typedef struct _vcs_dpi_ip_params_st ip_params_st;
+typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st;
+typedef struct _vcs_dpi_display_output_params_st display_output_params_st;
+typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st;
+typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st;
+typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st;
+typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st;
+typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st;
+typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st;
+typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st;
+typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st;
+typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st;
+typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st;
+typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st;
+typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st;
+typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st;
+typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st;
+typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st;
+typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st;
+typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st;
+typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st;
+typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st;
+typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st;
+typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st;
+typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st;
+typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st;
+typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st;
+typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st;
 
 struct _vcs_dpi_voltage_scaling_st {
 	int state;
@@ -72,89 +72,107 @@ struct _vcs_dpi_voltage_scaling_st {
 	double dppclk_mhz;
 };
 
-struct	_vcs_dpi_soc_bounding_box_st	{
-	double	sr_exit_time_us;
-	double	sr_enter_plus_exit_time_us;
-	double	urgent_latency_us;
-	double	writeback_latency_us;
-	double	ideal_dram_bw_after_urgent_percent;
-	unsigned int	max_request_size_bytes;
-	double	downspread_percent;
-	double	dram_page_open_time_ns;
-	double	dram_rw_turnaround_time_ns;
-	double	dram_return_buffer_per_channel_bytes;
-	double	dram_channel_width_bytes;
+struct _vcs_dpi_soc_bounding_box_st {
+	double sr_exit_time_us;
+	double sr_enter_plus_exit_time_us;
+	double urgent_latency_us;
+	double urgent_latency_pixel_data_only_us;
+	double urgent_latency_pixel_mixed_with_vm_data_us;
+	double urgent_latency_vm_data_only_us;
+	double writeback_latency_us;
+	double ideal_dram_bw_after_urgent_percent;
+	double pct_ideal_dram_sdp_bw_after_urgent_pixel_only; // PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly
+	double pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm;
+	double pct_ideal_dram_sdp_bw_after_urgent_vm_only;
+	double max_avg_sdp_bw_use_normal_percent;
+	double max_avg_dram_bw_use_normal_percent;
+	unsigned int max_request_size_bytes;
+	double downspread_percent;
+	double dram_page_open_time_ns;
+	double dram_rw_turnaround_time_ns;
+	double dram_return_buffer_per_channel_bytes;
+	double dram_channel_width_bytes;
 	double fabric_datapath_to_dcn_data_return_bytes;
 	double dcn_downspread_percent;
 	double dispclk_dppclk_vco_speed_mhz;
 	double dfs_vco_period_ps;
-	unsigned int	round_trip_ping_latency_dcfclk_cycles;
-	unsigned int	urgent_out_of_order_return_per_channel_bytes;
-	unsigned int	channel_interleave_bytes;
-	unsigned int	num_banks;
-	unsigned int	num_chans;
-	unsigned int	vmm_page_size_bytes;
-	double	dram_clock_change_latency_us;
-	double	writeback_dram_clock_change_latency_us;
-	unsigned int	return_bus_width_bytes;
-	unsigned int	voltage_override;
-	double	xfc_bus_transport_time_us;
-	double	xfc_xbuf_latency_tolerance_us;
+	unsigned int urgent_out_of_order_return_per_channel_pixel_only_bytes;
+	unsigned int urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
+	unsigned int urgent_out_of_order_return_per_channel_vm_only_bytes;
+	unsigned int round_trip_ping_latency_dcfclk_cycles;
+	unsigned int urgent_out_of_order_return_per_channel_bytes;
+	unsigned int channel_interleave_bytes;
+	unsigned int num_banks;
+	unsigned int num_chans;
+	unsigned int vmm_page_size_bytes;
+	double dram_clock_change_latency_us;
+	double writeback_dram_clock_change_latency_us;
+	unsigned int return_bus_width_bytes;
+	unsigned int voltage_override;
+	double xfc_bus_transport_time_us;
+	double xfc_xbuf_latency_tolerance_us;
+	int use_urgent_burst_bw;
 	struct _vcs_dpi_voltage_scaling_st clock_limits[7];
 };
 
-struct	_vcs_dpi_ip_params_st	{
-	unsigned int	max_inter_dcn_tile_repeaters;
-	unsigned int	num_dsc;
-	unsigned int	odm_capable;
-	unsigned int	rob_buffer_size_kbytes;
-	unsigned int	det_buffer_size_kbytes;
-	unsigned int	dpte_buffer_size_in_pte_reqs;
-	unsigned int	pde_proc_buffer_size_64k_reqs;
-	unsigned int	dpp_output_buffer_pixels;
-	unsigned int	opp_output_buffer_lines;
-	unsigned int	pixel_chunk_size_kbytes;
-	unsigned char	pte_enable;
-	unsigned int	pte_chunk_size_kbytes;
-	unsigned int	meta_chunk_size_kbytes;
-	unsigned int	writeback_chunk_size_kbytes;
-	unsigned int	line_buffer_size_bits;
-	unsigned int	max_line_buffer_lines;
-	unsigned int	writeback_luma_buffer_size_kbytes;
-	unsigned int	writeback_chroma_buffer_size_kbytes;
-	unsigned int	writeback_chroma_line_buffer_width_pixels;
-	unsigned int	max_page_table_levels;
-	unsigned int	max_num_dpp;
-	unsigned int	max_num_otg;
-	unsigned int	cursor_chunk_size;
-	unsigned int	cursor_buffer_size;
-	unsigned int	max_num_wb;
-	unsigned int	max_dchub_pscl_bw_pix_per_clk;
-	unsigned int	max_pscl_lb_bw_pix_per_clk;
-	unsigned int	max_lb_vscl_bw_pix_per_clk;
-	unsigned int	max_vscl_hscl_bw_pix_per_clk;
-	double	max_hscl_ratio;
-	double	max_vscl_ratio;
-	unsigned int	hscl_mults;
-	unsigned int	vscl_mults;
-	unsigned int	max_hscl_taps;
-	unsigned int	max_vscl_taps;
-	unsigned int	xfc_supported;
-	unsigned int	xfc_fill_constant_bytes;
-	double	dispclk_ramp_margin_percent;
-	double	xfc_fill_bw_overhead_percent;
-	double	underscan_factor;
-	unsigned int	min_vblank_lines;
-	unsigned int	dppclk_delay_subtotal;
-	unsigned int	dispclk_delay_subtotal;
-	unsigned int	dcfclk_cstate_latency;
-	unsigned int	dppclk_delay_scl;
-	unsigned int	dppclk_delay_scl_lb_only;
-	unsigned int	dppclk_delay_cnvc_formatter;
-	unsigned int	dppclk_delay_cnvc_cursor;
-	unsigned int	is_line_buffer_bpp_fixed;
-	unsigned int	line_buffer_fixed_bpp;
-	unsigned int	dcc_supported;
+struct _vcs_dpi_ip_params_st {
+	bool gpuvm_enable;
+	bool hostvm_enable;
+	unsigned int gpuvm_max_page_table_levels;
+	unsigned int hostvm_max_page_table_levels;
+	unsigned int hostvm_cached_page_table_levels;
+	unsigned int pte_group_size_bytes;
+	unsigned int max_inter_dcn_tile_repeaters;
+	unsigned int num_dsc;
+	unsigned int odm_capable;
+	unsigned int rob_buffer_size_kbytes;
+	unsigned int det_buffer_size_kbytes;
+	unsigned int dpte_buffer_size_in_pte_reqs;
+	unsigned int pde_proc_buffer_size_64k_reqs;
+	unsigned int dpp_output_buffer_pixels;
+	unsigned int opp_output_buffer_lines;
+	unsigned int pixel_chunk_size_kbytes;
+	unsigned char pte_enable;
+	unsigned int pte_chunk_size_kbytes;
+	unsigned int meta_chunk_size_kbytes;
+	unsigned int writeback_chunk_size_kbytes;
+	unsigned int line_buffer_size_bits;
+	unsigned int max_line_buffer_lines;
+	unsigned int writeback_luma_buffer_size_kbytes;
+	unsigned int writeback_chroma_buffer_size_kbytes;
+	unsigned int writeback_chroma_line_buffer_width_pixels;
+	unsigned int max_page_table_levels;
+	unsigned int max_num_dpp;
+	unsigned int max_num_otg;
+	unsigned int cursor_chunk_size;
+	unsigned int cursor_buffer_size;
+	unsigned int max_num_wb;
+	unsigned int max_dchub_pscl_bw_pix_per_clk;
+	unsigned int max_pscl_lb_bw_pix_per_clk;
+	unsigned int max_lb_vscl_bw_pix_per_clk;
+	unsigned int max_vscl_hscl_bw_pix_per_clk;
+	double max_hscl_ratio;
+	double max_vscl_ratio;
+	unsigned int hscl_mults;
+	unsigned int vscl_mults;
+	unsigned int max_hscl_taps;
+	unsigned int max_vscl_taps;
+	unsigned int xfc_supported;
+	unsigned int xfc_fill_constant_bytes;
+	double dispclk_ramp_margin_percent;
+	double xfc_fill_bw_overhead_percent;
+	double underscan_factor;
+	unsigned int min_vblank_lines;
+	unsigned int dppclk_delay_subtotal;
+	unsigned int dispclk_delay_subtotal;
+	unsigned int dcfclk_cstate_latency;
+	unsigned int dppclk_delay_scl;
+	unsigned int dppclk_delay_scl_lb_only;
+	unsigned int dppclk_delay_cnvc_formatter;
+	unsigned int dppclk_delay_cnvc_cursor;
+	unsigned int is_line_buffer_bpp_fixed;
+	unsigned int line_buffer_fixed_bpp;
+	unsigned int dcc_supported;
 
 	unsigned int IsLineBufferBppFixed;
 	unsigned int LineBufferFixedBpp;
@@ -169,41 +187,45 @@ struct _vcs_dpi_display_xfc_params_st {
 	int xfc_slv_chunk_size_bytes;
 };
 
-struct	_vcs_dpi_display_pipe_source_params_st	{
-	int	source_format;
-	unsigned char	dcc;
-	unsigned int	dcc_override;
-	unsigned int	dcc_rate;
-	unsigned char	dcc_use_global;
-	unsigned char	vm;
-	unsigned char	vm_levels_force_en;
-	unsigned int	vm_levels_force;
-	int	source_scan;
-	int	sw_mode;
-	int	macro_tile_size;
-	unsigned char	is_display_sw;
-	unsigned int	viewport_width;
-	unsigned int	viewport_height;
-	unsigned int	viewport_y_y;
-	unsigned int	viewport_y_c;
-	unsigned int	viewport_width_c;
-	unsigned int	viewport_height_c;
-	unsigned int	data_pitch;
-	unsigned int	data_pitch_c;
-	unsigned int	meta_pitch;
-	unsigned int	meta_pitch_c;
-	unsigned int	cur0_src_width;
-	int	cur0_bpp;
-	unsigned int	cur1_src_width;
-	int	cur1_bpp;
-	int	num_cursors;
-	unsigned char	is_hsplit;
-	unsigned char	dynamic_metadata_enable;
-	unsigned int	dynamic_metadata_lines_before_active;
-	unsigned int	dynamic_metadata_xmit_bytes;
-	unsigned int	hsplit_grp;
-	unsigned char	xfc_enable;
-	unsigned char	xfc_slave;
+struct _vcs_dpi_display_pipe_source_params_st {
+	int source_format;
+	unsigned char dcc;
+	unsigned int dcc_override;
+	unsigned int dcc_rate;
+	unsigned char dcc_use_global;
+	unsigned char vm;
+	bool gpuvm;    // gpuvm enabled
+	bool hostvm;    // hostvm enabled
+	bool gpuvm_levels_force_en;
+	unsigned int gpuvm_levels_force;
+	bool hostvm_levels_force_en;
+	unsigned int hostvm_levels_force;
+	int source_scan;
+	int sw_mode;
+	int macro_tile_size;
+	unsigned char is_display_sw;
+	unsigned int viewport_width;
+	unsigned int viewport_height;
+	unsigned int viewport_y_y;
+	unsigned int viewport_y_c;
+	unsigned int viewport_width_c;
+	unsigned int viewport_height_c;
+	unsigned int data_pitch;
+	unsigned int data_pitch_c;
+	unsigned int meta_pitch;
+	unsigned int meta_pitch_c;
+	unsigned int cur0_src_width;
+	int cur0_bpp;
+	unsigned int cur1_src_width;
+	int cur1_bpp;
+	int num_cursors;
+	unsigned char is_hsplit;
+	unsigned char dynamic_metadata_enable;
+	unsigned int dynamic_metadata_lines_before_active;
+	unsigned int dynamic_metadata_xmit_bytes;
+	unsigned int hsplit_grp;
+	unsigned char xfc_enable;
+	unsigned char xfc_slave;
 	struct _vcs_dpi_display_xfc_params_st xfc_params;
 };
 struct writeback_st {
@@ -219,335 +241,335 @@ struct writeback_st {
 	double wb_vratio;
 };
 
-struct	_vcs_dpi_display_output_params_st	{
-	int	dp_lanes;
-	int	output_bpp;
-	int	dsc_enable;
-	int	wb_enable;
-	int	num_active_wb;
-	int	opp_input_bpc;
-	int	output_type;
-	int	output_format;
-	int	output_standard;
-	int	dsc_slices;
+struct _vcs_dpi_display_output_params_st {
+	int dp_lanes;
+	int output_bpp;
+	int dsc_enable;
+	int wb_enable;
+	int num_active_wb;
+	int output_bpc;
+	int output_type;
+	int output_format;
+	int output_standard;
+	int dsc_slices;
 	struct writeback_st wb;
 };
 
-struct	_vcs_dpi_display_bandwidth_st	{
-	double	total_bw_consumed_gbps;
-	double	guaranteed_urgent_return_bw_gbps;
+struct _vcs_dpi_display_bandwidth_st {
+	double total_bw_consumed_gbps;
+	double guaranteed_urgent_return_bw_gbps;
 };
 
-struct	_vcs_dpi_scaler_ratio_depth_st	{
-	double	hscl_ratio;
-	double	vscl_ratio;
-	double	hscl_ratio_c;
-	double	vscl_ratio_c;
-	double	vinit;
-	double	vinit_c;
-	double	vinit_bot;
-	double	vinit_bot_c;
-	int	lb_depth;
-	int	scl_enable;
+struct _vcs_dpi_scaler_ratio_depth_st {
+	double hscl_ratio;
+	double vscl_ratio;
+	double hscl_ratio_c;
+	double vscl_ratio_c;
+	double vinit;
+	double vinit_c;
+	double vinit_bot;
+	double vinit_bot_c;
+	int lb_depth;
+	int scl_enable;
 };
 
-struct	_vcs_dpi_scaler_taps_st	{
-	unsigned int	htaps;
-	unsigned int	vtaps;
-	unsigned int	htaps_c;
-	unsigned int	vtaps_c;
+struct _vcs_dpi_scaler_taps_st {
+	unsigned int htaps;
+	unsigned int vtaps;
+	unsigned int htaps_c;
+	unsigned int vtaps_c;
 };
 
-struct	_vcs_dpi_display_pipe_dest_params_st	{
-	unsigned int	recout_width;
-	unsigned int	recout_height;
-	unsigned int	full_recout_width;
-	unsigned int	full_recout_height;
-	unsigned int	hblank_start;
-	unsigned int	hblank_end;
-	unsigned int	vblank_start;
-	unsigned int	vblank_end;
-	unsigned int	htotal;
-	unsigned int	vtotal;
-	unsigned int	vactive;
-	unsigned int	hactive;
-	unsigned int	vstartup_start;
-	unsigned int	vupdate_offset;
-	unsigned int	vupdate_width;
-	unsigned int	vready_offset;
-	unsigned char	interlaced;
-	unsigned char	underscan;
-	double	pixel_rate_mhz;
-	unsigned char	synchronized_vblank_all_planes;
-	unsigned char	otg_inst;
-	unsigned char	odm_split_cnt;
-	unsigned char	odm_combine;
+struct _vcs_dpi_display_pipe_dest_params_st {
+	unsigned int recout_width;
+	unsigned int recout_height;
+	unsigned int full_recout_width;
+	unsigned int full_recout_height;
+	unsigned int hblank_start;
+	unsigned int hblank_end;
+	unsigned int vblank_start;
+	unsigned int vblank_end;
+	unsigned int htotal;
+	unsigned int vtotal;
+	unsigned int vactive;
+	unsigned int hactive;
+	unsigned int vstartup_start;
+	unsigned int vupdate_offset;
+	unsigned int vupdate_width;
+	unsigned int vready_offset;
+	unsigned char interlaced;
+	unsigned char underscan;
+	double pixel_rate_mhz;
+	unsigned char synchronized_vblank_all_planes;
+	unsigned char otg_inst;
+	unsigned char odm_split_cnt;
+	unsigned char odm_combine;
 };
 
-struct	_vcs_dpi_display_pipe_params_st	{
-	display_pipe_source_params_st	src;
-	display_pipe_dest_params_st	dest;
-	scaler_ratio_depth_st	scale_ratio_depth;
-	scaler_taps_st	scale_taps;
+struct _vcs_dpi_display_pipe_params_st {
+	display_pipe_source_params_st src;
+	display_pipe_dest_params_st dest;
+	scaler_ratio_depth_st scale_ratio_depth;
+	scaler_taps_st scale_taps;
 };
 
-struct	_vcs_dpi_display_clocks_and_cfg_st	{
-	int	voltage;
-	double	dppclk_mhz;
-	double	refclk_mhz;
-	double	dispclk_mhz;
-	double	dcfclk_mhz;
-	double	socclk_mhz;
+struct _vcs_dpi_display_clocks_and_cfg_st {
+	int voltage;
+	double dppclk_mhz;
+	double refclk_mhz;
+	double dispclk_mhz;
+	double dcfclk_mhz;
+	double socclk_mhz;
 };
 
-struct	_vcs_dpi_display_e2e_pipe_params_st	{
-	display_pipe_params_st	pipe;
-	display_output_params_st	dout;
-	display_clocks_and_cfg_st	clks_cfg;
+struct _vcs_dpi_display_e2e_pipe_params_st {
+	display_pipe_params_st pipe;
+	display_output_params_st dout;
+	display_clocks_and_cfg_st clks_cfg;
 };
 
-struct	_vcs_dpi_dchub_buffer_sizing_st	{
-	unsigned int	swath_width_y;
-	unsigned int	swath_height_y;
-	unsigned int	swath_height_c;
-	unsigned int	detail_buffer_size_y;
+struct _vcs_dpi_dchub_buffer_sizing_st {
+	unsigned int swath_width_y;
+	unsigned int swath_height_y;
+	unsigned int swath_height_c;
+	unsigned int detail_buffer_size_y;
 };
 
-struct	_vcs_dpi_watermarks_perf_st	{
-	double	stutter_eff_in_active_region_percent;
-	double	urgent_latency_supported_us;
-	double	non_urgent_latency_supported_us;
-	double	dram_clock_change_margin_us;
-	double	dram_access_eff_percent;
+struct _vcs_dpi_watermarks_perf_st {
+	double stutter_eff_in_active_region_percent;
+	double urgent_latency_supported_us;
+	double non_urgent_latency_supported_us;
+	double dram_clock_change_margin_us;
+	double dram_access_eff_percent;
 };
 
-struct	_vcs_dpi_cstate_pstate_watermarks_st	{
-	double	cstate_exit_us;
-	double	cstate_enter_plus_exit_us;
-	double	pstate_change_us;
+struct _vcs_dpi_cstate_pstate_watermarks_st {
+	double cstate_exit_us;
+	double cstate_enter_plus_exit_us;
+	double pstate_change_us;
 };
 
-struct	_vcs_dpi_wm_calc_pipe_params_st	{
-	unsigned int	num_dpp;
-	int	voltage;
-	int	output_type;
-	double	dcfclk_mhz;
-	double	socclk_mhz;
-	double	dppclk_mhz;
-	double	pixclk_mhz;
-	unsigned char	interlace_en;
-	unsigned char	pte_enable;
-	unsigned char	dcc_enable;
-	double	dcc_rate;
-	double	bytes_per_pixel_c;
-	double	bytes_per_pixel_y;
-	unsigned int	swath_width_y;
-	unsigned int	swath_height_y;
-	unsigned int	swath_height_c;
-	unsigned int	det_buffer_size_y;
-	double	h_ratio;
-	double	v_ratio;
-	unsigned int	h_taps;
-	unsigned int	h_total;
-	unsigned int	v_total;
-	unsigned int	v_active;
-	unsigned int	e2e_index;
-	double	display_pipe_line_delivery_time;
-	double	read_bw;
-	unsigned int	lines_in_det_y;
-	unsigned int	lines_in_det_y_rounded_down_to_swath;
-	double	full_det_buffering_time;
-	double	dcfclk_deepsleep_mhz_per_plane;
+struct _vcs_dpi_wm_calc_pipe_params_st {
+	unsigned int num_dpp;
+	int voltage;
+	int output_type;
+	double dcfclk_mhz;
+	double socclk_mhz;
+	double dppclk_mhz;
+	double pixclk_mhz;
+	unsigned char interlace_en;
+	unsigned char pte_enable;
+	unsigned char dcc_enable;
+	double dcc_rate;
+	double bytes_per_pixel_c;
+	double bytes_per_pixel_y;
+	unsigned int swath_width_y;
+	unsigned int swath_height_y;
+	unsigned int swath_height_c;
+	unsigned int det_buffer_size_y;
+	double h_ratio;
+	double v_ratio;
+	unsigned int h_taps;
+	unsigned int h_total;
+	unsigned int v_total;
+	unsigned int v_active;
+	unsigned int e2e_index;
+	double display_pipe_line_delivery_time;
+	double read_bw;
+	unsigned int lines_in_det_y;
+	unsigned int lines_in_det_y_rounded_down_to_swath;
+	double full_det_buffering_time;
+	double dcfclk_deepsleep_mhz_per_plane;
 };
 
-struct	_vcs_dpi_vratio_pre_st	{
-	double	vratio_pre_l;
-	double	vratio_pre_c;
+struct _vcs_dpi_vratio_pre_st {
+	double vratio_pre_l;
+	double vratio_pre_c;
 };
 
-struct	_vcs_dpi_display_data_rq_misc_params_st	{
-	unsigned int	full_swath_bytes;
-	unsigned int	stored_swath_bytes;
-	unsigned int	blk256_height;
-	unsigned int	blk256_width;
-	unsigned int	req_height;
-	unsigned int	req_width;
+struct _vcs_dpi_display_data_rq_misc_params_st {
+	unsigned int full_swath_bytes;
+	unsigned int stored_swath_bytes;
+	unsigned int blk256_height;
+	unsigned int blk256_width;
+	unsigned int req_height;
+	unsigned int req_width;
 };
 
-struct	_vcs_dpi_display_data_rq_sizing_params_st	{
-	unsigned int	chunk_bytes;
-	unsigned int	min_chunk_bytes;
-	unsigned int	meta_chunk_bytes;
-	unsigned int	min_meta_chunk_bytes;
-	unsigned int	mpte_group_bytes;
-	unsigned int	dpte_group_bytes;
+struct _vcs_dpi_display_data_rq_sizing_params_st {
+	unsigned int chunk_bytes;
+	unsigned int min_chunk_bytes;
+	unsigned int meta_chunk_bytes;
+	unsigned int min_meta_chunk_bytes;
+	unsigned int mpte_group_bytes;
+	unsigned int dpte_group_bytes;
 };
 
-struct	_vcs_dpi_display_data_rq_dlg_params_st	{
-	unsigned int	swath_width_ub;
-	unsigned int	swath_height;
-	unsigned int	req_per_swath_ub;
-	unsigned int	meta_pte_bytes_per_frame_ub;
-	unsigned int	dpte_req_per_row_ub;
-	unsigned int	dpte_groups_per_row_ub;
-	unsigned int	dpte_row_height;
-	unsigned int	dpte_bytes_per_row_ub;
-	unsigned int	meta_chunks_per_row_ub;
-	unsigned int	meta_req_per_row_ub;
-	unsigned int	meta_row_height;
-	unsigned int	meta_bytes_per_row_ub;
+struct _vcs_dpi_display_data_rq_dlg_params_st {
+	unsigned int swath_width_ub;
+	unsigned int swath_height;
+	unsigned int req_per_swath_ub;
+	unsigned int meta_pte_bytes_per_frame_ub;
+	unsigned int dpte_req_per_row_ub;
+	unsigned int dpte_groups_per_row_ub;
+	unsigned int dpte_row_height;
+	unsigned int dpte_bytes_per_row_ub;
+	unsigned int meta_chunks_per_row_ub;
+	unsigned int meta_req_per_row_ub;
+	unsigned int meta_row_height;
+	unsigned int meta_bytes_per_row_ub;
 };
 
-struct	_vcs_dpi_display_cur_rq_dlg_params_st	{
-	unsigned char	enable;
-	unsigned int	swath_height;
-	unsigned int	req_per_line;
+struct _vcs_dpi_display_cur_rq_dlg_params_st {
+	unsigned char enable;
+	unsigned int swath_height;
+	unsigned int req_per_line;
 };
 
-struct	_vcs_dpi_display_rq_dlg_params_st	{
-	display_data_rq_dlg_params_st	rq_l;
-	display_data_rq_dlg_params_st	rq_c;
-	display_cur_rq_dlg_params_st	rq_cur0;
+struct _vcs_dpi_display_rq_dlg_params_st {
+	display_data_rq_dlg_params_st rq_l;
+	display_data_rq_dlg_params_st rq_c;
+	display_cur_rq_dlg_params_st rq_cur0;
 };
 
-struct	_vcs_dpi_display_rq_sizing_params_st	{
-	display_data_rq_sizing_params_st	rq_l;
-	display_data_rq_sizing_params_st	rq_c;
+struct _vcs_dpi_display_rq_sizing_params_st {
+	display_data_rq_sizing_params_st rq_l;
+	display_data_rq_sizing_params_st rq_c;
 };
 
-struct	_vcs_dpi_display_rq_misc_params_st	{
-	display_data_rq_misc_params_st	rq_l;
-	display_data_rq_misc_params_st	rq_c;
+struct _vcs_dpi_display_rq_misc_params_st {
+	display_data_rq_misc_params_st rq_l;
+	display_data_rq_misc_params_st rq_c;
 };
 
-struct	_vcs_dpi_display_rq_params_st	{
-	unsigned char	yuv420;
-	unsigned char	yuv420_10bpc;
-	display_rq_misc_params_st	misc;
-	display_rq_sizing_params_st	sizing;
-	display_rq_dlg_params_st	dlg;
+struct _vcs_dpi_display_rq_params_st {
+	unsigned char yuv420;
+	unsigned char yuv420_10bpc;
+	display_rq_misc_params_st misc;
+	display_rq_sizing_params_st sizing;
+	display_rq_dlg_params_st dlg;
 };
 
-struct	_vcs_dpi_display_dlg_regs_st	{
-	unsigned int	refcyc_h_blank_end;
-	unsigned int	dlg_vblank_end;
-	unsigned int	min_dst_y_next_start;
-	unsigned int	refcyc_per_htotal;
-	unsigned int	refcyc_x_after_scaler;
-	unsigned int	dst_y_after_scaler;
-	unsigned int	dst_y_prefetch;
-	unsigned int	dst_y_per_vm_vblank;
-	unsigned int	dst_y_per_row_vblank;
-	unsigned int	dst_y_per_vm_flip;
-	unsigned int	dst_y_per_row_flip;
-	unsigned int	ref_freq_to_pix_freq;
-	unsigned int	vratio_prefetch;
-	unsigned int	vratio_prefetch_c;
-	unsigned int	refcyc_per_pte_group_vblank_l;
-	unsigned int	refcyc_per_pte_group_vblank_c;
-	unsigned int	refcyc_per_meta_chunk_vblank_l;
-	unsigned int	refcyc_per_meta_chunk_vblank_c;
-	unsigned int	refcyc_per_pte_group_flip_l;
-	unsigned int	refcyc_per_pte_group_flip_c;
-	unsigned int	refcyc_per_meta_chunk_flip_l;
-	unsigned int	refcyc_per_meta_chunk_flip_c;
-	unsigned int	dst_y_per_pte_row_nom_l;
-	unsigned int	dst_y_per_pte_row_nom_c;
-	unsigned int	refcyc_per_pte_group_nom_l;
-	unsigned int	refcyc_per_pte_group_nom_c;
-	unsigned int	dst_y_per_meta_row_nom_l;
-	unsigned int	dst_y_per_meta_row_nom_c;
-	unsigned int	refcyc_per_meta_chunk_nom_l;
-	unsigned int	refcyc_per_meta_chunk_nom_c;
-	unsigned int	refcyc_per_line_delivery_pre_l;
-	unsigned int	refcyc_per_line_delivery_pre_c;
-	unsigned int	refcyc_per_line_delivery_l;
-	unsigned int	refcyc_per_line_delivery_c;
-	unsigned int	chunk_hdl_adjust_cur0;
-	unsigned int	chunk_hdl_adjust_cur1;
-	unsigned int	vready_after_vcount0;
-	unsigned int	dst_y_offset_cur0;
-	unsigned int	dst_y_offset_cur1;
-	unsigned int	xfc_reg_transfer_delay;
-	unsigned int	xfc_reg_precharge_delay;
-	unsigned int	xfc_reg_remote_surface_flip_latency;
-	unsigned int	xfc_reg_prefetch_margin;
-	unsigned int	dst_y_delta_drq_limit;
+struct _vcs_dpi_display_dlg_regs_st {
+	unsigned int refcyc_h_blank_end;
+	unsigned int dlg_vblank_end;
+	unsigned int min_dst_y_next_start;
+	unsigned int refcyc_per_htotal;
+	unsigned int refcyc_x_after_scaler;
+	unsigned int dst_y_after_scaler;
+	unsigned int dst_y_prefetch;
+	unsigned int dst_y_per_vm_vblank;
+	unsigned int dst_y_per_row_vblank;
+	unsigned int dst_y_per_vm_flip;
+	unsigned int dst_y_per_row_flip;
+	unsigned int ref_freq_to_pix_freq;
+	unsigned int vratio_prefetch;
+	unsigned int vratio_prefetch_c;
+	unsigned int refcyc_per_pte_group_vblank_l;
+	unsigned int refcyc_per_pte_group_vblank_c;
+	unsigned int refcyc_per_meta_chunk_vblank_l;
+	unsigned int refcyc_per_meta_chunk_vblank_c;
+	unsigned int refcyc_per_pte_group_flip_l;
+	unsigned int refcyc_per_pte_group_flip_c;
+	unsigned int refcyc_per_meta_chunk_flip_l;
+	unsigned int refcyc_per_meta_chunk_flip_c;
+	unsigned int dst_y_per_pte_row_nom_l;
+	unsigned int dst_y_per_pte_row_nom_c;
+	unsigned int refcyc_per_pte_group_nom_l;
+	unsigned int refcyc_per_pte_group_nom_c;
+	unsigned int dst_y_per_meta_row_nom_l;
+	unsigned int dst_y_per_meta_row_nom_c;
+	unsigned int refcyc_per_meta_chunk_nom_l;
+	unsigned int refcyc_per_meta_chunk_nom_c;
+	unsigned int refcyc_per_line_delivery_pre_l;
+	unsigned int refcyc_per_line_delivery_pre_c;
+	unsigned int refcyc_per_line_delivery_l;
+	unsigned int refcyc_per_line_delivery_c;
+	unsigned int chunk_hdl_adjust_cur0;
+	unsigned int chunk_hdl_adjust_cur1;
+	unsigned int vready_after_vcount0;
+	unsigned int dst_y_offset_cur0;
+	unsigned int dst_y_offset_cur1;
+	unsigned int xfc_reg_transfer_delay;
+	unsigned int xfc_reg_precharge_delay;
+	unsigned int xfc_reg_remote_surface_flip_latency;
+	unsigned int xfc_reg_prefetch_margin;
+	unsigned int dst_y_delta_drq_limit;
 };
 
-struct	_vcs_dpi_display_ttu_regs_st	{
-	unsigned int	qos_level_low_wm;
-	unsigned int	qos_level_high_wm;
-	unsigned int	min_ttu_vblank;
-	unsigned int	qos_level_flip;
-	unsigned int	refcyc_per_req_delivery_l;
-	unsigned int	refcyc_per_req_delivery_c;
-	unsigned int	refcyc_per_req_delivery_cur0;
-	unsigned int	refcyc_per_req_delivery_cur1;
-	unsigned int	refcyc_per_req_delivery_pre_l;
-	unsigned int	refcyc_per_req_delivery_pre_c;
-	unsigned int	refcyc_per_req_delivery_pre_cur0;
-	unsigned int	refcyc_per_req_delivery_pre_cur1;
-	unsigned int	qos_level_fixed_l;
-	unsigned int	qos_level_fixed_c;
-	unsigned int	qos_level_fixed_cur0;
-	unsigned int	qos_level_fixed_cur1;
-	unsigned int	qos_ramp_disable_l;
-	unsigned int	qos_ramp_disable_c;
-	unsigned int	qos_ramp_disable_cur0;
-	unsigned int	qos_ramp_disable_cur1;
+struct _vcs_dpi_display_ttu_regs_st {
+	unsigned int qos_level_low_wm;
+	unsigned int qos_level_high_wm;
+	unsigned int min_ttu_vblank;
+	unsigned int qos_level_flip;
+	unsigned int refcyc_per_req_delivery_l;
+	unsigned int refcyc_per_req_delivery_c;
+	unsigned int refcyc_per_req_delivery_cur0;
+	unsigned int refcyc_per_req_delivery_cur1;
+	unsigned int refcyc_per_req_delivery_pre_l;
+	unsigned int refcyc_per_req_delivery_pre_c;
+	unsigned int refcyc_per_req_delivery_pre_cur0;
+	unsigned int refcyc_per_req_delivery_pre_cur1;
+	unsigned int qos_level_fixed_l;
+	unsigned int qos_level_fixed_c;
+	unsigned int qos_level_fixed_cur0;
+	unsigned int qos_level_fixed_cur1;
+	unsigned int qos_ramp_disable_l;
+	unsigned int qos_ramp_disable_c;
+	unsigned int qos_ramp_disable_cur0;
+	unsigned int qos_ramp_disable_cur1;
 };
 
-struct	_vcs_dpi_display_data_rq_regs_st	{
-	unsigned int	chunk_size;
-	unsigned int	min_chunk_size;
-	unsigned int	meta_chunk_size;
-	unsigned int	min_meta_chunk_size;
-	unsigned int	dpte_group_size;
-	unsigned int	mpte_group_size;
-	unsigned int	swath_height;
-	unsigned int	pte_row_height_linear;
+struct _vcs_dpi_display_data_rq_regs_st {
+	unsigned int chunk_size;
+	unsigned int min_chunk_size;
+	unsigned int meta_chunk_size;
+	unsigned int min_meta_chunk_size;
+	unsigned int dpte_group_size;
+	unsigned int mpte_group_size;
+	unsigned int swath_height;
+	unsigned int pte_row_height_linear;
 };
 
-struct	_vcs_dpi_display_rq_regs_st	{
-	display_data_rq_regs_st	rq_regs_l;
-	display_data_rq_regs_st	rq_regs_c;
-	unsigned int	drq_expansion_mode;
-	unsigned int	prq_expansion_mode;
-	unsigned int	mrq_expansion_mode;
-	unsigned int	crq_expansion_mode;
-	unsigned int	plane1_base_address;
+struct _vcs_dpi_display_rq_regs_st {
+	display_data_rq_regs_st rq_regs_l;
+	display_data_rq_regs_st rq_regs_c;
+	unsigned int drq_expansion_mode;
+	unsigned int prq_expansion_mode;
+	unsigned int mrq_expansion_mode;
+	unsigned int crq_expansion_mode;
+	unsigned int plane1_base_address;
 };
 
-struct	_vcs_dpi_display_dlg_sys_params_st	{
-	double	t_mclk_wm_us;
-	double	t_urg_wm_us;
-	double	t_sr_wm_us;
-	double	t_extra_us;
-	double	mem_trip_us;
-	double	t_srx_delay_us;
-	double	deepsleep_dcfclk_mhz;
-	double	total_flip_bw;
-	unsigned int	total_flip_bytes;
+struct _vcs_dpi_display_dlg_sys_params_st {
+	double t_mclk_wm_us;
+	double t_urg_wm_us;
+	double t_sr_wm_us;
+	double t_extra_us;
+	double mem_trip_us;
+	double t_srx_delay_us;
+	double deepsleep_dcfclk_mhz;
+	double total_flip_bw;
+	unsigned int total_flip_bytes;
 };
 
-struct	_vcs_dpi_display_dlg_prefetch_param_st	{
-	double	prefetch_bw;
-	unsigned int	flip_bytes;
+struct _vcs_dpi_display_dlg_prefetch_param_st {
+	double prefetch_bw;
+	unsigned int flip_bytes;
 };
 
-struct	_vcs_dpi_display_pipe_clock_st	{
-	double	dcfclk_mhz;
-	double	dispclk_mhz;
-	double	socclk_mhz;
-	double	dscclk_mhz[6];
-	double	dppclk_mhz[6];
+struct _vcs_dpi_display_pipe_clock_st {
+	double dcfclk_mhz;
+	double dispclk_mhz;
+	double socclk_mhz;
+	double dscclk_mhz[6];
+	double dppclk_mhz[6];
 };
 
-struct	_vcs_dpi_display_arb_params_st	{
-	int	max_req_outstanding;
-	int	min_req_outstanding;
-	int	sat_level_us;
+struct _vcs_dpi_display_arb_params_st {
+	int max_req_outstanding;
+	int min_req_outstanding;
+	int sat_level_us;
 };
 
 #endif /*__DISPLAY_MODE_STRUCTS_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index f9cf08357989..e8ce08567cd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -35,6 +35,16 @@ static inline double dml_min(double a, double b)
 	return (double) dcn_bw_min2(a, b);
 }
 
+static inline double dml_min3(double a, double b, double c)
+{
+	return dml_min(dml_min(a, b), c);
+}
+
+static inline double dml_min4(double a, double b, double c, double d)
+{
+	return dml_min(dml_min(a, b), dml_min(c, d));
+}
+
 static inline double dml_max(double a, double b)
 {
 	return (double) dcn_bw_max2(a, b);

From 66dec27a987bfcd2572bfc7520826b11340d264f Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Tue, 24 Apr 2018 15:21:33 -0400
Subject: [PATCH 1233/1286] drm/amd/display: Fix up dm logging functionality

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c |  5 ----
 .../drm/amd/display/dc/basics/log_helpers.c   |  1 -
 .../gpu/drm/amd/display/dc/basics/logger.c    |  1 +
 drivers/gpu/drm/amd/display/dc/dm_services.h  |  4 ----
 .../gpu/drm/amd/display/modules/stats/stats.c | 24 ++++++++++++-------
 5 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ca0b08bfa2cf..bd449351803f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -330,11 +330,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 	return true;
 }
 
-bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
-{
-	return true;
-}
-
 void dm_dtn_log_begin(struct dc_context *ctx)
 {}
 
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index 854678a0c54b..021451549ff7 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -94,7 +94,6 @@ void dc_conn_log(struct dc_context *ctx,
 			dm_logger_append(&entry, "%2.2X ", hex_data[i]);
 
 	dm_logger_append(&entry, "^\n");
-	dm_helpers_dc_conn_log(ctx, &entry, event);
 
 fail:
 	dm_logger_close(&entry);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
index 0001a3c5b862..738a818d58d1 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -402,3 +402,4 @@ cleanup:
 		entry->max_buf_bytes = 0;
 	}
 }
+
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 8eafe1af8a5e..4ff9b2bba178 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -355,10 +355,6 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
 /*
  * Debug and verification hooks
  */
-bool dm_helpers_dc_conn_log(
-		struct dc_context *ctx,
-		struct log_entry *entry,
-		enum dc_log_type event);
 
 void dm_dtn_log_begin(struct dc_context *ctx);
 void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...);
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index d16aac7b30b3..ae2d92b73cf1 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -168,6 +168,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 	struct core_stats *core_stats = NULL;
 	struct stats_time_cache *time = NULL;
 	unsigned int index = 0;
+	struct log_entry log_entry;
 
 	if (mod_stats == NULL)
 		return;
@@ -177,17 +178,22 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 	logger = dc->ctx->logger;
 	time = core_stats->time;
 
-	dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Caps==");
-	dm_logger_write(logger, LOG_DISPLAYSTATS, " ");
+	dm_logger_open(
+		dc->ctx->logger,
+		&log_entry,
+		LOG_DISPLAYSTATS);
 
-	dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Stats==");
-	dm_logger_write(logger, LOG_DISPLAYSTATS, " ");
+	dm_logger_append(&log_entry, "==Display Caps==\n");
+	dm_logger_append(&log_entry, "\n");
 
-	dm_logger_write(logger, LOG_DISPLAYSTATS,
+	dm_logger_append(&log_entry, "==Display Stats==\n");
+	dm_logger_append(&log_entry, "\n");
+
+	dm_logger_append(&log_entry,
 		"%10s %10s %10s %10s %10s"
 			" %11s %11s %17s %10s %14s"
 			" %10s %10s %10s %10s %10s"
-			" %10s %10s %10s %10s",
+			" %10s %10s %10s %10s\n",
 		"render", "avgRender",
 		"minWindow", "midPoint", "maxWindow",
 		"vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
@@ -197,11 +203,11 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 		"vSyncTime4", "vSyncTime5", "flags");
 
 	for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
-		dm_logger_write(logger, LOG_DISPLAYSTATS,
+		dm_logger_append(&log_entry,
 			"%10u %10u %10u %10u %10u"
 				" %11u %11u %17u %10u %14u"
 				" %10u %10u %10u %10u %10u"
-				" %10u %10u %10u %10u",
+				" %10u %10u %10u %10u\n",
 			time[i].render_time_in_us,
 			time[i].avg_render_time_in_us_last_ten,
 			time[i].min_window,
@@ -222,6 +228,8 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 			time[i].v_sync_time_in_us[4],
 			time[i].flags);
 	}
+
+	dm_logger_close(&log_entry);
 }
 
 void mod_stats_reset_data(struct mod_stats *mod_stats)

From 5103c5688518ea16c7f2f864b784c1266cd13c89 Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Tue, 24 Apr 2018 15:36:27 -0400
Subject: [PATCH 1234/1286] drm/amd/display: use macro for logs

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../amd/display/include/logger_interface.h    |  9 +++++++++
 .../gpu/drm/amd/display/modules/stats/stats.c | 19 ++++++-------------
 2 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 28dee960d509..dc98d6d4b2bd 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -190,4 +190,13 @@ void context_clock_trace(
 	} \
 } while (0)
 
+#define DISPLAY_STATS_BEGIN(entry) \
+	dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS)
+
+#define DISPLAY_STATS(msg, ...) \
+	dm_logger_append(&log_entry, msg, ##__VA_ARGS__)
+
+#define DISPLAY_STATS_END(entry) \
+	dm_logger_close(&entry)
+
 #endif /* __DAL_LOGGER_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index ae2d92b73cf1..45acdbc3c08a 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -178,19 +178,13 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 	logger = dc->ctx->logger;
 	time = core_stats->time;
 
-	dm_logger_open(
-		dc->ctx->logger,
-		&log_entry,
-		LOG_DISPLAYSTATS);
+	DISPLAY_STATS_BEGIN(log_entry);
 
-	dm_logger_append(&log_entry, "==Display Caps==\n");
-	dm_logger_append(&log_entry, "\n");
+	DISPLAY_STATS("==Display Caps==\n");
 
-	dm_logger_append(&log_entry, "==Display Stats==\n");
-	dm_logger_append(&log_entry, "\n");
+	DISPLAY_STATS("==Display Stats==\n");
 
-	dm_logger_append(&log_entry,
-		"%10s %10s %10s %10s %10s"
+	DISPLAY_STATS("%10s %10s %10s %10s %10s"
 			" %11s %11s %17s %10s %14s"
 			" %10s %10s %10s %10s %10s"
 			" %10s %10s %10s %10s\n",
@@ -203,8 +197,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 		"vSyncTime4", "vSyncTime5", "flags");
 
 	for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
-		dm_logger_append(&log_entry,
-			"%10u %10u %10u %10u %10u"
+		DISPLAY_STATS("%10u %10u %10u %10u %10u"
 				" %11u %11u %17u %10u %14u"
 				" %10u %10u %10u %10u %10u"
 				" %10u %10u %10u %10u\n",
@@ -229,7 +222,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 			time[i].flags);
 	}
 
-	dm_logger_close(&log_entry);
+	DISPLAY_STATS_END(log_entry);
 }
 
 void mod_stats_reset_data(struct mod_stats *mod_stats)

From eb815442e840e436108ae4112fa80fc2e7ff47f3 Mon Sep 17 00:00:00 2001
From: Samson Tam <Samson.Tam@amd.com>
Date: Fri, 13 Apr 2018 18:38:56 -0400
Subject: [PATCH 1235/1286] drm/amd/display: don't create new dc_sink if
 nothing changed at detection

Signed-off-by: Samson Tam <Samson.Tam@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 95 +++++++++++++++----
 1 file changed, 77 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ea5d5ffd5522..2fa521812d23 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -469,6 +469,13 @@ static void link_disconnect_sink(struct dc_link *link)
 	link->dpcd_sink_count = 0;
 }
 
+static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link)
+{
+	dc_sink_release(link->local_sink);
+	link->local_sink = prev_sink;
+}
+
+
 static bool detect_dp(
 	struct dc_link *link,
 	struct display_sink_capability *sink_caps,
@@ -551,6 +558,17 @@ static bool detect_dp(
 	return true;
 }
 
+static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
+{
+	if (old_edid->length != new_edid->length)
+		return false;
+
+	if (new_edid->length == 0)
+		return false;
+
+	return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
+}
+
 bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 {
 	struct dc_sink_init_data sink_init_data = { 0 };
@@ -558,9 +576,13 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 	uint8_t i;
 	bool converter_disable_audio = false;
 	struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+	bool same_edid = false;
 	enum dc_edid_status edid_status;
 	struct dc_context *dc_ctx = link->ctx;
 	struct dc_sink *sink = NULL;
+	struct dc_sink *prev_sink = NULL;
+	struct dpcd_caps prev_dpcd_caps;
+	bool same_dpcd = true;
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	DC_LOGGER_INIT(link->ctx->logger);
 	if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
@@ -575,6 +597,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 			link->local_sink)
 		return true;
 
+	prev_sink = link->local_sink;
+	if (prev_sink != NULL) {
+		dc_sink_retain(prev_sink);
+		memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
+	}
 	link_disconnect_sink(link);
 
 	if (new_connection_type != dc_connection_none) {
@@ -616,14 +643,25 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 				link,
 				&sink_caps,
 				&converter_disable_audio,
-				aud_support, reason))
+				aud_support, reason)) {
+				if (prev_sink != NULL)
+					dc_sink_release(prev_sink);
 				return false;
+			}
 
+			// Check if dpcp block is the same
+			if (prev_sink != NULL) {
+				if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
+					same_dpcd = false;
+			}
 			/* Active dongle downstream unplug */
 			if (link->type == dc_connection_active_dongle
 					&& link->dpcd_caps.sink_count.
-					bits.SINK_COUNT == 0)
+					bits.SINK_COUNT == 0) {
+				if (prev_sink != NULL)
+					dc_sink_release(prev_sink);
 				return true;
+			}
 
 			if (link->type == dc_connection_mst_branch) {
 				LINK_INFO("link=%d, mst branch is now Connected\n",
@@ -634,6 +672,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 				 * pbn_per_slot value leading to exception on dc_fixpt_div()
 				 */
 				link->verified_link_cap = link->reported_link_cap;
+				if (prev_sink != NULL)
+					dc_sink_release(prev_sink);
 				return false;
 			}
 
@@ -643,6 +683,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 		default:
 			DC_ERROR("Invalid connector type! signal:%d\n",
 				link->connector_signal);
+			if (prev_sink != NULL)
+				dc_sink_release(prev_sink);
 			return false;
 		} /* switch() */
 
@@ -665,6 +707,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 		sink = dc_sink_create(&sink_init_data);
 		if (!sink) {
 			DC_ERROR("Failed to create sink!\n");
+			if (prev_sink != NULL)
+				dc_sink_release(prev_sink);
 			return false;
 		}
 
@@ -688,23 +732,34 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 			break;
 		}
 
-		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
-			sink_caps.transaction_type ==
-			DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
-			/*
-			 * TODO debug why Dell 2413 doesn't like
-			 *  two link trainings
-			 */
+		// Check if edid is the same
+		if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
+			same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
 
-			/* deal with non-mst cases */
-			dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+		// If both the EDID and DPCD are the same, discard the new sink and revert to the original sink
+		if ((same_edid) && (same_dpcd)) {
+			link_disconnect_remap(prev_sink, link);
+			sink = prev_sink;
+			prev_sink = NULL;
+		} else {
+			if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+					sink_caps.transaction_type ==
+						DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+				/*
+				 * TODO debug why Dell 2413 doesn't like
+				 *  two link trainings
+				 */
+
+				/* deal with non-mst cases */
+				dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+			}
+
+			/* HDMI-DVI Dongle */
+			if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+					!sink->edid_caps.edid_hdmi)
+				sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
 		}
 
-		/* HDMI-DVI Dongle */
-		if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
-				!sink->edid_caps.edid_hdmi)
-			sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
-
 		/* Connectivity log: detection */
 		for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
 			CONN_DATA_DETECT(link,
@@ -762,10 +817,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 		sink_caps.signal = SIGNAL_TYPE_NONE;
 	}
 
-	LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
+	LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
 		link->link_index, sink,
 		(sink_caps.signal == SIGNAL_TYPE_NONE ?
-			"Disconnected":"Connected"));
+			"Disconnected":"Connected"), prev_sink,
+			same_dpcd, same_edid);
+
+	if (prev_sink != NULL)
+		dc_sink_release(prev_sink);
 
 	return true;
 }

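The patch above keys sink reuse on a byte-wise EDID comparison (is_same_edid) plus a memcmp of the cached DPCD caps. A standalone sketch of the EDID check, using a simplified stand-in for struct dc_edid rather than the driver types, looks like this:

  #include <stdbool.h>
  #include <string.h>

  /* Simplified stand-in for struct dc_edid: a length plus raw block bytes. */
  struct edid_blob {
      unsigned int length;
      unsigned char raw[512];
  };

  /* Same rule as the patch: equal length, non-zero, identical bytes. */
  static bool edid_unchanged(const struct edid_blob *old_edid,
                             const struct edid_blob *new_edid)
  {
      if (old_edid->length != new_edid->length)
          return false;
      if (new_edid->length == 0)
          return false;
      return memcmp(old_edid->raw, new_edid->raw, new_edid->length) == 0;
  }

When both comparisons report "unchanged", the detection path discards the newly created sink and keeps the previously retained one, so callers continue to see the same dc_sink object.
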
From 8fc06ebc2bb719cddb041bcb14b5ca87adbcd57f Mon Sep 17 00:00:00 2001
From: Xingyue Tao <xingyue.tao@amd.com>
Date: Thu, 19 Apr 2018 16:23:12 -0400
Subject: [PATCH 1236/1286] drm/amd/display: Only limit VSR downscaling when
 actually downscaling

Signed-off-by: Xingyue Tao <xingyue.tao@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c  | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 2da138904312..46a35c7f01df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -145,18 +145,17 @@ bool dpp_get_optimal_number_of_taps(
 	else
 		pixel_width = scl_data->viewport.width;
 
+	/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
 	if (scl_data->viewport.width  != scl_data->h_active &&
-		scl_data->viewport.height != scl_data->v_active) {
+		scl_data->viewport.height != scl_data->v_active &&
+		dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+		scl_data->format == PIXEL_FORMAT_FP16)
+		return false;
 
-		/* Some ASICs does not support  FP16 scaling, so we reject modes require this*/
-		if (dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
-			scl_data->format == PIXEL_FORMAT_FP16)
-			return false;
-
-		if (dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
-			scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
-			return false;
-	}
+	if (scl_data->viewport.width > scl_data->h_active &&
+		dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+		scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+		return false;
 
 	/* TODO: add lb check */
 

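The rework above hoists both limits out of the old combined branch: the FP16 restriction still applies when both dimensions differ from the active size, while the max_downscale_src_width cap now applies only when the viewport is wider than the active region, i.e. when actually downscaling. A condensed standalone sketch of that decision, with simplified inputs standing in for scl_data and the dpp caps, is:

  #include <stdbool.h>

  struct scale_check {
      unsigned int viewport_w, viewport_h;   /* source rectangle */
      unsigned int h_active, v_active;       /* destination timing */
      bool is_fp16;                          /* pixel format is FP16 */
      bool fp16_scaling_unsupported;         /* fixed-format-only DSCL */
      unsigned int max_downscale_src_width;  /* 0 means no limit */
  };

  /* Returns false when the requested mode must be rejected. */
  static bool scaling_allowed(const struct scale_check *s)
  {
      /* FP16 cannot be scaled on fixed-format-only hardware. */
      if (s->viewport_w != s->h_active && s->viewport_h != s->v_active &&
          s->fp16_scaling_unsupported && s->is_fp16)
          return false;

      /* Apply the source-width cap only when genuinely downscaling. */
      if (s->viewport_w > s->h_active &&
          s->max_downscale_src_width != 0 &&
          s->viewport_w > s->max_downscale_src_width)
          return false;

      return true;
  }
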
From 109ece8d43cdb491a968b3690e947e27225f886e Mon Sep 17 00:00:00 2001
From: Jun Lei <Jun.Lei@amd.com>
Date: Thu, 26 Apr 2018 10:24:25 -0400
Subject: [PATCH 1237/1286] drm/amd/display: constify a few dc_surface_update
 fields

Signed-off-by: Jun Lei <Jun.Lei@amd.com>
Reviewed-by: Jun Lei <Jun.Lei@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7a9f600662ce..9cfde0ccf4e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -503,18 +503,18 @@ struct dc_surface_update {
 	struct dc_plane_state *surface;
 
 	/* isr safe update parameters.  null means no updates */
-	struct dc_flip_addrs *flip_addr;
-	struct dc_plane_info *plane_info;
-	struct dc_scaling_info *scaling_info;
+	const struct dc_flip_addrs *flip_addr;
+	const struct dc_plane_info *plane_info;
+	const struct dc_scaling_info *scaling_info;
 
 	/* following updates require alloc/sleep/spin that is not isr safe,
 	 * null means no updates
 	 */
-	struct dc_gamma *gamma;
-	struct dc_transfer_func *in_transfer_func;
+	const struct dc_gamma *gamma;
+	const struct dc_transfer_func *in_transfer_func;
 
-	struct dc_csc_transform *input_csc_color_matrix;
-	struct fixed31_32 *coeff_reduction_factor;
+	const struct dc_csc_transform *input_csc_color_matrix;
+	const struct fixed31_32 *coeff_reduction_factor;
 };
 
 /*

From 6474b2824d71ac6cd1005aff8841dd8bcfa0901d Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Thu, 26 Apr 2018 10:03:44 -0400
Subject: [PATCH 1238/1286] drm/amd/display: Add fullscreen transitions to log

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/modules/inc/mod_stats.h   |   4 +
 .../gpu/drm/amd/display/modules/stats/stats.c | 139 ++++++++++++++----
 2 files changed, 115 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 3230e2adb870..3812094b52e8 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -46,6 +46,10 @@ void mod_stats_dump(struct mod_stats *mod_stats);
 
 void mod_stats_reset_data(struct mod_stats *mod_stats);
 
+void mod_stats_update_event(struct mod_stats *mod_stats,
+		char *event_string,
+		unsigned int length);
+
 void mod_stats_update_flip(struct mod_stats *mod_stats,
 		unsigned long timestamp_in_ns);
 
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 45acdbc3c08a..4b00bae725b9 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -36,9 +36,14 @@
 #define DAL_STATS_ENTRIES_REGKEY_DEFAULT	0x00350000
 #define DAL_STATS_ENTRIES_REGKEY_MAX		0x01000000
 
+#define DAL_STATS_EVENT_ENTRIES_DEFAULT		0x00000100
+
 #define MOD_STATS_NUM_VSYNCS			5
+#define MOD_STATS_EVENT_STRING_MAX		512
 
 struct stats_time_cache {
+	unsigned int entry_id;
+
 	unsigned long flip_timestamp_in_ns;
 	unsigned long vupdate_timestamp_in_ns;
 
@@ -63,15 +68,26 @@ struct stats_time_cache {
 	unsigned int flags;
 };
 
+struct stats_event_cache {
+	unsigned int entry_id;
+	char event_string[MOD_STATS_EVENT_STRING_MAX];
+};
+
 struct core_stats {
 	struct mod_stats public;
 	struct dc *dc;
 
+	bool enabled;
+	unsigned int entries;
+	unsigned int event_entries;
+	unsigned int entry_id;
+
 	struct stats_time_cache *time;
 	unsigned int index;
 
-	bool enabled;
-	unsigned int entries;
+	struct stats_event_cache *events;
+	unsigned int event_index;
+
 };
 
 #define MOD_STATS_TO_CORE(mod_stats)\
@@ -125,9 +141,18 @@ struct mod_stats *mod_stats_create(struct dc *dc)
 			else
 				core_stats->entries = reg_data;
 		}
-
-		core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
+		core_stats->time = kzalloc(
+			sizeof(struct stats_time_cache) *
+				core_stats->entries,
 						GFP_KERNEL);
+
+
+		core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
+		core_stats->events = kzalloc(
+			sizeof(struct stats_event_cache) *
+				core_stats->event_entries,
+						GFP_KERNEL);
+
 	} else {
 		core_stats->entries = 0;
 	}
@@ -139,6 +164,10 @@ struct mod_stats *mod_stats_create(struct dc *dc)
 	 * handle calculation cases that depend on previous flip data.
 	 */
 	core_stats->index = 1;
+	core_stats->event_index = 0;
+
+	// Keeps track of ordering within the different stats structures
+	core_stats->entry_id = 0;
 
 	return &core_stats->public;
 
@@ -167,6 +196,9 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 	struct dal_logger *logger = NULL;
 	struct core_stats *core_stats = NULL;
 	struct stats_time_cache *time = NULL;
+	struct stats_event_cache *events = NULL;
+	unsigned int time_index = 1;
+	unsigned int event_index = 0;
 	unsigned int index = 0;
 	struct log_entry log_entry;
 
@@ -177,6 +209,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 	dc = core_stats->dc;
 	logger = dc->ctx->logger;
 	time = core_stats->time;
+	events = core_stats->events;
 
 	DISPLAY_STATS_BEGIN(log_entry);
 
@@ -196,30 +229,39 @@ void mod_stats_dump(struct mod_stats *mod_stats)
 		"vSyncTime1", "vSyncTime2", "vSyncTime3",
 		"vSyncTime4", "vSyncTime5", "flags");
 
-	for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
-		DISPLAY_STATS("%10u %10u %10u %10u %10u"
-				" %11u %11u %17u %10u %14u"
-				" %10u %10u %10u %10u %10u"
-				" %10u %10u %10u %10u\n",
-			time[i].render_time_in_us,
-			time[i].avg_render_time_in_us_last_ten,
-			time[i].min_window,
-			time[i].lfc_mid_point_in_us,
-			time[i].max_window,
-			time[i].vsync_to_flip_time_in_us,
-			time[i].flip_to_vsync_time_in_us,
-			time[i].num_vsync_between_flips,
-			time[i].num_frames_inserted,
-			time[i].inserted_duration_in_us,
-			time[i].v_total_min,
-			time[i].v_total_max,
-			time[i].event_triggers,
-			time[i].v_sync_time_in_us[0],
-			time[i].v_sync_time_in_us[1],
-			time[i].v_sync_time_in_us[2],
-			time[i].v_sync_time_in_us[3],
-			time[i].v_sync_time_in_us[4],
-			time[i].flags);
+	for (int i = 0; i < core_stats->entry_id; i++) {
+		if (event_index < core_stats->event_index &&
+				i == events[event_index].entry_id) {
+			DISPLAY_STATS("%s\n", events[event_index].event_string);
+			event_index++;
+		} else if (time_index < core_stats->index &&
+				i == time[time_index].entry_id) {
+			DISPLAY_STATS("%10u %10u %10u %10u %10u"
+					" %11u %11u %17u %10u %14u"
+					" %10u %10u %10u %10u %10u"
+					" %10u %10u %10u %10u\n",
+				time[time_index].render_time_in_us,
+				time[time_index].avg_render_time_in_us_last_ten,
+				time[time_index].min_window,
+				time[time_index].lfc_mid_point_in_us,
+				time[time_index].max_window,
+				time[time_index].vsync_to_flip_time_in_us,
+				time[time_index].flip_to_vsync_time_in_us,
+				time[time_index].num_vsync_between_flips,
+				time[time_index].num_frames_inserted,
+				time[time_index].inserted_duration_in_us,
+				time[time_index].v_total_min,
+				time[time_index].v_total_max,
+				time[time_index].event_triggers,
+				time[time_index].v_sync_time_in_us[0],
+				time[time_index].v_sync_time_in_us[1],
+				time[time_index].v_sync_time_in_us[2],
+				time[time_index].v_sync_time_in_us[3],
+				time[time_index].v_sync_time_in_us[4],
+				time[time_index].flags);
+
+			time_index++;
+		}
 	}
 
 	DISPLAY_STATS_END(log_entry);
@@ -239,7 +281,46 @@ void mod_stats_reset_data(struct mod_stats *mod_stats)
 	memset(core_stats->time, 0,
 		sizeof(struct stats_time_cache) * core_stats->entries);
 
+	memset(core_stats->events, 0,
+		sizeof(struct stats_event_cache) * core_stats->event_entries);
+
 	core_stats->index = 1;
+	core_stats->event_index = 0;
+
+	// Keeps track of ordering within the different stats structures
+	core_stats->entry_id = 0;
+}
+
+void mod_stats_update_event(struct mod_stats *mod_stats,
+		char *event_string,
+		unsigned int length)
+{
+	struct core_stats *core_stats = NULL;
+	struct stats_event_cache *events = NULL;
+	unsigned int index = 0;
+	unsigned int copy_length = 0;
+
+	if (mod_stats == NULL)
+		return;
+
+	core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+	if (core_stats->index >= core_stats->entries)
+		return;
+
+	events = core_stats->events;
+	index = core_stats->event_index;
+
+	copy_length = length;
+	if (length > MOD_STATS_EVENT_STRING_MAX)
+		copy_length = MOD_STATS_EVENT_STRING_MAX;
+
+	memcpy(&events[index].event_string, event_string, copy_length);
+	events[index].event_string[copy_length - 1] = '\0';
+
+	events[index].entry_id = core_stats->entry_id;
+	core_stats->event_index++;
+	core_stats->entry_id++;
 }
 
 void mod_stats_update_flip(struct mod_stats *mod_stats,
@@ -280,7 +361,9 @@ void mod_stats_update_flip(struct mod_stats *mod_stats,
 			(timestamp_in_ns -
 				time[index - 1].vupdate_timestamp_in_ns) / 1000;
 
+	time[index].entry_id = core_stats->entry_id;
 	core_stats->index++;
+	core_stats->entry_id++;
 }
 
 void mod_stats_update_vupdate(struct mod_stats *mod_stats,

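The patch above threads one shared entry_id counter through both the timing cache and the new event cache; that is what lets mod_stats_dump() replay flip rows and event strings in the order they were recorded. A simplified, self-contained model of that merge-by-id dump (plain arrays instead of the module's caches) is:

  #include <stdio.h>

  struct time_row  { unsigned int entry_id; unsigned int render_us; };
  struct event_row { unsigned int entry_id; const char *text; };

  /* Walk the shared id space; each id belongs to exactly one cache. */
  static void dump(const struct time_row *t, unsigned int nt,
                   const struct event_row *e, unsigned int ne,
                   unsigned int total_ids)
  {
      unsigned int ti = 0, ei = 0;

      for (unsigned int id = 0; id < total_ids; id++) {
          if (ei < ne && e[ei].entry_id == id)
              printf("%s\n", e[ei++].text);
          else if (ti < nt && t[ti].entry_id == id)
              printf("%10u\n", t[ti++].render_us);
      }
  }
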
From dab911d535ae24a39b4e383c0dffaa3e5583883d Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Fri, 27 Apr 2018 15:23:23 -0400
Subject: [PATCH 1239/1286] drm/amd/display: fix bug with index check

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/modules/stats/stats.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 4b00bae725b9..fe9e4b316d3a 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -305,7 +305,7 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
 
 	core_stats = MOD_STATS_TO_CORE(mod_stats);
 
-	if (core_stats->index >= core_stats->entries)
+	if (core_stats->event_index >= core_stats->event_entries)
 		return;
 
 	events = core_stats->events;

From a944744ba517256fcc9311e12c083563cbbe7c88 Mon Sep 17 00:00:00 2001
From: Nikola Cornij <nikola.cornij@amd.com>
Date: Fri, 27 Apr 2018 17:26:25 -0400
Subject: [PATCH 1240/1286] drm/amd/display: Clear underflow status for debug
 purposes

We want to keep the underflow sticky bit on for longevity tests run outside
the test environment. For debug purposes it is, however, useful to clear the
underflow status after the test that caused it, so that the following tests
are not affected. This change fulfills both requirements by clearing the
underflow only from within the Windows or Diags test environment.
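
The change relies on the underflow status being a sticky bit: hardware latches
it on the first underflow and leaves it set until software clears it. A
simplified userspace model of the resulting "clear only from the debug dump
path" pattern (plain C, not the register access code) is:

  #include <stdbool.h>
  #include <stdio.h>

  /* Sticky status bit: latched on the first underflow, left set until
   * software explicitly clears it.
   */
  struct otg_status {
      bool underflow_occurred;
  };

  /* Report the latched status, then clear it. Only this debug/test path
   * clears it, so longevity runs that never dump state still see the bit
   * accumulate across tests.
   */
  static void debug_dump_and_clear(struct otg_status *s)
  {
      printf("underflow_occurred_status: %d\n", s->underflow_occurred);
      s->underflow_occurred = false;
  }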

Signed-off-by: Nikola Cornij <nikola.cornij@amd.com>
Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index c452972bf1c3..f8e0576af6e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -326,6 +326,12 @@ void dcn10_log_hw_state(struct dc *dc)
 				s.h_total,
 				s.v_total,
 				s.underflow_occurred_status);
+
+		// Clear underflow for debug purposes
+		// We want to keep the underflow sticky bit on for longevity tests outside of the test environment.
+		// This function is called only from the Windows or Diags test environment, hence it's safe to clear
+		// it from here without affecting the original intent.
+		tg->funcs->clear_optc_underflow(tg);
 	}
 	DTN_INFO("\n");
 

From f0cd0a346dfd1df4b691fe38dafb51911392fbce Mon Sep 17 00:00:00 2001
From: Eric Bernstein <eric.bernstein@amd.com>
Date: Thu, 26 Apr 2018 14:06:00 -0400
Subject: [PATCH 1241/1286] drm/amd/display: DCN1 link encoder

Create DCN1 link encoder files and update AUX and HPD register access.
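
The AUX and HPD access in the new file goes through small update macros
(AUX_REG_UPDATE, HPD_REG_UPDATE) built on a read-modify-write helper that
shifts and masks one field at a time. A minimal userspace model of that
field-update pattern, with made-up shift and mask values rather than the
generated register headers, is:

  #include <stdint.h>
  #include <stdio.h>

  /* Read-modify-write of a single bit field: clear it, then set it. */
  static uint32_t field_update(uint32_t reg_val, unsigned int shift,
                               uint32_t mask, uint32_t field_val)
  {
      return (reg_val & ~mask) | ((field_val << shift) & mask);
  }

  int main(void)
  {
      /* Illustrative values only; real offsets come from the register
       * headers, not from this sketch.
       */
      const unsigned int DC_HPD_EN_SHIFT = 28;
      const uint32_t DC_HPD_EN_MASK = 1u << 28;
      uint32_t dc_hpd_control = 0;

      dc_hpd_control = field_update(dc_hpd_control, DC_HPD_EN_SHIFT,
                                    DC_HPD_EN_MASK, 1);
      printf("DC_HPD_CONTROL = 0x%08x\n", dc_hpd_control);
      return 0;
  }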

Signed-off-by: Eric Bernstein <eric.bernstein@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/display/dc/core/dc_link_hwss.c    |    2 -
 drivers/gpu/drm/amd/display/dc/dcn10/Makefile |    2 +-
 .../amd/display/dc/dcn10/dcn10_link_encoder.c | 1362 +++++++++++++++++
 .../amd/display/dc/dcn10/dcn10_link_encoder.h |  330 ++++
 .../drm/amd/display/dc/dcn10/dcn10_resource.c |   43 +-
 5 files changed, 1716 insertions(+), 23 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 7c866a7d5e77..82cd1d6e6e59 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -11,8 +11,6 @@
 #include "dc_link_dp.h"
 #include "dc_link_ddc.h"
 #include "dm_helpers.h"
-#include "dce/dce_link_encoder.h"
-#include "dce/dce_stream_encoder.h"
 #include "dpcd_defs.h"
 
 enum dc_status core_link_read_dpcd(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 5c69743a4b4f..84f52c63d95c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -26,7 +26,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
 		dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
 		dcn10_hubp.o dcn10_mpc.o \
 		dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
-		dcn10_hubbub.o dcn10_stream_encoder.o
+		dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
 
 AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
new file mode 100644
index 000000000000..21fa40ac0786
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -0,0 +1,1362 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn10_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+	enc10->base.ctx
+#define DC_LOGGER \
+	enc10->base.ctx->logger
+
+#define REG(reg)\
+	(enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+	enc10->link_shift->field_name, enc10->link_mask->field_name
+
+
+/*
+ * @brief
+ * Trigger Source Select
+ * ASIC-dependent, actual values for register programming
+ */
+#define DCN10_DIG_FE_SOURCE_SELECT_INVALID 0x0
+#define DCN10_DIG_FE_SOURCE_SELECT_DIGA 0x1
+#define DCN10_DIG_FE_SOURCE_SELECT_DIGB 0x2
+#define DCN10_DIG_FE_SOURCE_SELECT_DIGC 0x4
+#define DCN10_DIG_FE_SOURCE_SELECT_DIGD 0x08
+#define DCN10_DIG_FE_SOURCE_SELECT_DIGE 0x10
+#define DCN10_DIG_FE_SOURCE_SELECT_DIGF 0x20
+#define DCN10_DIG_FE_SOURCE_SELECT_DIGG 0x40
+
+enum {
+	DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+
+
+static void aux_initialize(struct dcn10_link_encoder *enc10);
+
+
+static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
+	.validate_output_with_stream =
+		dcn10_link_encoder_validate_output_with_stream,
+	.hw_init = dcn10_link_encoder_hw_init,
+	.setup = dcn10_link_encoder_setup,
+	.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+	.enable_dp_output = dcn10_link_encoder_enable_dp_output,
+	.enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
+	.disable_output = dcn10_link_encoder_disable_output,
+	.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+	.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+	.update_mst_stream_allocation_table =
+		dcn10_link_encoder_update_mst_stream_allocation_table,
+	.psr_program_dp_dphy_fast_training =
+			dcn10_psr_program_dp_dphy_fast_training,
+	.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+	.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+	.enable_hpd = dcn10_link_encoder_enable_hpd,
+	.disable_hpd = dcn10_link_encoder_disable_hpd,
+	.is_dig_enabled = dcn10_is_dig_enabled,
+	.destroy = dcn10_link_encoder_destroy
+};
+
+static enum bp_result link_transmitter_control(
+	struct dcn10_link_encoder *enc10,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result;
+	struct dc_bios *bp = enc10->base.ctx->dc_bios;
+
+	result = bp->funcs->transmitter_control(bp, cntl);
+
+	return result;
+}
+
+static void enable_phy_bypass_mode(
+	struct dcn10_link_encoder *enc10,
+	bool enable)
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset
+	 */
+	REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable);
+
+}
+
+static void disable_prbs_symbols(
+	struct dcn10_link_encoder *enc10,
+	bool disable)
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset
+	 */
+	REG_UPDATE_4(DP_DPHY_CNTL,
+			DPHY_ATEST_SEL_LANE0, disable,
+			DPHY_ATEST_SEL_LANE1, disable,
+			DPHY_ATEST_SEL_LANE2, disable,
+			DPHY_ATEST_SEL_LANE3, disable);
+}
+
+static void disable_prbs_mode(
+	struct dcn10_link_encoder *enc10)
+{
+	REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0);
+}
+
+static void program_pattern_symbols(
+	struct dcn10_link_encoder *enc10,
+	uint16_t pattern_symbols[8])
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset
+	 */
+	REG_SET_3(DP_DPHY_SYM0, 0,
+			DPHY_SYM1, pattern_symbols[0],
+			DPHY_SYM2, pattern_symbols[1],
+			DPHY_SYM3, pattern_symbols[2]);
+
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset
+	 */
+	REG_SET_3(DP_DPHY_SYM1, 0,
+			DPHY_SYM4, pattern_symbols[3],
+			DPHY_SYM5, pattern_symbols[4],
+			DPHY_SYM6, pattern_symbols[5]);
+
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset
+	 */
+	REG_SET_2(DP_DPHY_SYM2, 0,
+			DPHY_SYM7, pattern_symbols[6],
+			DPHY_SYM8, pattern_symbols[7]);
+}
+
+static void set_dp_phy_pattern_d102(
+	struct dcn10_link_encoder *enc10)
+{
+	/* Disable PHY Bypass mode to setup the test pattern */
+	enable_phy_bypass_mode(enc10, false);
+
+	/* For 10-bit PRBS or debug symbols
+	 * please use the following sequence:
+	 *
+	 * Enable debug symbols on the lanes
+	 */
+	disable_prbs_symbols(enc10, true);
+
+	/* Disable PRBS mode */
+	disable_prbs_mode(enc10);
+
+	/* Program debug symbols to be output */
+	{
+		uint16_t pattern_symbols[8] = {
+			0x2AA, 0x2AA, 0x2AA, 0x2AA,
+			0x2AA, 0x2AA, 0x2AA, 0x2AA
+		};
+
+		program_pattern_symbols(enc10, pattern_symbols);
+	}
+
+	/* Enable phy bypass mode to enable the test pattern */
+
+	enable_phy_bypass_mode(enc10, true);
+}
+
+static void set_link_training_complete(
+	struct dcn10_link_encoder *enc10,
+	bool complete)
+{
+	/* This register resides in DP back end block;
+	 * transmitter is used for the offset
+	 */
+	REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete);
+
+}
+
+void dcn10_link_encoder_set_dp_phy_pattern_training_pattern(
+	struct link_encoder *enc,
+	uint32_t index)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	/* Write Training Pattern */
+
+	REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index);
+
+	/* Set HW Register Training Complete to false */
+
+	set_link_training_complete(enc10, false);
+
+	/* Disable PHY Bypass mode to output Training Pattern */
+
+	enable_phy_bypass_mode(enc10, false);
+
+	/* Disable PRBS mode */
+	disable_prbs_mode(enc10);
+}
+
+static void setup_panel_mode(
+	struct dcn10_link_encoder *enc10,
+	enum dp_panel_mode panel_mode)
+{
+	uint32_t value;
+
+	ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+	value = REG_READ(DP_DPHY_INTERNAL_CTRL);
+
+	switch (panel_mode) {
+	case DP_PANEL_MODE_EDP:
+		value = 0x1;
+		break;
+	case DP_PANEL_MODE_SPECIAL:
+		value = 0x11;
+		break;
+	default:
+		value = 0x0;
+		break;
+	}
+
+	REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
+}
+
+static void set_dp_phy_pattern_symbol_error(
+	struct dcn10_link_encoder *enc10)
+{
+	/* Disable PHY Bypass mode to setup the test pattern */
+	enable_phy_bypass_mode(enc10, false);
+
+	/* program correct panel mode*/
+	setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
+
+	/* A PRBS23 pattern is used for most DP electrical measurements. */
+
+	/* Enable PRBS symbols on the lanes */
+	disable_prbs_symbols(enc10, false);
+
+	/* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */
+	REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+			DPHY_PRBS_SEL, 1,
+			DPHY_PRBS_EN, 1);
+
+	/* Enable phy bypass mode to enable the test pattern */
+	enable_phy_bypass_mode(enc10, true);
+}
+
+static void set_dp_phy_pattern_prbs7(
+	struct dcn10_link_encoder *enc10)
+{
+	/* Disable PHY Bypass mode to setup the test pattern */
+	enable_phy_bypass_mode(enc10, false);
+
+	/* A PRBS7 pattern is used for most DP electrical measurements. */
+
+	/* Enable PRBS symbols on the lanes */
+	disable_prbs_symbols(enc10, false);
+
+	/* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */
+	REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+			DPHY_PRBS_SEL, 0,
+			DPHY_PRBS_EN, 1);
+
+	/* Enable phy bypass mode to enable the test pattern */
+	enable_phy_bypass_mode(enc10, true);
+}
+
+static void set_dp_phy_pattern_80bit_custom(
+	struct dcn10_link_encoder *enc10,
+	const uint8_t *pattern)
+{
+	/* Disable PHY Bypass mode to setup the test pattern */
+	enable_phy_bypass_mode(enc10, false);
+
+	/* Enable debug symbols on the lanes */
+
+	disable_prbs_symbols(enc10, true);
+
+	/* Enable PHY bypass mode to enable the test pattern */
+	/* TODO is it really needed ? */
+
+	enable_phy_bypass_mode(enc10, true);
+
+	/* Program 80 bit custom pattern */
+	{
+		uint16_t pattern_symbols[8];
+
+		pattern_symbols[0] =
+			((pattern[1] & 0x03) << 8) | pattern[0];
+		pattern_symbols[1] =
+			((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f);
+		pattern_symbols[2] =
+			((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f);
+		pattern_symbols[3] =
+			(pattern[4] << 2) | ((pattern[3] >> 6) & 0x03);
+		pattern_symbols[4] =
+			((pattern[6] & 0x03) << 8) | pattern[5];
+		pattern_symbols[5] =
+			((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f);
+		pattern_symbols[6] =
+			((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f);
+		pattern_symbols[7] =
+			(pattern[9] << 2) | ((pattern[8] >> 6) & 0x03);
+
+		program_pattern_symbols(enc10, pattern_symbols);
+	}
+
+	/* Enable phy bypass mode to enable the test pattern */
+
+	enable_phy_bypass_mode(enc10, true);
+}
+
+static void set_dp_phy_pattern_hbr2_compliance_cp2520_2(
+	struct dcn10_link_encoder *enc10,
+	unsigned int cp2520_pattern)
+{
+
+	/* previously there is a register DP_HBR2_EYE_PATTERN
+	 * that is enabled to get the pattern.
+	 * But it does not work with the latest spec change,
+	 * so we are programming the following registers manually.
+	 *
+	 * The following settings have been confirmed
+	 * by Nick Chorney and Sandra Liu
+	 */
+
+	/* Disable PHY Bypass mode to setup the test pattern */
+
+	enable_phy_bypass_mode(enc10, false);
+
+	/* Setup DIG encoder in DP SST mode */
+	enc10->base.funcs->setup(&enc10->base, SIGNAL_TYPE_DISPLAY_PORT);
+
+	/* ensure normal panel mode. */
+	setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
+
+	/* no vbid after BS (SR)
+	 * DP_LINK_FRAMING_CNTL changed history Sandra Liu
+	 * 11000260 / 11000104 / 110000FC
+	 */
+	REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+			DP_IDLE_BS_INTERVAL, 0xFC,
+			DP_VBID_DISABLE, 1,
+			DP_VID_ENHANCED_FRAME_MODE, 1);
+
+	/* swap every BS with SR */
+	REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0);
+
+	/* select cp2520 patterns */
+	if (REG(DP_DPHY_HBR2_PATTERN_CONTROL))
+		REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL,
+				DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern);
+	else
+		/* pre-DCE11 can only generate CP2520 pattern 2 */
+		ASSERT(cp2520_pattern == 2);
+
+	/* set link training complete */
+	set_link_training_complete(enc10, true);
+
+	/* disable video stream */
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+	/* Disable PHY Bypass mode to setup the test pattern */
+	enable_phy_bypass_mode(enc10, false);
+}
+
+static void set_dp_phy_pattern_passthrough_mode(
+	struct dcn10_link_encoder *enc10,
+	enum dp_panel_mode panel_mode)
+{
+	/* program correct panel mode */
+	setup_panel_mode(enc10, panel_mode);
+
+	/* restore LINK_FRAMING_CNTL and DPHY_SCRAMBLER_BS_COUNT
+	 * in case we were doing HBR2 compliance pattern before
+	 */
+	REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+			DP_IDLE_BS_INTERVAL, 0x2000,
+			DP_VBID_DISABLE, 0,
+			DP_VID_ENHANCED_FRAME_MODE, 1);
+
+	REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF);
+
+	/* set link training complete */
+	set_link_training_complete(enc10, true);
+
+	/* Disable PHY Bypass mode to setup the test pattern */
+	enable_phy_bypass_mode(enc10, false);
+
+	/* Disable PRBS mode */
+	disable_prbs_mode(enc10);
+}
+
+/* return value is bit-vector */
+static uint8_t get_frontend_source(
+	enum engine_id engine)
+{
+	switch (engine) {
+	case ENGINE_ID_DIGA:
+		return DCN10_DIG_FE_SOURCE_SELECT_DIGA;
+	case ENGINE_ID_DIGB:
+		return DCN10_DIG_FE_SOURCE_SELECT_DIGB;
+	case ENGINE_ID_DIGC:
+		return DCN10_DIG_FE_SOURCE_SELECT_DIGC;
+	case ENGINE_ID_DIGD:
+		return DCN10_DIG_FE_SOURCE_SELECT_DIGD;
+	case ENGINE_ID_DIGE:
+		return DCN10_DIG_FE_SOURCE_SELECT_DIGE;
+	case ENGINE_ID_DIGF:
+		return DCN10_DIG_FE_SOURCE_SELECT_DIGF;
+	case ENGINE_ID_DIGG:
+		return DCN10_DIG_FE_SOURCE_SELECT_DIGG;
+	default:
+		ASSERT_CRITICAL(false);
+		return DCN10_DIG_FE_SOURCE_SELECT_INVALID;
+	}
+}
+
+static void configure_encoder(
+	struct dcn10_link_encoder *enc10,
+	const struct dc_link_settings *link_settings)
+{
+	/* set number of lanes */
+
+	REG_SET(DP_CONFIG, 0,
+			DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
+
+	/* setup scrambler */
+	REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
+}
+
+void dcn10_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+			bool exit_link_training_required)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+	if (exit_link_training_required)
+		REG_UPDATE(DP_DPHY_FAST_TRAINING,
+				DPHY_RX_FAST_TRAINING_CAPABLE, 1);
+	else {
+		REG_UPDATE(DP_DPHY_FAST_TRAINING,
+				DPHY_RX_FAST_TRAINING_CAPABLE, 0);
+		/*In DCE 11, we are able to pre-program a Force SR register
+		 * to be able to trigger SR symbol after 5 idle patterns
+		 * transmitted. Upon PSR Exit, DMCU can trigger
+		 * DPHY_LOAD_BS_COUNT_START = 1. Upon writing 1 to
+		 * DPHY_LOAD_BS_COUNT_START and the internal counter
+		 * reaches DPHY_LOAD_BS_COUNT, the next BS symbol will be
+		 * replaced by SR symbol once.
+		 */
+
+		REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5);
+	}
+}
+
+void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
+			unsigned int sdp_transmit_line_num_deadline)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+	REG_UPDATE_2(DP_SEC_CNTL1,
+		DP_SEC_GSP0_LINE_NUM, sdp_transmit_line_num_deadline,
+		DP_SEC_GSP0_PRIORITY, 1);
+}
+
+bool dcn10_is_dig_enabled(struct link_encoder *enc)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	uint32_t value;
+
+	REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
+	return value;
+}
+
+static void link_encoder_disable(struct dcn10_link_encoder *enc10)
+{
+	/* reset training pattern */
+	REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0,
+			DPHY_TRAINING_PATTERN_SEL, 0);
+
+	/* reset training complete */
+	REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+
+	/* reset panel mode */
+	setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT);
+}
+
+static void hpd_initialize(
+	struct dcn10_link_encoder *enc10)
+{
+	/* Associate HPD with DIG_BE */
+	enum hpd_source_id hpd_source = enc10->base.hpd_source;
+
+	REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source);
+}
+
+bool dcn10_link_encoder_validate_dvi_output(
+	const struct dcn10_link_encoder *enc10,
+	enum signal_type connector_signal,
+	enum signal_type signal,
+	const struct dc_crtc_timing *crtc_timing)
+{
+	uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK;
+
+	if (signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+		max_pixel_clock *= 2;
+
+	/* This handles the case of HDMI downgrade to DVI;
+	 * we don't want to cap the pixel clock if the DDI is not DVI.
+	 */
+	if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK &&
+			connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+		max_pixel_clock = enc10->base.features.max_hdmi_pixel_clock;
+
+	/* DVI only support RGB pixel encoding */
+	if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+		return false;
+
+	/* connect DVI via adapter's HDMI connector */
+	if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+		connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) &&
+		signal != SIGNAL_TYPE_HDMI_TYPE_A &&
+		crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK)
+		return false;
+	if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+		return false;
+
+	if (crtc_timing->pix_clk_khz > max_pixel_clock)
+		return false;
+
+	/* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
+	switch (crtc_timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+	case COLOR_DEPTH_888:
+	break;
+	case COLOR_DEPTH_101010:
+	case COLOR_DEPTH_161616:
+		if (signal != SIGNAL_TYPE_DVI_DUAL_LINK)
+			return false;
+	break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
+static bool dcn10_link_encoder_validate_hdmi_output(
+	const struct dcn10_link_encoder *enc10,
+	const struct dc_crtc_timing *crtc_timing,
+	int adjusted_pix_clk_khz)
+{
+	enum dc_color_depth max_deep_color =
+			enc10->base.features.max_hdmi_deep_color;
+
+	if (max_deep_color < crtc_timing->display_color_depth)
+		return false;
+
+	if (crtc_timing->display_color_depth < COLOR_DEPTH_888)
+		return false;
+	if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+		return false;
+
+	if ((adjusted_pix_clk_khz == 0) ||
+		(adjusted_pix_clk_khz > enc10->base.features.max_hdmi_pixel_clock))
+		return false;
+
+	/* DCE11 HW does not support 420 */
+	if (!enc10->base.features.ycbcr420_supported &&
+			crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+		return false;
+
+	if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
+		adjusted_pix_clk_khz >= 300000)
+		return false;
+	return true;
+}
+
+bool dcn10_link_encoder_validate_dp_output(
+	const struct dcn10_link_encoder *enc10,
+	const struct dc_crtc_timing *crtc_timing)
+{
+	/* default RGB only */
+	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+		return true;
+
+	if (enc10->base.features.flags.bits.IS_YCBCR_CAPABLE)
+		return true;
+
+	/* for DCE 8.x or later DP Y-only feature,
+	 * we need ASIC cap + FeatureSupportDPYonly, not support 666
+	 */
+	if (crtc_timing->flags.Y_ONLY &&
+		enc10->base.features.flags.bits.IS_YCBCR_CAPABLE &&
+		crtc_timing->display_color_depth != COLOR_DEPTH_666)
+		return true;
+
+	return false;
+}
+
+void dcn10_link_encoder_construct(
+	struct dcn10_link_encoder *enc10,
+	const struct encoder_init_data *init_data,
+	const struct encoder_feature_support *enc_features,
+	const struct dcn10_link_enc_registers *link_regs,
+	const struct dcn10_link_enc_aux_registers *aux_regs,
+	const struct dcn10_link_enc_hpd_registers *hpd_regs,
+	const struct dcn10_link_enc_shift *link_shift,
+	const struct dcn10_link_enc_mask *link_mask)
+{
+	struct bp_encoder_cap_info bp_cap_info = {0};
+	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+	enum bp_result result = BP_RESULT_OK;
+
+	enc10->base.funcs = &dcn10_lnk_enc_funcs;
+	enc10->base.ctx = init_data->ctx;
+	enc10->base.id = init_data->encoder;
+
+	enc10->base.hpd_source = init_data->hpd_source;
+	enc10->base.connector = init_data->connector;
+
+	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+	enc10->base.features = *enc_features;
+
+	enc10->base.transmitter = init_data->transmitter;
+
+	/* set the flag to indicate whether driver poll the I2C data pin
+	 * while doing the DP sink detect
+	 */
+
+/*	if (dal_adapter_service_is_feature_supported(as,
+		FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+		enc10->base.features.flags.bits.
+			DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+	enc10->base.output_signals =
+		SIGNAL_TYPE_DVI_SINGLE_LINK |
+		SIGNAL_TYPE_DVI_DUAL_LINK |
+		SIGNAL_TYPE_LVDS |
+		SIGNAL_TYPE_DISPLAY_PORT |
+		SIGNAL_TYPE_DISPLAY_PORT_MST |
+		SIGNAL_TYPE_EDP |
+		SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
+	 * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
+	 * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
+	 * Prefer DIG assignment is decided by board design.
+	 * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
+	 * and VBIOS will filter out 7 UNIPHY for DCE 8.0.
+	 * By this, adding DIGG should not hurt DCE 8.0.
+	 * This will let DCE 8.1 share DCE 8.0 as much as possible
+	 */
+
+	enc10->link_regs = link_regs;
+	enc10->aux_regs = aux_regs;
+	enc10->hpd_regs = hpd_regs;
+	enc10->link_shift = link_shift;
+	enc10->link_mask = link_mask;
+
+	switch (enc10->base.transmitter) {
+	case TRANSMITTER_UNIPHY_A:
+		enc10->base.preferred_engine = ENGINE_ID_DIGA;
+	break;
+	case TRANSMITTER_UNIPHY_B:
+		enc10->base.preferred_engine = ENGINE_ID_DIGB;
+	break;
+	case TRANSMITTER_UNIPHY_C:
+		enc10->base.preferred_engine = ENGINE_ID_DIGC;
+	break;
+	case TRANSMITTER_UNIPHY_D:
+		enc10->base.preferred_engine = ENGINE_ID_DIGD;
+	break;
+	case TRANSMITTER_UNIPHY_E:
+		enc10->base.preferred_engine = ENGINE_ID_DIGE;
+	break;
+	case TRANSMITTER_UNIPHY_F:
+		enc10->base.preferred_engine = ENGINE_ID_DIGF;
+	break;
+	case TRANSMITTER_UNIPHY_G:
+		enc10->base.preferred_engine = ENGINE_ID_DIGG;
+	break;
+	default:
+		ASSERT_CRITICAL(false);
+		enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+	}
+
+	/* default to one to mirror Windows behavior */
+	enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+	result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+						enc10->base.id, &bp_cap_info);
+
+	/* Override features with DCE-specific values */
+	if (result == BP_RESULT_OK) {
+		enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+				bp_cap_info.DP_HBR2_EN;
+		enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+				bp_cap_info.DP_HBR3_EN;
+		enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+	} else {
+		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+				__func__,
+				result);
+	}
+}
+
+bool dcn10_link_encoder_validate_output_with_stream(
+	struct link_encoder *enc,
+	const struct dc_stream_state *stream)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	bool is_valid;
+
+	switch (stream->signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		is_valid = dcn10_link_encoder_validate_dvi_output(
+			enc10,
+			stream->sink->link->connector_signal,
+			stream->signal,
+			&stream->timing);
+	break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		is_valid = dcn10_link_encoder_validate_hdmi_output(
+				enc10,
+				&stream->timing,
+				stream->phy_pix_clk);
+	break;
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		is_valid = dcn10_link_encoder_validate_dp_output(
+					enc10, &stream->timing);
+	break;
+	case SIGNAL_TYPE_EDP:
+		is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+	break;
+	case SIGNAL_TYPE_VIRTUAL:
+		is_valid = true;
+		break;
+	default:
+		is_valid = false;
+	break;
+	}
+
+	return is_valid;
+}
+
+void dcn10_link_encoder_hw_init(
+	struct link_encoder *enc)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	cntl.action = TRANSMITTER_CONTROL_INIT;
+	cntl.engine_id = ENGINE_ID_UNKNOWN;
+	cntl.transmitter = enc10->base.transmitter;
+	cntl.connector_obj_id = enc10->base.connector;
+	cntl.lanes_number = LANE_COUNT_FOUR;
+	cntl.coherent = false;
+	cntl.hpd_sel = enc10->base.hpd_source;
+
+	if (enc10->base.connector.id == CONNECTOR_ID_EDP)
+		cntl.signal = SIGNAL_TYPE_EDP;
+
+	result = link_transmitter_control(enc10, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	if (enc10->base.connector.id == CONNECTOR_ID_LVDS) {
+		cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS;
+
+		result = link_transmitter_control(enc10, &cntl);
+
+		ASSERT(result == BP_RESULT_OK);
+
+	}
+	aux_initialize(enc10);
+
+	/* reinitialize HPD.
+	 * hpd_initialize() will pass DIG_FE id to HW context.
+	 * All other routines within HW context will use fe_engine_offset
+	 * as the DIG_FE id even if the caller passes a DIG_FE id.
+	 * So this routine must be called first.
+	 */
+	hpd_initialize(enc10);
+}
+
+void dcn10_link_encoder_destroy(struct link_encoder **enc)
+{
+	kfree(TO_DCN10_LINK_ENC(*enc));
+	*enc = NULL;
+}
+
+void dcn10_link_encoder_setup(
+	struct link_encoder *enc,
+	enum signal_type signal)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+	switch (signal) {
+	case SIGNAL_TYPE_EDP:
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		/* DP SST */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0);
+		break;
+	case SIGNAL_TYPE_LVDS:
+		/* LVDS */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1);
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+		/* TMDS-DVI */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2);
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		/* TMDS-HDMI */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3);
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		/* DP MST */
+		REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
+		break;
+	default:
+		ASSERT_CRITICAL(false);
+		/* invalid mode ! */
+		break;
+	}
+
+}
+
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dcn10_link_encoder_enable_tmds_output(
+	struct link_encoder *enc,
+	enum clock_source_id clock_source,
+	enum dc_color_depth color_depth,
+	enum signal_type signal,
+	uint32_t pixel_clock)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* Enable the PHY */
+
+	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+	cntl.engine_id = enc->preferred_engine;
+	cntl.transmitter = enc10->base.transmitter;
+	cntl.pll_id = clock_source;
+	cntl.signal = signal;
+	if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+		cntl.lanes_number = 8;
+	else
+		cntl.lanes_number = 4;
+
+	cntl.hpd_sel = enc10->base.hpd_source;
+
+	cntl.pixel_clock = pixel_clock;
+	cntl.color_depth = color_depth;
+
+	result = link_transmitter_control(enc10, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+	}
+}
+
+/* enables DP PHY output */
+void dcn10_link_encoder_enable_dp_output(
+	struct link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum clock_source_id clock_source)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* Enable the PHY */
+
+	/* number_of_lanes is used for pixel clock adjust,
+	 * but it's not passed to asic_control.
+	 * We need to set number of lanes manually.
+	 */
+	configure_encoder(enc10, link_settings);
+
+	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+	cntl.engine_id = enc->preferred_engine;
+	cntl.transmitter = enc10->base.transmitter;
+	cntl.pll_id = clock_source;
+	cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
+	cntl.lanes_number = link_settings->lane_count;
+	cntl.hpd_sel = enc10->base.hpd_source;
+	cntl.pixel_clock = link_settings->link_rate
+						* LINK_RATE_REF_FREQ_IN_KHZ;
+	/* TODO: check if undefined works */
+	cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+	result = link_transmitter_control(enc10, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+	}
+}
+
+/* enables DP PHY output in MST mode */
+void dcn10_link_encoder_enable_dp_mst_output(
+	struct link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum clock_source_id clock_source)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* Enable the PHY */
+
+	/* number_of_lanes is used for pixel clock adjust,
+	 * but it's not passed to asic_control.
+	 * We need to set number of lanes manually.
+	 */
+	configure_encoder(enc10, link_settings);
+
+	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+	cntl.engine_id = ENGINE_ID_UNKNOWN;
+	cntl.transmitter = enc10->base.transmitter;
+	cntl.pll_id = clock_source;
+	cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+	cntl.lanes_number = link_settings->lane_count;
+	cntl.hpd_sel = enc10->base.hpd_source;
+	cntl.pixel_clock = link_settings->link_rate
+						* LINK_RATE_REF_FREQ_IN_KHZ;
+	/* TODO: check if undefined works */
+	cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+	result = link_transmitter_control(enc10, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+	}
+}
+/*
+ * @brief
+ * Disable transmitter and its encoder
+ */
+void dcn10_link_encoder_disable_output(
+	struct link_encoder *enc,
+	enum signal_type signal)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	if (!dcn10_is_dig_enabled(enc)) {
+		/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+		return;
+	}
+	/* Power-down RX and disable GPU PHY should be paired.
+	 * Disabling PHY without powering down RX may cause
+	 * symbol lock loss, on which we will get DP Sink interrupt.
+	 */
+
+	/* There is a case for the DP active dongles
+	 * where we want to disable the PHY but keep RX powered,
+	 * for those we need to ignore DP Sink interrupt
+	 * by checking lane count that has been set
+	 * on the last do_enable_output().
+	 */
+
+	/* disable transmitter */
+	cntl.action = TRANSMITTER_CONTROL_DISABLE;
+	cntl.transmitter = enc10->base.transmitter;
+	cntl.hpd_sel = enc10->base.hpd_source;
+	cntl.signal = signal;
+	cntl.connector_obj_id = enc10->base.connector;
+
+	result = link_transmitter_control(enc10, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	/* disable encoder */
+	if (dc_is_dp_signal(signal))
+		link_encoder_disable(enc10);
+}
+
+void dcn10_link_encoder_dp_set_lane_settings(
+	struct link_encoder *enc,
+	const struct link_training_settings *link_settings)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	union dpcd_training_lane_set training_lane_set = { { 0 } };
+	int32_t lane = 0;
+	struct bp_transmitter_control cntl = { 0 };
+
+	if (!link_settings) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+	cntl.transmitter = enc10->base.transmitter;
+	cntl.connector_obj_id = enc10->base.connector;
+	cntl.lanes_number = link_settings->link_settings.lane_count;
+	cntl.hpd_sel = enc10->base.hpd_source;
+	cntl.pixel_clock = link_settings->link_settings.link_rate *
+						LINK_RATE_REF_FREQ_IN_KHZ;
+
+	for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) {
+		/* translate lane settings */
+
+		training_lane_set.bits.VOLTAGE_SWING_SET =
+			link_settings->lane_settings[lane].VOLTAGE_SWING;
+		training_lane_set.bits.PRE_EMPHASIS_SET =
+			link_settings->lane_settings[lane].PRE_EMPHASIS;
+
+		/* post cursor 2 setting only applies to HBR2 link rate */
+		if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
+			/* this is passed to VBIOS
+			 * to program post cursor 2 level
+			 */
+			training_lane_set.bits.POST_CURSOR2_SET =
+				link_settings->lane_settings[lane].POST_CURSOR2;
+		}
+
+		cntl.lane_select = lane;
+		cntl.lane_settings = training_lane_set.raw;
+
+		/* call VBIOS table to set voltage swing and pre-emphasis */
+		link_transmitter_control(enc10, &cntl);
+	}
+}
+
+/* set DP PHY test and training patterns */
+void dcn10_link_encoder_dp_set_phy_pattern(
+	struct link_encoder *enc,
+	const struct encoder_set_dp_phy_pattern_param *param)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+	switch (param->dp_phy_pattern) {
+	case DP_TEST_PATTERN_TRAINING_PATTERN1:
+		dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
+		break;
+	case DP_TEST_PATTERN_TRAINING_PATTERN2:
+		dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
+		break;
+	case DP_TEST_PATTERN_TRAINING_PATTERN3:
+		dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
+		break;
+	case DP_TEST_PATTERN_TRAINING_PATTERN4:
+		dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3);
+		break;
+	case DP_TEST_PATTERN_D102:
+		set_dp_phy_pattern_d102(enc10);
+		break;
+	case DP_TEST_PATTERN_SYMBOL_ERROR:
+		set_dp_phy_pattern_symbol_error(enc10);
+		break;
+	case DP_TEST_PATTERN_PRBS7:
+		set_dp_phy_pattern_prbs7(enc10);
+		break;
+	case DP_TEST_PATTERN_80BIT_CUSTOM:
+		set_dp_phy_pattern_80bit_custom(
+			enc10, param->custom_pattern);
+		break;
+	case DP_TEST_PATTERN_CP2520_1:
+		set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 1);
+		break;
+	case DP_TEST_PATTERN_CP2520_2:
+		set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 2);
+		break;
+	case DP_TEST_PATTERN_CP2520_3:
+		set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 3);
+		break;
+	case DP_TEST_PATTERN_VIDEO_MODE: {
+		set_dp_phy_pattern_passthrough_mode(
+			enc10, param->dp_panel_mode);
+		break;
+	}
+
+	default:
+		/* invalid phy pattern */
+		ASSERT_CRITICAL(false);
+		break;
+	}
+}
+
+static void fill_stream_allocation_row_info(
+	const struct link_mst_stream_allocation *stream_allocation,
+	uint32_t *src,
+	uint32_t *slots)
+{
+	const struct stream_encoder *stream_enc = stream_allocation->stream_enc;
+
+	if (stream_enc) {
+		*src = stream_enc->id;
+		*slots = stream_allocation->slot_count;
+	} else {
+		*src = 0;
+		*slots = 0;
+	}
+}
+
+/* programs DP MST VC payload allocation */
+void dcn10_link_encoder_update_mst_stream_allocation_table(
+	struct link_encoder *enc,
+	const struct link_mst_stream_allocation_table *table)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	uint32_t value0 = 0;
+	uint32_t value1 = 0;
+	uint32_t value2 = 0;
+	uint32_t slots = 0;
+	uint32_t src = 0;
+	uint32_t retries = 0;
+
+	/* For CZ, there are only 3 pipes, so the virtual channel count is up to 3. */
+
+	/* --- Set MSE Stream Attribute -
+	 * Setup VC Payload Table on Tx Side,
+	 * Issue allocation change trigger
+	 * to commit payload on both tx and rx side
+	 */
+
+	/* we should clean-up table each time */
+
+	if (table->stream_count >= 1) {
+		fill_stream_allocation_row_info(
+			&table->stream_allocations[0],
+			&src,
+			&slots);
+	} else {
+		src = 0;
+		slots = 0;
+	}
+
+	REG_UPDATE_2(DP_MSE_SAT0,
+			DP_MSE_SAT_SRC0, src,
+			DP_MSE_SAT_SLOT_COUNT0, slots);
+
+	if (table->stream_count >= 2) {
+		fill_stream_allocation_row_info(
+			&table->stream_allocations[1],
+			&src,
+			&slots);
+	} else {
+		src = 0;
+		slots = 0;
+	}
+
+	REG_UPDATE_2(DP_MSE_SAT0,
+			DP_MSE_SAT_SRC1, src,
+			DP_MSE_SAT_SLOT_COUNT1, slots);
+
+	if (table->stream_count >= 3) {
+		fill_stream_allocation_row_info(
+			&table->stream_allocations[2],
+			&src,
+			&slots);
+	} else {
+		src = 0;
+		slots = 0;
+	}
+
+	REG_UPDATE_2(DP_MSE_SAT1,
+			DP_MSE_SAT_SRC2, src,
+			DP_MSE_SAT_SLOT_COUNT2, slots);
+
+	if (table->stream_count >= 4) {
+		fill_stream_allocation_row_info(
+			&table->stream_allocations[3],
+			&src,
+			&slots);
+	} else {
+		src = 0;
+		slots = 0;
+	}
+
+	REG_UPDATE_2(DP_MSE_SAT1,
+			DP_MSE_SAT_SRC3, src,
+			DP_MSE_SAT_SLOT_COUNT3, slots);
+
+	/* --- wait for transaction finish */
+
+	/* send allocation change trigger (ACT) ?
+	 * this step first sends the ACT,
+	 * then double buffers the SAT into the hardware
+	 * making the new allocation active on the DP MST mode link
+	 */
+
+	/* DP_MSE_SAT_UPDATE:
+	 * 0 - No Action
+	 * 1 - Update SAT with trigger
+	 * 2 - Update SAT without trigger
+	 */
+	REG_UPDATE(DP_MSE_SAT_UPDATE,
+			DP_MSE_SAT_UPDATE, 1);
+
+	/* wait for update to complete
+	 * (i.e. DP_MSE_SAT_UPDATE field is reset to 0)
+	 * then wait for the transmission
+	 * of at least 16 MTP headers on immediate local link.
+	 * i.e. DP_MSE_16_MTP_KEEPOUT field (read only) is reset to 0
+	 * a value of 1 indicates that DP MST mode
+	 * is in the 16 MTP keepout region after a VC has been added.
+	 * MST stream bandwidth (VC rate) can be configured
+	 * after this bit is cleared
+	 */
+	do {
+		udelay(10);
+
+		value0 = REG_READ(DP_MSE_SAT_UPDATE);
+
+		REG_GET(DP_MSE_SAT_UPDATE,
+				DP_MSE_SAT_UPDATE, &value1);
+
+		REG_GET(DP_MSE_SAT_UPDATE,
+				DP_MSE_16_MTP_KEEPOUT, &value2);
+
+		/* bit field DP_MSE_SAT_UPDATE is set to 1 already */
+		if (!value1 && !value2)
+			break;
+		++retries;
+	} while (retries < DP_MST_UPDATE_MAX_RETRY);
+}
+
+void dcn10_link_encoder_connect_dig_be_to_fe(
+	struct link_encoder *enc,
+	enum engine_id engine,
+	bool connect)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+	uint32_t field;
+
+	if (engine != ENGINE_ID_UNKNOWN) {
+
+		REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field);
+
+		if (connect)
+			field |= get_frontend_source(engine);
+		else
+			field &= ~get_frontend_source(engine);
+
+		REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field);
+	}
+}
+
+
+#define HPD_REG(reg)\
+	(enc10->hpd_regs->reg)
+
+#define HPD_REG_READ(reg_name) \
+		dm_read_reg(CTX, HPD_REG(reg_name))
+
+#define HPD_REG_UPDATE_N(reg_name, n, ...)	\
+		generic_reg_update_ex(CTX, \
+				HPD_REG(reg_name), \
+				HPD_REG_READ(reg_name), \
+				n, __VA_ARGS__)
+
+#define HPD_REG_UPDATE(reg_name, field, val)	\
+		HPD_REG_UPDATE_N(reg_name, 1, \
+				FN(reg_name, field), val)
+
+void dcn10_link_encoder_enable_hpd(struct link_encoder *enc)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+	HPD_REG_UPDATE(DC_HPD_CONTROL,
+			DC_HPD_EN, 1);
+}
+
+void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
+{
+	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+	HPD_REG_UPDATE(DC_HPD_CONTROL,
+			DC_HPD_EN, 0);
+}
+
+
+#define AUX_REG(reg)\
+	(enc10->aux_regs->reg)
+
+#define AUX_REG_READ(reg_name) \
+		dm_read_reg(CTX, AUX_REG(reg_name))
+
+#define AUX_REG_UPDATE_N(reg_name, n, ...)	\
+		generic_reg_update_ex(CTX, \
+				AUX_REG(reg_name), \
+				AUX_REG_READ(reg_name), \
+				n, __VA_ARGS__)
+
+#define AUX_REG_UPDATE(reg_name, field, val)	\
+		AUX_REG_UPDATE_N(reg_name, 1, \
+				FN(reg_name, field), val)
+
+#define AUX_REG_UPDATE_2(reg, f1, v1, f2, v2)	\
+		AUX_REG_UPDATE_N(reg, 2,\
+				FN(reg, f1), v1,\
+				FN(reg, f2), v2)
+
+static void aux_initialize(
+	struct dcn10_link_encoder *enc10)
+{
+	enum hpd_source_id hpd_source = enc10->base.hpd_source;
+
+	AUX_REG_UPDATE_2(AUX_CONTROL,
+			AUX_HPD_SEL, hpd_source,
+			AUX_LS_READ_EN, 0);
+
+	/* 1/4 window (the maximum allowed) */
+	AUX_REG_UPDATE(AUX_DPHY_RX_CONTROL0,
+			AUX_RX_RECEIVE_WINDOW, 1);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
new file mode 100644
index 000000000000..2a97cdb2cfbb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN10_H__
+#define __DC_LINK_ENCODER__DCN10_H__
+
+#include "link_encoder.h"
+
+#define TO_DCN10_LINK_ENC(link_encoder)\
+	container_of(link_encoder, struct dcn10_link_encoder, base)
+
+
+#define AUX_REG_LIST(id)\
+	SRI(AUX_CONTROL, DP_AUX, id), \
+	SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+
+#define HPD_REG_LIST(id)\
+	SRI(DC_HPD_CONTROL, HPD, id)
+
+#define LE_DCN_COMMON_REG_LIST(id) \
+	SRI(DIG_BE_CNTL, DIG, id), \
+	SRI(DIG_BE_EN_CNTL, DIG, id), \
+	SRI(DP_CONFIG, DP, id), \
+	SRI(DP_DPHY_CNTL, DP, id), \
+	SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+	SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
+	SRI(DP_DPHY_SYM0, DP, id), \
+	SRI(DP_DPHY_SYM1, DP, id), \
+	SRI(DP_DPHY_SYM2, DP, id), \
+	SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+	SRI(DP_LINK_CNTL, DP, id), \
+	SRI(DP_LINK_FRAMING_CNTL, DP, id), \
+	SRI(DP_MSE_SAT0, DP, id), \
+	SRI(DP_MSE_SAT1, DP, id), \
+	SRI(DP_MSE_SAT2, DP, id), \
+	SRI(DP_MSE_SAT_UPDATE, DP, id), \
+	SRI(DP_SEC_CNTL, DP, id), \
+	SRI(DP_VID_STREAM_CNTL, DP, id), \
+	SRI(DP_DPHY_FAST_TRAINING, DP, id), \
+	SRI(DP_SEC_CNTL1, DP, id), \
+	SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+	SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+	SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
+#define LE_DCN10_REG_LIST(id)\
+	LE_DCN_COMMON_REG_LIST(id)
+
+struct dcn10_link_enc_aux_registers {
+	uint32_t AUX_CONTROL;
+	uint32_t AUX_DPHY_RX_CONTROL0;
+};
+
+struct dcn10_link_enc_hpd_registers {
+	uint32_t DC_HPD_CONTROL;
+};
+
+struct dcn10_link_enc_registers {
+	uint32_t DIG_BE_CNTL;
+	uint32_t DIG_BE_EN_CNTL;
+	uint32_t DP_CONFIG;
+	uint32_t DP_DPHY_CNTL;
+	uint32_t DP_DPHY_INTERNAL_CTRL;
+	uint32_t DP_DPHY_PRBS_CNTL;
+	uint32_t DP_DPHY_SCRAM_CNTL;
+	uint32_t DP_DPHY_SYM0;
+	uint32_t DP_DPHY_SYM1;
+	uint32_t DP_DPHY_SYM2;
+	uint32_t DP_DPHY_TRAINING_PATTERN_SEL;
+	uint32_t DP_LINK_CNTL;
+	uint32_t DP_LINK_FRAMING_CNTL;
+	uint32_t DP_MSE_SAT0;
+	uint32_t DP_MSE_SAT1;
+	uint32_t DP_MSE_SAT2;
+	uint32_t DP_MSE_SAT_UPDATE;
+	uint32_t DP_SEC_CNTL;
+	uint32_t DP_VID_STREAM_CNTL;
+	uint32_t DP_DPHY_FAST_TRAINING;
+	uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
+	uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
+	uint32_t DP_SEC_CNTL1;
+};
+
+#define LE_SF(reg_name, field_name, post_fix)\
+	.field_name = reg_name ## __ ## field_name ## post_fix
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh)\
+	LE_SF(DIG0_DIG_BE_EN_CNTL, DIG_ENABLE, mask_sh),\
+	LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
+	LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
+	LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+	LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
+	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
+	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
+	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE2, mask_sh),\
+	LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE3, mask_sh),\
+	LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, mask_sh),\
+	LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_SEL, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM1, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM2, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM3, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM4, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM5, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM6, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM7, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM8, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, mask_sh),\
+	LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, mask_sh),\
+	LE_SF(DP0_DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, mask_sh),\
+	LE_SF(DP0_DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, mask_sh),\
+	LE_SF(DP0_DP_DPHY_TRAINING_PATTERN_SEL, DPHY_TRAINING_PATTERN_SEL, mask_sh),\
+	LE_SF(DP0_DP_DPHY_HBR2_PATTERN_CONTROL, DP_DPHY_HBR2_PATTERN_CONTROL, mask_sh),\
+	LE_SF(DP0_DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, mask_sh),\
+	LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, mask_sh),\
+	LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VBID_DISABLE, mask_sh),\
+	LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VID_ENHANCED_FRAME_MODE, mask_sh),\
+	LE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+	LE_SF(DP0_DP_CONFIG, DP_UDI_LANES, mask_sh),\
+	LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_LINE_NUM, mask_sh),\
+	LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_PRIORITY, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC0, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC1, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT0, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT1, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC2, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC3, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT2, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT3, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_SAT_UPDATE, mask_sh),\
+	LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_16_MTP_KEEPOUT, mask_sh),\
+	LE_SF(DP_AUX0_AUX_CONTROL, AUX_HPD_SEL, mask_sh),\
+	LE_SF(DP_AUX0_AUX_CONTROL, AUX_LS_READ_EN, mask_sh),\
+	LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW, mask_sh),\
+	LE_SF(HPD0_DC_HPD_CONTROL, DC_HPD_EN, mask_sh)
+
+#define DCN_LINK_ENCODER_REG_FIELD_LIST(type) \
+	type DIG_ENABLE;\
+	type DIG_HPD_SELECT;\
+	type DIG_MODE;\
+	type DIG_FE_SOURCE_SELECT;\
+	type DPHY_BYPASS;\
+	type DPHY_ATEST_SEL_LANE0;\
+	type DPHY_ATEST_SEL_LANE1;\
+	type DPHY_ATEST_SEL_LANE2;\
+	type DPHY_ATEST_SEL_LANE3;\
+	type DPHY_PRBS_EN;\
+	type DPHY_PRBS_SEL;\
+	type DPHY_SYM1;\
+	type DPHY_SYM2;\
+	type DPHY_SYM3;\
+	type DPHY_SYM4;\
+	type DPHY_SYM5;\
+	type DPHY_SYM6;\
+	type DPHY_SYM7;\
+	type DPHY_SYM8;\
+	type DPHY_SCRAMBLER_BS_COUNT;\
+	type DPHY_SCRAMBLER_ADVANCE;\
+	type DPHY_RX_FAST_TRAINING_CAPABLE;\
+	type DPHY_LOAD_BS_COUNT;\
+	type DPHY_TRAINING_PATTERN_SEL;\
+	type DP_DPHY_HBR2_PATTERN_CONTROL;\
+	type DP_LINK_TRAINING_COMPLETE;\
+	type DP_IDLE_BS_INTERVAL;\
+	type DP_VBID_DISABLE;\
+	type DP_VID_ENHANCED_FRAME_MODE;\
+	type DP_VID_STREAM_ENABLE;\
+	type DP_UDI_LANES;\
+	type DP_SEC_GSP0_LINE_NUM;\
+	type DP_SEC_GSP0_PRIORITY;\
+	type DP_MSE_SAT_SRC0;\
+	type DP_MSE_SAT_SRC1;\
+	type DP_MSE_SAT_SRC2;\
+	type DP_MSE_SAT_SRC3;\
+	type DP_MSE_SAT_SLOT_COUNT0;\
+	type DP_MSE_SAT_SLOT_COUNT1;\
+	type DP_MSE_SAT_SLOT_COUNT2;\
+	type DP_MSE_SAT_SLOT_COUNT3;\
+	type DP_MSE_SAT_UPDATE;\
+	type DP_MSE_16_MTP_KEEPOUT;\
+	type AUX_HPD_SEL;\
+	type AUX_LS_READ_EN;\
+	type AUX_RX_RECEIVE_WINDOW;\
+	type DC_HPD_EN
+
+struct dcn10_link_enc_shift {
+	DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn10_link_enc_mask {
+	DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_link_encoder {
+	struct link_encoder base;
+	const struct dcn10_link_enc_registers *link_regs;
+	const struct dcn10_link_enc_aux_registers *aux_regs;
+	const struct dcn10_link_enc_hpd_registers *hpd_regs;
+	const struct dcn10_link_enc_shift *link_shift;
+	const struct dcn10_link_enc_mask *link_mask;
+};
+
+
+void dcn10_link_encoder_construct(
+	struct dcn10_link_encoder *enc10,
+	const struct encoder_init_data *init_data,
+	const struct encoder_feature_support *enc_features,
+	const struct dcn10_link_enc_registers *link_regs,
+	const struct dcn10_link_enc_aux_registers *aux_regs,
+	const struct dcn10_link_enc_hpd_registers *hpd_regs,
+	const struct dcn10_link_enc_shift *link_shift,
+	const struct dcn10_link_enc_mask *link_mask);
+
+bool dcn10_link_encoder_validate_dvi_output(
+	const struct dcn10_link_encoder *enc10,
+	enum signal_type connector_signal,
+	enum signal_type signal,
+	const struct dc_crtc_timing *crtc_timing);
+
+bool dcn10_link_encoder_validate_rgb_output(
+	const struct dcn10_link_encoder *enc10,
+	const struct dc_crtc_timing *crtc_timing);
+
+bool dcn10_link_encoder_validate_dp_output(
+	const struct dcn10_link_encoder *enc10,
+	const struct dc_crtc_timing *crtc_timing);
+
+bool dcn10_link_encoder_validate_wireless_output(
+	const struct dcn10_link_encoder *enc10,
+	const struct dc_crtc_timing *crtc_timing);
+
+bool dcn10_link_encoder_validate_output_with_stream(
+	struct link_encoder *enc,
+	const struct dc_stream_state *stream);
+
+/****************** HW programming ************************/
+
+/* initialize HW */  /* why do we initialize aux in here? */
+void dcn10_link_encoder_hw_init(struct link_encoder *enc);
+
+void dcn10_link_encoder_destroy(struct link_encoder **enc);
+
+/* program DIG_MODE in DIG_BE */
+/* TODO can this be combined with enable_output? */
+void dcn10_link_encoder_setup(
+	struct link_encoder *enc,
+	enum signal_type signal);
+
+/* enables TMDS PHY output */
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dcn10_link_encoder_enable_tmds_output(
+	struct link_encoder *enc,
+	enum clock_source_id clock_source,
+	enum dc_color_depth color_depth,
+	enum signal_type signal,
+	uint32_t pixel_clock);
+
+/* enables DP PHY output */
+void dcn10_link_encoder_enable_dp_output(
+	struct link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum clock_source_id clock_source);
+
+/* enables DP PHY output in MST mode */
+void dcn10_link_encoder_enable_dp_mst_output(
+	struct link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum clock_source_id clock_source);
+
+/* disable PHY output */
+void dcn10_link_encoder_disable_output(
+	struct link_encoder *enc,
+	enum signal_type signal);
+
+/* set DP lane settings */
+void dcn10_link_encoder_dp_set_lane_settings(
+	struct link_encoder *enc,
+	const struct link_training_settings *link_settings);
+
+void dcn10_link_encoder_dp_set_phy_pattern(
+	struct link_encoder *enc,
+	const struct encoder_set_dp_phy_pattern_param *param);
+
+/* programs DP MST VC payload allocation */
+void dcn10_link_encoder_update_mst_stream_allocation_table(
+	struct link_encoder *enc,
+	const struct link_mst_stream_allocation_table *table);
+
+void dcn10_link_encoder_connect_dig_be_to_fe(
+	struct link_encoder *enc,
+	enum engine_id engine,
+	bool connect);
+
+void dcn10_link_encoder_set_dp_phy_pattern_training_pattern(
+	struct link_encoder *enc,
+	uint32_t index);
+
+void dcn10_link_encoder_enable_hpd(struct link_encoder *enc);
+
+void dcn10_link_encoder_disable_hpd(struct link_encoder *enc);
+
+void dcn10_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+			bool exit_link_training_required);
+
+void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
+			unsigned int sdp_transmit_line_num_deadline);
+
+bool dcn10_is_dig_enabled(struct link_encoder *enc);
+
+#endif /* __DC_LINK_ENCODER__DCN10_H__ */
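
The DCN_LINK_ENCODER_REG_FIELD_LIST/LE_SF pattern above is a field-list macro: the same
list of field names is expanded once with uint8_t to build the shift table and once with
uint32_t to build the mask table (instantiated below in dcn10_resource.c with __SHIFT and
_MASK), so the two structures cannot drift apart. A minimal stand-alone sketch of the
idea; the names and register values here are illustrative, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* One field list, expanded with different storage types. */
    #define REG_FIELD_LIST(type) \
            type DIG_ENABLE;     \
            type DIG_MODE

    struct enc_shift { REG_FIELD_LIST(uint8_t); };
    struct enc_mask  { REG_FIELD_LIST(uint32_t); };

    /* .field = <reg>__<field><postfix>, mirroring LE_SF() */
    #define SF(reg, field, postfix) .field = reg##__##field##postfix

    /* placeholder register field definitions */
    #define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
    #define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE_MASK   0x00000001
    #define DIG0_DIG_BE_CNTL__DIG_MODE__SHIFT      0x8
    #define DIG0_DIG_BE_CNTL__DIG_MODE_MASK        0x00000700

    static const struct enc_shift shift = {
            SF(DIG0_DIG_BE_EN_CNTL, DIG_ENABLE, __SHIFT),
            SF(DIG0_DIG_BE_CNTL, DIG_MODE, __SHIFT),
    };
    static const struct enc_mask mask = {
            SF(DIG0_DIG_BE_EN_CNTL, DIG_ENABLE, _MASK),
            SF(DIG0_DIG_BE_CNTL, DIG_MODE, _MASK),
    };

    int main(void)
    {
            /* extract DIG_MODE from a raw register value */
            uint32_t reg = 0x00000300;
            printf("DIG_MODE = %u\n",
                   (unsigned)((reg & mask.DIG_MODE) >> shift.DIG_MODE));
            return 0;
    }
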
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index ace2e03dced4..df5cb2d1d164 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -38,7 +38,7 @@
 #include "dcn10/dcn10_hw_sequencer.h"
 #include "dce110/dce110_hw_sequencer.h"
 #include "dcn10/dcn10_opp.h"
-#include "dce/dce_link_encoder.h"
+#include "dcn10/dcn10_link_encoder.h"
 #include "dcn10/dcn10_stream_encoder.h"
 #include "dce/dce_clocks.h"
 #include "dce/dce_clock_source.h"
@@ -214,13 +214,11 @@ static const struct dce_aduio_mask audio_mask = {
 	AUX_REG_LIST(id)\
 }
 
-static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
 		aux_regs(0),
 		aux_regs(1),
 		aux_regs(2),
-		aux_regs(3),
-		aux_regs(4),
-		aux_regs(5)
+		aux_regs(3)
 };
 
 #define hpd_regs(id)\
@@ -228,13 +226,11 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
 	HPD_REG_LIST(id)\
 }
 
-static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
 		hpd_regs(0),
 		hpd_regs(1),
 		hpd_regs(2),
-		hpd_regs(3),
-		hpd_regs(4),
-		hpd_regs(5)
+		hpd_regs(3)
 };
 
 #define link_regs(id)\
@@ -243,14 +239,19 @@ static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
 	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
 }
 
-static const struct dce110_link_enc_registers link_enc_regs[] = {
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
 	link_regs(0),
 	link_regs(1),
 	link_regs(2),
-	link_regs(3),
-	link_regs(4),
-	link_regs(5),
-	link_regs(6),
+	link_regs(3)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+		LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+		LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
 };
 
 #define ipp_regs(id)\
@@ -583,20 +584,22 @@ static const struct encoder_feature_support link_enc_feature = {
 struct link_encoder *dcn10_link_encoder_create(
 	const struct encoder_init_data *enc_init_data)
 {
-	struct dce110_link_encoder *enc110 =
-		kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+	struct dcn10_link_encoder *enc10 =
+		kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
 
-	if (!enc110)
+	if (!enc10)
 		return NULL;
 
-	dce110_link_encoder_construct(enc110,
+	dcn10_link_encoder_construct(enc10,
 				      enc_init_data,
 				      &link_enc_feature,
 				      &link_enc_regs[enc_init_data->transmitter],
 				      &link_enc_aux_regs[enc_init_data->channel - 1],
-				      &link_enc_hpd_regs[enc_init_data->hpd_source]);
+				      &link_enc_hpd_regs[enc_init_data->hpd_source],
+				      &le_shift,
+				      &le_mask);
 
-	return &enc110->base;
+	return &enc10->base;
 }
 
 struct clock_source *dcn10_clock_source_create(

From 9fcab85c580b31f6eb56dd3a00edd5f5270ad55c Mon Sep 17 00:00:00 2001
From: Anthony Koo <Anthony.Koo@amd.com>
Date: Fri, 27 Apr 2018 20:50:07 -0400
Subject: [PATCH 1242/1286] drm/amd/display: fix memory leaks

Signed-off-by: Anthony Koo <Anthony.Koo@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/display/modules/stats/stats.c | 24 ++++++++++++-------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index fe9e4b316d3a..3f7d47fdc367 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -115,12 +115,12 @@ struct mod_stats *mod_stats_create(struct dc *dc)
 	unsigned int reg_data;
 	int i = 0;
 
+	if (dc == NULL)
+		goto fail_construct;
+
 	core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
 
 	if (core_stats == NULL)
-		goto fail_alloc_context;
-
-	if (dc == NULL)
 		goto fail_construct;
 
 	core_stats->dc = dc;
@@ -146,6 +146,8 @@ struct mod_stats *mod_stats_create(struct dc *dc)
 				core_stats->entries,
 						GFP_KERNEL);
 
+		if (core_stats->time == NULL)
+			goto fail_construct_time;
 
 		core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
 		core_stats->events = kzalloc(
@@ -153,13 +155,13 @@ struct mod_stats *mod_stats_create(struct dc *dc)
 				core_stats->event_entries,
 						GFP_KERNEL);
 
+		if (core_stats->events == NULL)
+			goto fail_construct_events;
+
 	} else {
 		core_stats->entries = 0;
 	}
 
-	if (core_stats->time == NULL)
-		goto fail_construct;
-
 	/* Purposely leave index 0 unused so we don't need special logic to
 	 * handle calculation cases that depend on previous flip data.
 	 */
@@ -171,10 +173,13 @@ struct mod_stats *mod_stats_create(struct dc *dc)
 
 	return &core_stats->public;
 
-fail_construct:
+fail_construct_events:
+	kfree(core_stats->time);
+
+fail_construct_time:
 	kfree(core_stats);
 
-fail_alloc_context:
+fail_construct:
 	return NULL;
 }
 
@@ -186,6 +191,9 @@ void mod_stats_destroy(struct mod_stats *mod_stats)
 		if (core_stats->time != NULL)
 			kfree(core_stats->time);
 
+		if (core_stats->events != NULL)
+			kfree(core_stats->events);
+
 		kfree(core_stats);
 	}
 }
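
The fix above works by giving each allocation its own error label, ordered so that a
failure at any step unwinds exactly the allocations that succeeded before it and then
falls through to the earlier labels. A minimal stand-alone sketch of that staged-goto
cleanup pattern, using hypothetical names rather than the module's real API:

    #include <stdlib.h>

    struct stats {
            unsigned long *time;
            unsigned long *events;
    };

    static struct stats *stats_create(size_t entries)
    {
            struct stats *s;

            if (entries == 0)
                    goto fail;              /* nothing allocated yet */

            s = calloc(1, sizeof(*s));
            if (!s)
                    goto fail;

            s->time = calloc(entries, sizeof(*s->time));
            if (!s->time)
                    goto fail_time;         /* only 's' needs freeing */

            s->events = calloc(entries, sizeof(*s->events));
            if (!s->events)
                    goto fail_events;       /* free 'time', then 's' */

            return s;

    fail_events:
            free(s->time);
    fail_time:
            free(s);
    fail:
            return NULL;
    }

    int main(void)
    {
            struct stats *s = stats_create(16);

            if (s) {
                    free(s->events);
                    free(s->time);
                    free(s);
            }
            return 0;
    }

Each label releases only what was allocated before the failing step, which is why the
patch reorders the checks and renames the labels into the fail_construct_time /
fail_construct_events chain.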

From 5326c4525d1b2d5f1519268dd305e19c9bd4ef56 Mon Sep 17 00:00:00 2001
From: Mikita Lipski <mikita.lipski@amd.com>
Date: Fri, 27 Apr 2018 09:09:52 -0400
Subject: [PATCH 1243/1286] drm/amd/display: Clear connector's edid pointer

Clear the connector's edid pointer on connector update when unplugging
the display.

Fix a poisoned EDID when hotplugging on a previously used connector.

Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6d0dc1fecb39..1ce10bc2d37b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -911,6 +911,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
 		drm_mode_connector_update_edid_property(connector, NULL);
 		aconnector->num_modes = 0;
 		aconnector->dc_sink = NULL;
+		aconnector->edid = NULL;
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);

From cd3cb7c08754cd5dd1cbccfc2296d6b7dde511f2 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 17 May 2018 20:21:42 +0800
Subject: [PATCH 1244/1286] drm/amd/pp: Fix build warning in vegam

warning: missing braces around initializer [-Wmissing-braces]

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c    | 2 +-
 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index cf99c5eaf080..ec38c9f50a4d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -320,7 +320,7 @@ int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
 					pp_atomctrl_memory_clock_param_ai *mpll_param)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
-	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {0};
+	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0};
 	int result;
 
 	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index c9a563399330..a40f7141131c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -1366,10 +1366,12 @@ static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
-	struct SMU75_Discrete_MCArbDramTimingTable arb_regs = {0};
+	struct SMU75_Discrete_MCArbDramTimingTable arb_regs;
 	uint32_t i, j;
 	int result = 0;
 
+	memset(&arb_regs, 0, sizeof(SMU75_Discrete_MCArbDramTimingTable));
+
 	for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
 		for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
 			result = vegam_populate_memory_timing_parameters(hwmgr,
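
For context on the warning itself: a bare "= {0}" does zero-initialize the whole object,
but when the first member is itself a struct some GCC versions complain under
-Wmissing-braces, which is why the patch either spells out the nested braces or drops the
initializer in favor of memset(). A small stand-alone illustration, with made-up types
loosely modelled on the structures above:

    #include <stdio.h>
    #include <string.h>

    struct clock_val { unsigned int ulClock; };

    struct mpll_params {
            struct clock_val ulClock;       /* first member is a struct */
            unsigned char ucDllSpeed;
            unsigned char ucPostDiv;
    };

    int main(void)
    {
            /* May trigger -Wmissing-braces on some GCC versions, even though
             * it zero-initializes everything:
             *     struct mpll_params a = {0};
             */

            /* Fix 1: spell out the nested braces. */
            struct mpll_params a = {{0}, 0, 0};

            /* Fix 2: skip the initializer and memset instead. */
            struct mpll_params b;
            memset(&b, 0, sizeof(b));

            printf("%u %u\n", a.ulClock.ulClock, b.ulClock.ulClock);
            return 0;
    }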

From 6ee21dbfe9a79edf6f09d5f3ab1f3c4f0699dbf2 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 17 May 2018 13:31:49 -0400
Subject: [PATCH 1245/1286] drm/amdgpu: fix insert nop for VCN decode ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

NO_OP register should be written to 0

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 0501746b6c2c..7fbbdb1e58da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1048,14 +1048,17 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
-static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	int i;
 	struct amdgpu_device *adev = ring->adev;
+	int i;
 
-	for (i = 0; i < count; i++)
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
 		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
-
+		amdgpu_ring_write(ring, 0);
+	}
 }
 
 
@@ -1082,7 +1085,6 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
-	.nop = PACKET0(0x81ff, 0),
 	.support_64bit_ptrs = false,
 	.vmhub = AMDGPU_MMHUB,
 	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
@@ -1101,7 +1103,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
 	.test_ring = amdgpu_vcn_dec_ring_test_ring,
 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
-	.insert_nop = vcn_v1_0_ring_insert_nop,
+	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
 	.insert_start = vcn_v1_0_dec_ring_insert_start,
 	.insert_end = vcn_v1_0_dec_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
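
With the per-ring .nop packet removed, each padding NOP now costs two dwords: the PACKET0
header naming the NO_OP register and the 0 that must be written to it, which is why the
loop runs count/2 times and warns if the requested count or the write pointer is odd. The
same reasoning applies to the UVD7/UVD6/UVD5/UVD4.2 patches that follow. A rough
stand-alone model of the emission pattern (plain C, not kernel code; the packet encoding
and register index are placeholders):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NO_OP_REG  0x81ff                                /* placeholder index */
    #define PACKET0(reg, n) (((reg) & 0xffff) | ((n) << 16)) /* illustrative encoding */

    struct ring { uint32_t buf[64]; uint32_t wptr; };

    static void ring_write(struct ring *r, uint32_t v)
    {
            r->buf[r->wptr++ % 64] = v;
    }

    /* Pad with 'count' dwords: each NOP is a (header, 0) pair. */
    static void ring_insert_nop(struct ring *r, uint32_t count)
    {
            assert(!(r->wptr % 2) && !(count % 2));
            for (uint32_t i = 0; i < count / 2; i++) {
                    ring_write(r, PACKET0(NO_OP_REG, 0));
                    ring_write(r, 0);        /* NO_OP payload must be 0 */
            }
    }

    int main(void)
    {
            struct ring r = { .wptr = 0 };
            ring_insert_nop(&r, 8);          /* consumes 8 dwords, i.e. 4 NOPs */
            printf("wptr = %u\n", r.wptr);
            return 0;
    }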

From cbb7a239117d45d512fae1806cc7722f68c7b82f Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 17 May 2018 13:37:50 -0400
Subject: [PATCH 1246/1286] drm/amdgpu: fix insert nop for UVD7 ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

NO_OP register should be written to 0

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index f9a5482101bc..57d32f21b3a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1325,12 +1325,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 
 static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	int i;
 	struct amdgpu_device *adev = ring->adev;
+	int i;
 
-	for (i = 0; i < count; i++)
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
 		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
-
+		amdgpu_ring_write(ring, 0);
+	}
 }
 
 static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
@@ -1710,7 +1713,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
 static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
 	.align_mask = 0xf,
-	.nop = PACKET0(0x81ff, 0),
 	.support_64bit_ptrs = false,
 	.vmhub = AMDGPU_MMHUB,
 	.get_rptr = uvd_v7_0_ring_get_rptr,

From 1aac3c918036d6bb0075281d431da3844a058d00 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 17 May 2018 13:44:28 -0400
Subject: [PATCH 1247/1286] drm/amdgpu: fix insert nop for UVD6 ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

NO_OP register should be written to 0

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index dc391693d7ce..bfddf97dd13e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1100,6 +1100,18 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0xE);
 }
 
+static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
 static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
 	uint32_t seq = ring->fence_drv.sync_seq;
@@ -1532,7 +1544,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
 	.align_mask = 0xf,
-	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.support_64bit_ptrs = false,
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
@@ -1548,7 +1559,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
-	.insert_nop = amdgpu_ring_insert_nop,
+	.insert_nop = uvd_v6_0_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,

From 0232e30623f3761ce9350328d4d96cea8372b114 Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 17 May 2018 13:52:00 -0400
Subject: [PATCH 1248/1286] drm/amdgpu: fix insert nop for UVD5 ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

NO_OP register should be written to 0

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index f5d074a887fc..341ee6d55ce8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -541,6 +541,18 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
 static bool uvd_v5_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -841,7 +853,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
 static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
 	.align_mask = 0xf,
-	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.support_64bit_ptrs = false,
 	.get_rptr = uvd_v5_0_ring_get_rptr,
 	.get_wptr = uvd_v5_0_ring_get_wptr,
@@ -854,7 +865,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.emit_fence = uvd_v5_0_ring_emit_fence,
 	.test_ring = uvd_v5_0_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
-	.insert_nop = amdgpu_ring_insert_nop,
+	.insert_nop = uvd_v5_0_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,

From def139037bbf9195467fa83c0a299d666e6ed0bb Mon Sep 17 00:00:00 2001
From: Leo Liu <leo.liu@amd.com>
Date: Thu, 17 May 2018 13:54:21 -0400
Subject: [PATCH 1249/1286] drm/amdgpu: fix insert nop for UVD4.2 ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

NO_OP register should be written to 0

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5f22135de77f..6fed3d7797a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -524,6 +524,18 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -733,7 +745,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
 static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.type = AMDGPU_RING_TYPE_UVD,
 	.align_mask = 0xf,
-	.nop = PACKET0(mmUVD_NO_OP, 0),
 	.support_64bit_ptrs = false,
 	.get_rptr = uvd_v4_2_ring_get_rptr,
 	.get_wptr = uvd_v4_2_ring_get_wptr,
@@ -746,7 +757,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.emit_fence = uvd_v4_2_ring_emit_fence,
 	.test_ring = uvd_v4_2_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
-	.insert_nop = amdgpu_ring_insert_nop,
+	.insert_nop = uvd_v4_2_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,

From 6134534ca24f42043cacdd7108026803577f6c59 Mon Sep 17 00:00:00 2001
From: Thierry Reding <treding@nvidia.com>
Date: Wed, 16 May 2018 16:43:11 +0200
Subject: [PATCH 1250/1286] drm/tegra: Add kerneldoc for UAPI

Document the userspace ABI with kerneldoc to provide some information on
how to use it.

v3:
- reword description of arrays and array lengths

v2:
- keep GEM object creation flags for ABI compatibility
- fix typo in struct drm_tegra_syncpt_incr kerneldoc
- fix typos in struct drm_tegra_submit kerneldoc
- reworded some descriptions as suggested

Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
 include/uapi/drm/tegra_drm.h | 510 +++++++++++++++++++++++++++++++++--
 1 file changed, 491 insertions(+), 19 deletions(-)

diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 99e15d82d1e9..c4df3c3668b3 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -32,143 +32,615 @@ extern "C" {
 #define DRM_TEGRA_GEM_CREATE_TILED     (1 << 0)
 #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
 
+/**
+ * struct drm_tegra_gem_create - parameters for the GEM object creation IOCTL
+ */
 struct drm_tegra_gem_create {
+	/**
+	 * @size:
+	 *
+	 * The size, in bytes, of the buffer object to be created.
+	 */
 	__u64 size;
+
+	/**
+	 * @flags:
+	 *
+	 * A bitmask of flags that influence the creation of GEM objects:
+	 *
+	 * DRM_TEGRA_GEM_CREATE_TILED
+	 *   Use the 16x16 tiling format for this buffer.
+	 *
+	 * DRM_TEGRA_GEM_CREATE_BOTTOM_UP
+	 *   The buffer has a bottom-up layout.
+	 */
 	__u32 flags;
+
+	/**
+	 * @handle:
+	 *
+	 * The handle of the created GEM object. Set by the kernel upon
+	 * successful completion of the IOCTL.
+	 */
 	__u32 handle;
 };
 
+/**
+ * struct drm_tegra_gem_mmap - parameters for the GEM mmap IOCTL
+ */
 struct drm_tegra_gem_mmap {
+	/**
+	 * @handle:
+	 *
+	 * Handle of the GEM object to obtain an mmap offset for.
+	 */
 	__u32 handle;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
+
+	/**
+	 * @offset:
+	 *
+	 * The mmap offset for the given GEM object. Set by the kernel upon
+	 * successful completion of the IOCTL.
+	 */
 	__u64 offset;
 };
 
+/**
+ * struct drm_tegra_syncpt_read - parameters for the read syncpoint IOCTL
+ */
 struct drm_tegra_syncpt_read {
+	/**
+	 * @id:
+	 *
+	 * ID of the syncpoint to read the current value from.
+	 */
 	__u32 id;
+
+	/**
+	 * @value:
+	 *
+	 * The current syncpoint value. Set by the kernel upon successful
+	 * completion of the IOCTL.
+	 */
 	__u32 value;
 };
 
+/**
+ * struct drm_tegra_syncpt_incr - parameters for the increment syncpoint IOCTL
+ */
 struct drm_tegra_syncpt_incr {
+	/**
+	 * @id:
+	 *
+	 * ID of the syncpoint to increment.
+	 */
 	__u32 id;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
+/**
+ * struct drm_tegra_syncpt_wait - parameters for the wait syncpoint IOCTL
+ */
 struct drm_tegra_syncpt_wait {
+	/**
+	 * @id:
+	 *
+	 * ID of the syncpoint to wait on.
+	 */
 	__u32 id;
+
+	/**
+	 * @thresh:
+	 *
+	 * Threshold value for which to wait.
+	 */
 	__u32 thresh;
+
+	/**
+	 * @timeout:
+	 *
+	 * Timeout, in milliseconds, to wait.
+	 */
 	__u32 timeout;
+
+	/**
+	 * @value:
+	 *
+	 * The new syncpoint value after the wait. Set by the kernel upon
+	 * successful completion of the IOCTL.
+	 */
 	__u32 value;
 };
 
 #define DRM_TEGRA_NO_TIMEOUT	(0xffffffff)
 
+/**
+ * struct drm_tegra_open_channel - parameters for the open channel IOCTL
+ */
 struct drm_tegra_open_channel {
+	/**
+	 * @client:
+	 *
+	 * The client ID for this channel.
+	 */
 	__u32 client;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
+
+	/**
+	 * @context:
+	 *
+	 * The application context of this channel. Set by the kernel upon
+	 * successful completion of the IOCTL. This context needs to be passed
+	 * to the DRM_TEGRA_CHANNEL_CLOSE or the DRM_TEGRA_SUBMIT IOCTLs.
+	 */
 	__u64 context;
 };
 
+/**
+ * struct drm_tegra_close_channel - parameters for the close channel IOCTL
+ */
 struct drm_tegra_close_channel {
+	/**
+	 * @context:
+	 *
+	 * The application context of this channel. This is obtained from the
+	 * DRM_TEGRA_OPEN_CHANNEL IOCTL.
+	 */
 	__u64 context;
 };
 
+/**
+ * struct drm_tegra_get_syncpt - parameters for the get syncpoint IOCTL
+ */
 struct drm_tegra_get_syncpt {
+	/**
+	 * @context:
+	 *
+	 * The application context identifying the channel for which to obtain
+	 * the syncpoint ID.
+	 */
 	__u64 context;
+
+	/**
+	 * @index:
+	 *
+	 * Index of the client syncpoint for which to obtain the ID.
+	 */
 	__u32 index;
+
+	/**
+	 * @id:
+	 *
+	 * The ID of the given syncpoint. Set by the kernel upon successful
+	 * completion of the IOCTL.
+	 */
 	__u32 id;
 };
 
+/**
+ * struct drm_tegra_get_syncpt_base - parameters for the get wait base IOCTL
+ */
 struct drm_tegra_get_syncpt_base {
+	/**
+	 * @context:
+	 *
+	 * The application context identifying for which channel to obtain the
+	 * wait base.
+	 */
 	__u64 context;
+
+	/**
+	 * @syncpt:
+	 *
+	 * ID of the syncpoint for which to obtain the wait base.
+	 */
 	__u32 syncpt;
+
+	/**
+	 * @id:
+	 *
+	 * The ID of the wait base corresponding to the client syncpoint. Set
+	 * by the kernel upon successful completion of the IOCTL.
+	 */
 	__u32 id;
 };
 
+/**
+ * struct drm_tegra_syncpt - syncpoint increment operation
+ */
 struct drm_tegra_syncpt {
+	/**
+	 * @id:
+	 *
+	 * ID of the syncpoint to operate on.
+	 */
 	__u32 id;
+
+	/**
+	 * @incrs:
+	 *
+	 * Number of increments to perform for the syncpoint.
+	 */
 	__u32 incrs;
 };
 
+/**
+ * struct drm_tegra_cmdbuf - structure describing a command buffer
+ */
 struct drm_tegra_cmdbuf {
+	/**
+	 * @handle:
+	 *
+	 * Handle to a GEM object containing the command buffer.
+	 */
 	__u32 handle;
+
+	/**
+	 * @offset:
+	 *
+	 * Offset, in bytes, into the GEM object identified by @handle at
+	 * which the command buffer starts.
+	 */
 	__u32 offset;
+
+	/**
+	 * @words:
+	 *
+	 * Number of 32-bit words in this command buffer.
+	 */
 	__u32 words;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
+/**
+ * struct drm_tegra_reloc - GEM object relocation structure
+ */
 struct drm_tegra_reloc {
 	struct {
+		/**
+		 * @cmdbuf.handle:
+		 *
+		 * Handle to the GEM object containing the command buffer for
+		 * which to perform this GEM object relocation.
+		 */
 		__u32 handle;
+
+		/**
+		 * @cmdbuf.offset:
+		 *
+		 * Offset, in bytes, into the command buffer at which to
+		 * insert the relocated address.
+		 */
 		__u32 offset;
 	} cmdbuf;
 	struct {
+		/**
+		 * @target.handle:
+		 *
+		 * Handle to the GEM object to be relocated.
+		 */
 		__u32 handle;
+
+		/**
+		 * @target.offset:
+		 *
+		 * Offset, in bytes, into the target GEM object at which the
+		 * relocated data starts.
+		 */
 		__u32 offset;
 	} target;
+
+	/**
+	 * @shift:
+	 *
+	 * The number of bits by which to shift relocated addresses.
+	 */
 	__u32 shift;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
+/**
+ * struct drm_tegra_waitchk - wait check structure
+ */
 struct drm_tegra_waitchk {
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object containing a command stream on which to
+	 * perform the wait check.
+	 */
 	__u32 handle;
+
+	/**
+	 * @offset:
+	 *
+	 * Offset, in bytes, of the location in the command stream to perform
+	 * the wait check on.
+	 */
 	__u32 offset;
+
+	/**
+	 * @syncpt:
+	 *
+	 * ID of the syncpoint to wait check.
+	 */
 	__u32 syncpt;
+
+	/**
+	 * @thresh:
+	 *
+	 * Threshold value for which to check.
+	 */
 	__u32 thresh;
 };
 
+/**
+ * struct drm_tegra_submit - job submission structure
+ */
 struct drm_tegra_submit {
+	/**
+	 * @context:
+	 *
+	 * The application context identifying the channel to use for the
+	 * execution of this job.
+	 */
 	__u64 context;
-	__u32 num_syncpts;
-	__u32 num_cmdbufs;
-	__u32 num_relocs;
-	__u32 num_waitchks;
-	__u32 waitchk_mask;
-	__u32 timeout;
-	__u64 syncpts;
-	__u64 cmdbufs;
-	__u64 relocs;
-	__u64 waitchks;
-	__u32 fence;		/* Return value */
 
-	__u32 reserved[5];	/* future expansion */
+	/**
+	 * @num_syncpts:
+	 *
+	 * The number of syncpoints operated on by this job. This defines the
+	 * length of the array pointed to by @syncpts.
+	 */
+	__u32 num_syncpts;
+
+	/**
+	 * @num_cmdbufs:
+	 *
+	 * The number of command buffers to execute as part of this job. This
+	 * defines the length of the array pointed to by @cmdbufs.
+	 */
+	__u32 num_cmdbufs;
+
+	/**
+	 * @num_relocs:
+	 *
+	 * The number of relocations to perform before executing this job.
+	 * This defines the length of the array pointed to by @relocs.
+	 */
+	__u32 num_relocs;
+
+	/**
+	 * @num_waitchks:
+	 *
+	 * The number of wait checks to perform as part of this job. This
+	 * defines the length of the array pointed to by @waitchks.
+	 */
+	__u32 num_waitchks;
+
+	/**
+	 * @waitchk_mask:
+	 *
+	 * Bitmask of valid wait checks.
+	 */
+	__u32 waitchk_mask;
+
+	/**
+	 * @timeout:
+	 *
+	 * Timeout, in milliseconds, before this job is cancelled.
+	 */
+	__u32 timeout;
+
+	/**
+	 * @syncpts:
+	 *
+	 * A pointer to an array of &struct drm_tegra_syncpt structures that
+	 * specify the syncpoint operations performed as part of this job.
+	 * The number of elements in the array must be equal to the value
+	 * given by @num_syncpts.
+	 */
+	__u64 syncpts;
+
+	/**
+	 * @cmdbufs:
+	 *
+	 * A pointer to an array of &struct drm_tegra_cmdbuf structures that
+	 * define the command buffers to execute as part of this job. The
+	 * number of elements in the array must be equal to the value given
+	 * by @num_syncpts.
+	 */
+	__u64 cmdbufs;
+
+	/**
+	 * @relocs:
+	 *
+	 * A pointer to an array of &struct drm_tegra_reloc structures that
+	 * specify the relocations that need to be performed before executing
+	 * this job. The number of elements in the array must be equal to the
+	 * value given by @num_relocs.
+	 */
+	__u64 relocs;
+
+	/**
+	 * @waitchks:
+	 *
+	 * A pointer to an array of &struct drm_tegra_waitchk structures that
+	 * specify the wait checks to be performed while executing this job.
+	 * The number of elements in the array must be equal to the value
+	 * given by @num_waitchks.
+	 */
+	__u64 waitchks;
+
+	/**
+	 * @fence:
+	 *
+	 * The threshold of the syncpoint associated with this job after it
+	 * has been completed. Set by the kernel upon successful completion of
+	 * the IOCTL. This can be used with the DRM_TEGRA_SYNCPT_WAIT IOCTL to
+	 * wait for this job to be finished.
+	 */
+	__u32 fence;
+
+	/**
+	 * @reserved:
+	 *
+	 * This field is reserved for future use. Must be 0.
+	 */
+	__u32 reserved[5];
 };
 
 #define DRM_TEGRA_GEM_TILING_MODE_PITCH 0
 #define DRM_TEGRA_GEM_TILING_MODE_TILED 1
 #define DRM_TEGRA_GEM_TILING_MODE_BLOCK 2
 
+/**
+ * struct drm_tegra_gem_set_tiling - parameters for the set tiling IOCTL
+ */
 struct drm_tegra_gem_set_tiling {
-	/* input */
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object for which to set the tiling parameters.
+	 */
 	__u32 handle;
+
+	/**
+	 * @mode:
+	 *
+	 * The tiling mode to set. Must be one of:
+	 *
+	 * DRM_TEGRA_GEM_TILING_MODE_PITCH
+	 *   pitch linear format
+	 *
+	 * DRM_TEGRA_GEM_TILING_MODE_TILED
+	 *   16x16 tiling format
+	 *
+	 * DRM_TEGRA_GEM_TILING_MODE_BLOCK
+	 *   16Bx2 tiling format
+	 */
 	__u32 mode;
+
+	/**
+	 * @value:
+	 *
+	 * The value to set for the tiling mode parameter.
+	 */
 	__u32 value;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
+/**
+ * struct drm_tegra_gem_get_tiling - parameters for the get tiling IOCTL
+ */
 struct drm_tegra_gem_get_tiling {
-	/* input */
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object for which to query the tiling parameters.
+	 */
 	__u32 handle;
-	/* output */
+
+	/**
+	 * @mode:
+	 *
+	 * The tiling mode currently associated with the GEM object. Set by
+	 * the kernel upon successful completion of the IOCTL.
+	 */
 	__u32 mode;
+
+	/**
+	 * @value:
+	 *
+	 * The tiling mode parameter currently associated with the GEM object.
+	 * Set by the kernel upon successful completion of the IOCTL.
+	 */
 	__u32 value;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
 #define DRM_TEGRA_GEM_BOTTOM_UP		(1 << 0)
 #define DRM_TEGRA_GEM_FLAGS		(DRM_TEGRA_GEM_BOTTOM_UP)
 
+/**
+ * struct drm_tegra_gem_set_flags - parameters for the set flags IOCTL
+ */
 struct drm_tegra_gem_set_flags {
-	/* input */
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object for which to set the flags.
+	 */
 	__u32 handle;
-	/* output */
+
+	/**
+	 * @flags:
+	 *
+	 * The flags to set for the GEM object.
+	 */
 	__u32 flags;
 };
 
+/**
+ * struct drm_tegra_gem_get_flags - parameters for the get flags IOCTL
+ */
 struct drm_tegra_gem_get_flags {
-	/* input */
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object for which to query the flags.
+	 */
 	__u32 handle;
-	/* output */
+
+	/**
+	 * @flags:
+	 *
+	 * The flags currently associated with the GEM object. Set by the
+	 * kernel upon successful completion of the IOCTL.
+	 */
 	__u32 flags;
 };
 

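The kerneldoc above describes the structures from the kernel's side; from userspace the
typical flow is to create a GEM object, query its mmap offset, then mmap it through the
DRM device node. A hedged sketch of that sequence, assuming the usual
DRM_IOCTL_TEGRA_GEM_CREATE / DRM_IOCTL_TEGRA_GEM_MMAP wrappers defined alongside these
structures and a libdrm-style include path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <drm/tegra_drm.h>       /* assumed install path of this header */

    int main(void)
    {
            int fd = open("/dev/dri/card0", O_RDWR);
            if (fd < 0)
                    return 1;

            struct drm_tegra_gem_create create;
            memset(&create, 0, sizeof(create));
            create.size = 4096;                      /* one page, no flags */

            if (ioctl(fd, DRM_IOCTL_TEGRA_GEM_CREATE, &create) < 0)
                    goto out;

            struct drm_tegra_gem_mmap gem_mmap;
            memset(&gem_mmap, 0, sizeof(gem_mmap));
            gem_mmap.handle = create.handle;         /* set by the kernel above */

            if (ioctl(fd, DRM_IOCTL_TEGRA_GEM_MMAP, &gem_mmap) < 0)
                    goto out;

            void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                             MAP_SHARED, fd, gem_mmap.offset);
            if (ptr != MAP_FAILED) {
                    printf("mapped handle %u at %p\n", create.handle, ptr);
                    munmap(ptr, create.size);
            }
    out:
            close(fd);
            return 0;
    }
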
From 63e482f172f4680a4ff00305dd140589ae270306 Mon Sep 17 00:00:00 2001
From: Fabio Estevam <fabio.estevam@nxp.com>
Date: Sun, 20 May 2018 12:03:54 -0300
Subject: [PATCH 1251/1286] MAINTAINERS: drm: fsl-dcu: Update to Alison's NXP
 email address

The freescale.com email domain is not valid anymore, so use
the nxp.com domain instead.

Signed-off-by: Fabio Estevam <fabio.estevam@nxp.com>
Signed-off-by: Stefan Agner <stefan@agner.ch>
---
 MAINTAINERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 0a1410d5a621..268bcfd04b14 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4665,7 +4665,7 @@ F:	Documentation/devicetree/bindings/display/exynos/
 
 DRM DRIVERS FOR FREESCALE DCU
 M:	Stefan Agner <stefan@agner.ch>
-M:	Alison Wang <alison.wang@freescale.com>
+M:	Alison Wang <alison.wang@nxp.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
 F:	drivers/gpu/drm/fsl-dcu/

From e05f3ea260d6db1fb9aede6af29d7cf8f491f0ed Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski@samsung.com>
Date: Wed, 23 May 2018 13:00:00 +0200
Subject: [PATCH 1252/1286] dt-bindings: exynos5433-decon: add more required
 clocks

Proper support for Decon's hardware windows no 4 and 5 requires enabling
a few more clocks ("aclk_smmu_decon1x", "aclk_xiu_decon1x",
"pclk_smmu_decon1x"). Add those clocks to the required clocks list in
Decon's dt bindings.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Krzysztof Kozlowski <krzk@kernel.org>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 .../bindings/display/exynos/exynos5433-decon.txt         | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt b/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
index fc2588292a68..775193e1c641 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
@@ -19,7 +19,8 @@ Required properties:
 	  clock-names property.
 - clock-names: list of clock names sorted in the same order as the clocks
 	       property. Must contain "pclk", "aclk_decon", "aclk_smmu_decon0x",
-	       "aclk_xiu_decon0x", "pclk_smmu_decon0x", clk_decon_vclk",
+	       "aclk_xiu_decon0x", "pclk_smmu_decon0x", "aclk_smmu_decon1x",
+	       "aclk_xiu_decon1x", "pclk_smmu_decon1x", clk_decon_vclk",
 	       "sclk_decon_eclk"
 - ports: contains a port which is connected to mic node. address-cells and
 	 size-cells must 1 and 0, respectively.
@@ -34,10 +35,14 @@ decon: decon@13800000 {
 	clocks = <&cmu_disp CLK_ACLK_DECON>, <&cmu_disp CLK_ACLK_SMMU_DECON0X>,
 		<&cmu_disp CLK_ACLK_XIU_DECON0X>,
 		<&cmu_disp CLK_PCLK_SMMU_DECON0X>,
+		<&cmu_disp CLK_ACLK_SMMU_DECON1X>,
+		<&cmu_disp CLK_ACLK_XIU_DECON1X>,
+		<&cmu_disp CLK_PCLK_SMMU_DECON1X>,
 		<&cmu_disp CLK_SCLK_DECON_VCLK>,
 		<&cmu_disp CLK_SCLK_DECON_ECLK>;
 	clock-names = "aclk_decon", "aclk_smmu_decon0x", "aclk_xiu_decon0x",
-		"pclk_smmu_decon0x", "sclk_decon_vclk", "sclk_decon_eclk";
+		"pclk_smmu_decon0x", "aclk_smmu_decon1x", "aclk_xiu_decon1x",
+		"pclk_smmu_decon1x", "sclk_decon_vclk", "sclk_decon_eclk";
 	interrupt-names = "vsync", "lcd_sys";
 	interrupts = <0 202 0>, <0 203 0>;
 

From cb5fba715babc599e82f40d53b6b956efcc8fc25 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski@samsung.com>
Date: Wed, 23 May 2018 13:00:02 +0200
Subject: [PATCH 1253/1286] drm/exynos: decon: Add support for hardware windows
 no 4 and 5

Enable support for two more hardware windows. This requires enabling a few
more clocks and setting the proper plane type for all windows. In the new
configuration the primary plane uses hardware window no 3 and the cursor uses
window no 5. The remaining hardware windows are used for overlays. This
gives us an overlay plane both below and above the primary plane for both
Decon and DeconTV (which uses hardware window no 0 for background).

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 21 +++++++++++--------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1c330f2a7a5d..82c95c34447f 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -31,7 +31,10 @@
 #define DSD_CFG_MUX 0x1004
 #define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13)
 
-#define WINDOWS_NR	3
+#define WINDOWS_NR	5
+#define PRIMARY_WIN	2
+#define CURSON_WIN	4
+
 #define MIN_FB_WIDTH_FOR_16WORD_BURST	128
 
 #define I80_HW_TRG	(1 << 0)
@@ -43,6 +46,9 @@ static const char * const decon_clks_name[] = {
 	"aclk_smmu_decon0x",
 	"aclk_xiu_decon0x",
 	"pclk_smmu_decon0x",
+	"aclk_smmu_decon1x",
+	"aclk_xiu_decon1x",
+	"pclk_smmu_decon1x",
 	"sclk_decon_vclk",
 	"sclk_decon_eclk",
 };
@@ -74,9 +80,8 @@ static const uint32_t decon_formats[] = {
 };
 
 static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
-	DRM_PLANE_TYPE_PRIMARY,
-	DRM_PLANE_TYPE_OVERLAY,
-	DRM_PLANE_TYPE_CURSOR,
+	[PRIMARY_WIN] = DRM_PLANE_TYPE_PRIMARY,
+	[CURSON_WIN] = DRM_PLANE_TYPE_CURSOR,
 };
 
 static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
@@ -552,12 +557,10 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 	drm_dev->max_vblank_count = 0xffffffff;
 
 	for (win = ctx->first_win; win < WINDOWS_NR; win++) {
-		int tmp = (win == ctx->first_win) ? 0 : win;
-
 		ctx->configs[win].pixel_formats = decon_formats;
 		ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
-		ctx->configs[win].zpos = win;
-		ctx->configs[win].type = decon_win_types[tmp];
+		ctx->configs[win].zpos = win - ctx->first_win;
+		ctx->configs[win].type = decon_win_types[win];
 
 		ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
 					&ctx->configs[win]);
@@ -565,7 +568,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 			return ret;
 	}
 
-	exynos_plane = &ctx->planes[ctx->first_win];
+	exynos_plane = &ctx->planes[PRIMARY_WIN];
 	out_type = (ctx->out_type & IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
 						  : EXYNOS_DISPLAY_TYPE_LCD;
 	ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,

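The new decon_win_types table only names the primary and cursor entries; the patch relies
on the C rule that a designated array initializer zero-fills every element it does not
mention, and on DRM_PLANE_TYPE_OVERLAY being the zero value of the plane-type enum, so
every remaining window comes out as an overlay. A tiny stand-alone illustration of that
rule (local enum and window indices, not the DRM ones):

    #include <stdio.h>

    enum plane_type { OVERLAY, PRIMARY, CURSOR };   /* OVERLAY == 0 */

    #define WINDOWS_NR  5
    #define PRIMARY_WIN 2
    #define CURSOR_WIN  4

    static const enum plane_type win_types[WINDOWS_NR] = {
            [PRIMARY_WIN] = PRIMARY,
            [CURSOR_WIN]  = CURSOR,
            /* entries 0, 1 and 3 are zero-initialized, i.e. OVERLAY */
    };

    int main(void)
    {
            for (int i = 0; i < WINDOWS_NR; i++)
                    printf("window %d -> %d\n", i, win_types[i]);
            return 0;
    }
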
From bf83060408fea52eccdcf695f3b4b16c71207691 Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Date: Thu, 17 May 2018 11:18:34 -0400
Subject: [PATCH 1254/1286] Remove calls to suspend/resume atomic helpers from
 amdgpu_device_gpu_recover. (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

First of all, it is already being called from the display code via the amd_ip_funcs.suspend/resume hooks.
Second, the place in amdgpu_device_gpu_recover where it is called is wrong for GPU stalls, since it is
called BEFORE we cancel and force completion of all in-flight jobs which were not yet processed.
So, as Bas pointed out in the ticket, we try to wait for a fence in amdgpu_pm_compute_clocks but the pipe
is hung, so we end up in a deadlock.

v2: remove unused variable

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=106500
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0e3f69d31b80..adeb48ec4897 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3176,7 +3176,6 @@ error:
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job *job, bool force)
 {
-	struct drm_atomic_state *state = NULL;
 	int i, r, resched;
 
 	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
@@ -3199,10 +3198,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	/* store modesetting */
-	if (amdgpu_device_has_dc_support(adev))
-		state = drm_atomic_helper_suspend(adev->ddev);
-
 	/* block all schedulers and reset given job's ring */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -3242,10 +3237,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		kthread_unpark(ring->sched.thread);
 	}
 
-	if (amdgpu_device_has_dc_support(adev)) {
-		if (drm_atomic_helper_resume(adev->ddev, state))
-			dev_info(adev->dev, "drm resume failed:%d\n", r);
-	} else {
+	if (!amdgpu_device_has_dc_support(adev)) {
 		drm_helper_resume_force_mode(adev->ddev);
 	}
 

From 99631045862e2994b47285a8cc96bc939ae5b42f Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 10 May 2018 14:45:12 -0500
Subject: [PATCH 1255/1286] drm/amdgpu: add new DF 1.7 register defs

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h  | 4 ++++
 drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
index 2b305dd021e8..e6044e27a913 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
@@ -30,4 +30,8 @@
 #define mmDF_CS_AON0_DramBaseAddress0									0x0044
 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX								0
 
+#define mmDF_CS_AON0_CoherentSlaveModeCtrlA0								0x0214
+#define mmDF_CS_AON0_CoherentSlaveModeCtrlA0_BASE_IDX							0
+
+
 #endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
index 2ba849798924..a78c99480e2d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
@@ -45,4 +45,8 @@
 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK							0x00000700L
 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK							0xFFFFF000L
 
+//DF_CS_AON0_CoherentSlaveModeCtrlA0
+#define DF_CS_AON0_CoherentSlaveModeCtrlA0__ForceParWrRMW__SHIFT					0x3
+#define DF_CS_AON0_CoherentSlaveModeCtrlA0__ForceParWrRMW_MASK						0x00000008L
+
 #endif

From 8f9b2e506129e6eb0d21d163f361dd68a050b974 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 10 May 2018 14:59:31 -0500
Subject: [PATCH 1256/1286] drm/amdgpu: add new DF callback for ECC setup

The ForceParWrRMW setting needs to be enabled for ECC, but disabled
when ECC is not enabled.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 03a2c0be0bf2..a59c07590cee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1401,6 +1401,8 @@ struct amdgpu_df_funcs {
 						 bool enable);
 	void (*get_clockgating_state)(struct amdgpu_device *adev,
 				      u32 *flags);
+	void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
+					    bool enable);
 };
 /* Define the HW IP blocks will be used in driver , add more if necessary */
 enum amd_hw_ip_block_type {

From 1ca2393b7373d5b0e5a356124fb10fc97e143e88 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 10 May 2018 15:06:55 -0500
Subject: [PATCH 1257/1286] drm/amdgpu: add a df 1.7 implementation of
 enable_ecc_force_par_wr_rmw

Needed for proper memory setup depending on whether ECC is
enabled on a particular board.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index 4ffda996660f..9935371db7ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -102,6 +102,13 @@ static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
 		*flags |= AMD_CG_SUPPORT_DF_MGCG;
 }
 
+static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
+						bool enable)
+{
+	WREG32_FIELD15(DF, 0, DF_CS_AON0_CoherentSlaveModeCtrlA0,
+		       ForceParWrRMW, enable);
+}
+
 const struct amdgpu_df_funcs df_v1_7_funcs = {
 	.init = df_v1_7_init,
 	.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
@@ -109,4 +116,5 @@ const struct amdgpu_df_funcs df_v1_7_funcs = {
 	.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
 	.update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
 	.get_clockgating_state = df_v1_7_get_clockgating_state,
+	.enable_ecc_force_par_wr_rmw = df_v1_7_enable_ecc_force_par_wr_rmw,
 };
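
Callers are expected to reach this implementation through the amdgpu_df_funcs
hook added in the previous patch rather than calling the df_v1_7 function
directly. A minimal, hypothetical call-site sketch (the wrapper function is
illustrative only; the callback pointer and its signature come from the two
patches above):

	static void example_apply_ecc_rmw_policy(struct amdgpu_device *adev,
						 bool ecc_enabled)
	{
		/* guard against ASICs whose DF block does not provide the hook */
		if (adev->df_funcs && adev->df_funcs->enable_ecc_force_par_wr_rmw)
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, ecc_enabled);
	}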

From e1d1a7729a62d7b79fb2ab4ac3bc6fc0ebfb6db9 Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Thu, 10 May 2018 15:15:12 -0500
Subject: [PATCH 1258/1286] drm/amdgpu/gmc9: disable partial wr rmw if ECC is
 not enabled

The vbios mistakenly sets this bit on some boards without ECC.
This can lead to reduced performance in some workloads.  Disable
the bit if the board does not have ECC.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index b60ed288d314..3c0a85d4e4ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -675,6 +675,7 @@ static int gmc_v9_0_late_init(void *handle)
 			DRM_INFO("ECC is active.\n");
 		} else if (r == 0) {
 			DRM_INFO("ECC is not present.\n");
+			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
 		} else {
 			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
 			return r;

From 63e138abf0761c7ea3dcb29060bfd48a34e58ecf Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 18 May 2018 14:24:44 +0800
Subject: [PATCH 1259/1286] drm/amd/pp: Fix static checker warning

error: uninitialized symbol 'xxxx'

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c  | 24 ++++++++-----------
 .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  |  3 ++-
 .../drm/amd/powerplay/smumgr/smu7_smumgr.c    |  6 ++---
 3 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index ec38c9f50a4d..7047e29755c3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1104,10 +1104,8 @@ int atomctrl_get_voltage_evv_on_sclk(
 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
 			(uint32_t *)&get_voltage_info_param_space);
 
-	if (0 != result)
-		return result;
-
-	*voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+	*voltage = result ? 0 :
+			le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
 				(&get_voltage_info_param_space))->usVoltageLevel);
 
 	return result;
@@ -1312,8 +1310,7 @@ int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
 			(uint32_t *)&efuse_param);
-	if (!result)
-		*efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
+	*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
 
 	return result;
 }
@@ -1354,11 +1351,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
 			(uint32_t *)&get_voltage_info_param_space);
 
-	if (0 != result)
-		return result;
-
-	*voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
-				(&get_voltage_info_param_space))->ulVoltageLevel);
+	*voltage = result ? 0 :
+		le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
 
 	return result;
 }
@@ -1552,15 +1546,17 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
 		case CHIP_FIJI:
 			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
 			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
-			break;
+			return;
 		case CHIP_POLARIS11:
 		case CHIP_POLARIS10:
 		case CHIP_POLARIS12:
 			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
 			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
-			break;
-		default:
 			return;
+		default:
+			break;
 		}
 	}
+	*max_vddc = 0;
+	*min_vddc = 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 646c9e9bf681..45e9b8cb169d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -860,7 +860,8 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
-	uint32_t min_vddc, max_vddc;
+	uint32_t min_vddc = 0;
+	uint32_t max_vddc = 0;
 
 	if (!table_info)
 		return;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 64d33b775906..d644a9bb9078 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -283,11 +283,9 @@ int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t
 
 	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
 
-	if (result)
-		return result;
+	*value = result ? 0 : cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
 
-	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
-	return 0;
+	return result;
 }
 
 int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
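
The fix pattern throughout this patch is to always assign the output
parameter, even when the underlying atombios/SMC call fails, so callers that
ignore the return code never read an uninitialized value. A condensed,
self-contained sketch of the idea (all example_* names are placeholders, not
driver functions):

	static int example_execute(void)
	{
		return -EIO;	/* stand-in for a failing table/SMC call */
	}

	static int example_read_value(uint32_t *value)
	{
		int result = example_execute();

		/* before: "if (result) return result;" left *value untouched */
		*value = result ? 0 : 0x1234;

		return result;
	}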

From e6ee925b795311679dd6e0ebeae6f1dbe983c059 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Fri, 18 May 2018 14:59:46 +0800
Subject: [PATCH 1260/1286] drm/amd/pp: fix a couple locking issues

We should unlock before returning on the error path.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../drm/amd/powerplay/hwmgr/smu7_powertune.c  | 31 ++++++++++++-------
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 99b29ff45d91..c952845833d7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -936,45 +936,49 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
 
 			if (hwmgr->chip_id == CHIP_POLARIS10) {
 				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
-				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
-				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 			} else if (hwmgr->chip_id == CHIP_POLARIS11) {
 				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
-				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 				if (hwmgr->is_kicker)
 					result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker);
 				else
 					result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
-				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 			} else if (hwmgr->chip_id == CHIP_POLARIS12) {
 				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
-				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12);
-				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 			} else if (hwmgr->chip_id == CHIP_VEGAM) {
 				result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM);
-				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 				result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM);
-				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+				PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 			}
 		}
 		cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
 
 		result = smu7_enable_didt(hwmgr, true);
-		PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
+		PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", goto error);
 
 		if (hwmgr->chip_id == CHIP_POLARIS11) {
 			result = smum_send_msg_to_smc(hwmgr,
 						(uint16_t)(PPSMC_MSG_EnableDpmDidt));
 			PP_ASSERT_WITH_CODE((0 == result),
-					"Failed to enable DPM DIDT.", return result);
+					"Failed to enable DPM DIDT.", goto error);
 		}
 		mutex_unlock(&adev->grbm_idx_mutex);
 		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	}
 
 	return 0;
+error:
+	mutex_unlock(&adev->grbm_idx_mutex);
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	return result;
 }
 
 int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
@@ -992,17 +996,20 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 		result = smu7_enable_didt(hwmgr, false);
 		PP_ASSERT_WITH_CODE((result == 0),
 				"Post DIDT enable clock gating failed.",
-				return result);
+				goto error);
 		if (hwmgr->chip_id == CHIP_POLARIS11) {
 			result = smum_send_msg_to_smc(hwmgr,
 						(uint16_t)(PPSMC_MSG_DisableDpmDidt));
 			PP_ASSERT_WITH_CODE((0 == result),
-					"Failed to disable DPM DIDT.", return result);
+					"Failed to disable DPM DIDT.", goto error);
 		}
 		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	}
 
 	return 0;
+error:
+	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	return result;
 }
 
 int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
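
The error paths above previously returned while grbm_idx_mutex was still held
and the RLC was still in safe mode. The fix funnels every failure through a
single error label that releases both. A stripped-down sketch of that
structure (example_program_registers() is a placeholder, and enter_safe_mode
is assumed to pair with the exit_safe_mode call visible in the diff):

	int example_enable_didt_config(struct pp_hwmgr *hwmgr)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)hwmgr->adev;
		int result;

		adev->gfx.rlc.funcs->enter_safe_mode(adev);
		mutex_lock(&adev->grbm_idx_mutex);

		result = example_program_registers(hwmgr);
		if (result)
			goto error;	/* was: return result (leaked the lock) */

		mutex_unlock(&adev->grbm_idx_mutex);
		adev->gfx.rlc.funcs->exit_safe_mode(adev);
		return 0;

	error:
		mutex_unlock(&adev->grbm_idx_mutex);
		adev->gfx.rlc.funcs->exit_safe_mode(adev);
		return result;
	}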

From 34319b329f73eabd7e3baefecf9f71eb8b86db6f Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 16 May 2018 20:06:53 +0800
Subject: [PATCH 1261/1286] drm/amdgpu: skip CG for VCN when late_init/fini

VCN clockgating is handled manually like VCE and UVD.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index adeb48ec4897..290e279abf0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1718,6 +1718,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* enable clockgating to save power */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1817,6 +1818,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,

From ca0b9494633f65ee6779d4c7ca19e799b8308e69 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 16 May 2018 20:09:09 +0800
Subject: [PATCH 1262/1286] drm/amd/pp: Add smu support for VCN powergating on
 RV

Add the powerplay callback for powergating VCN (same
as UVD and VCE).

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 .../gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 017ef2d169e9..85f84f4d8be5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1128,6 +1128,23 @@ static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
 }
 
+static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
+{
+	if (bgate) {
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
+						AMD_IP_BLOCK_TYPE_VCN,
+						AMD_PG_STATE_GATE);
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_PowerDownVcn, 0);
+	} else {
+		smum_send_msg_to_smc_with_parameter(hwmgr,
+						PPSMC_MSG_PowerUpVcn, 0);
+		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
+						AMD_IP_BLOCK_TYPE_VCN,
+						AMD_PG_STATE_UNGATE);
+	}
+}
+
 static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.backend_init = smu10_hwmgr_backend_init,
 	.backend_fini = smu10_hwmgr_backend_fini,
@@ -1136,7 +1153,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
 	.force_dpm_level = smu10_dpm_force_dpm_level,
 	.get_power_state_size = smu10_get_power_state_size,
 	.powerdown_uvd = NULL,
-	.powergate_uvd = NULL,
+	.powergate_uvd = smu10_powergate_vcn,
 	.powergate_vce = NULL,
 	.get_mclk = smu10_dpm_get_mclk,
 	.get_sclk = smu10_dpm_get_sclk,

From 8dbb8cdf52dbc264d531d1e51f5e311bd1558c21 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 16 May 2018 20:10:25 +0800
Subject: [PATCH 1263/1286] drm/amdgpu: Add CG/PG flags for VCN

Define new clockgating and powergating flags for the VCN block.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/include/amd_shared.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 33de33016bda..b178176b72ac 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -92,7 +92,7 @@ enum amd_powergating_state {
 #define AMD_CG_SUPPORT_GFX_3D_CGLS		(1 << 21)
 #define AMD_CG_SUPPORT_DRM_MGCG			(1 << 22)
 #define AMD_CG_SUPPORT_DF_MGCG			(1 << 23)
-
+#define AMD_CG_SUPPORT_VCN_MGCG			(1 << 24)
 /* PG flags */
 #define AMD_PG_SUPPORT_GFX_PG			(1 << 0)
 #define AMD_PG_SUPPORT_GFX_SMG			(1 << 1)
@@ -108,6 +108,7 @@ enum amd_powergating_state {
 #define AMD_PG_SUPPORT_GFX_QUICK_MG		(1 << 11)
 #define AMD_PG_SUPPORT_GFX_PIPELINE		(1 << 12)
 #define AMD_PG_SUPPORT_MMHUB			(1 << 13)
+#define AMD_PG_SUPPORT_VCN			(1 << 14)
 
 enum PP_FEATURE_MASK {
 	PP_SCLK_DPM_MASK = 0x1,

From ac06b4cfd78b79ec6c8306062801a4276a3e0c79 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 17 May 2018 15:58:53 +0800
Subject: [PATCH 1264/1286] drm/amdgpu: Add SOC15_WAIT_ON_RREG macro define

Add a new macro that waits for a register field to reach a specific
value.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15_common.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index def865067edd..0942f492d2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -47,6 +47,21 @@
 #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
 	WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
 
+#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
+	do {							\
+		uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
+		uint32_t loop = adev->usec_timeout;		\
+		while ((tmp_ & (mask)) != (expected_value)) {	\
+			udelay(2);				\
+			tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
+			loop--;					\
+			if (!loop) {				\
+				ret = -ETIMEDOUT;		\
+				break;				\
+			}					\
+		}						\
+	} while (0)
+
 #endif
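
A hedged usage sketch: poll UVD_STATUS until the idle bit (0x2) is set,
mirroring how a later patch in this series uses the macro to implement
vcn_v1_0_wait_for_idle. The wrapper function is illustrative only:

	static int example_wait_for_vcn_idle(struct amdgpu_device *adev)
	{
		int ret = 0;

		/* expected_value 0x2, mask 0x2: wait for the idle bit only */
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);

		/* 0 on success, -ETIMEDOUT after adev->usec_timeout iterations */
		return ret;
	}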
 
 

From ecf81ed98c8df8c6d397f4e044af175481b5b831 Mon Sep 17 00:00:00 2001
From: Andrzej Hajda <a.hajda@samsung.com>
Date: Tue, 8 May 2018 11:36:58 +0200
Subject: [PATCH 1265/1286] drm/exynos/dsi: mask frame-done interrupt

The DSI driver is not really interested in this interrupt. It only causes
unnecessary execution of the interrupt handler and could possibly cause a
FIFO overflow, as it triggers the DSI interrupt handler to process the
next DSI transfer. With this patch we get rid of about 30 IRQ handler
calls per second.

Fixes: e6f988a45857 ("drm/exynos: dsi: add support for Exynos5433")
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_dsi.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index eae44fd714f0..7c3030b7e586 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1264,15 +1264,15 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
 
 	if (status & DSIM_INT_SW_RST_RELEASE) {
 		u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
-			DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_FRAME_DONE |
-			DSIM_INT_RX_ECC_ERR | DSIM_INT_SW_RST_RELEASE);
+			DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_RX_ECC_ERR |
+			DSIM_INT_SW_RST_RELEASE);
 		exynos_dsi_write(dsi, DSIM_INTMSK_REG, mask);
 		complete(&dsi->completed);
 		return IRQ_HANDLED;
 	}
 
 	if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
-			DSIM_INT_FRAME_DONE | DSIM_INT_PLL_STABLE)))
+			DSIM_INT_PLL_STABLE)))
 		return IRQ_HANDLED;
 
 	if (exynos_dsi_transfer_finish(dsi))

From e9497dc2f3e0ead4004231b8d282cb4ecdd36463 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski@samsung.com>
Date: Tue, 12 Dec 2017 13:01:15 +0100
Subject: [PATCH 1266/1286] drm/exynos: Fix error value in
 exynos_drm_crtc_get_by_type()

EPERM is not the correct error value when the driver is not able to get
its resources. Change it to ENODEV.

Reported-by: Russell King - ARM Linux <linux@armlinux.org.uk>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_crtc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index dc01342e759a..eea90251808f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -228,7 +228,7 @@ struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
 		if (to_exynos_crtc(crtc)->type == out_type)
 			return to_exynos_crtc(crtc);
 
-	return ERR_PTR(-EPERM);
+	return ERR_PTR(-ENODEV);
 }
 
 int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,

From e9dfe83d8985fcd2e0fac570215a039c24fb6b12 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski@samsung.com>
Date: Wed, 23 May 2018 12:15:50 +0200
Subject: [PATCH 1267/1286] drm/exynos: Fix default value for zpos plane
 property

The default zpos property for all planes in Exynos DRM was fixed at zero.
Fix this by using the proper value provided by the hardware drivers, which
typically matches the hardware window number.

Reported-by: Seung-Woo Kim <sw0312.kim@samsung.com>
Fixes: e47726a11e11 ("drm/exynos: use generic code for managing zpos plane property")
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_plane.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d2a90dae5c71..38a2a7f1204b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -289,13 +289,12 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
 };
 
 static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
-					      bool immutable)
+					      int zpos, bool immutable)
 {
-	/* FIXME */
 	if (immutable)
-		drm_plane_create_zpos_immutable_property(plane, 0);
+		drm_plane_create_zpos_immutable_property(plane, zpos);
 	else
-		drm_plane_create_zpos_property(plane, 0, 0, MAX_PLANE - 1);
+		drm_plane_create_zpos_property(plane, zpos, 0, MAX_PLANE - 1);
 }
 
 int exynos_plane_init(struct drm_device *dev,
@@ -320,7 +319,7 @@ int exynos_plane_init(struct drm_device *dev,
 	exynos_plane->index = index;
 	exynos_plane->config = config;
 
-	exynos_plane_attach_zpos_property(&exynos_plane->base,
+	exynos_plane_attach_zpos_property(&exynos_plane->base, config->zpos,
 			   !(config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS));
 
 	return 0;

From c9dc5abb661b02239eef6cd991700707dc381110 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 17 May 2018 11:11:22 +0800
Subject: [PATCH 1268/1286] drm/amdgpu: Add static CG control for VCN on RV

Implement proper static clockgating support for VCN.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 50 +++++++++++++++++++++------
 1 file changed, 39 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 7fbbdb1e58da..7a366418d5f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -288,14 +288,14 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
  *
  * Disable clock gating for VCN block
  */
-static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
+static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
 {
 	uint32_t data;
 
 	/* JPEG disable CGC */
 	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
 
-	if (sw)
+	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 	else
 		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
@@ -310,7 +310,7 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
 
 	/* UVD disable CGC */
 	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
-	if (sw)
+	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 	else
 		data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
@@ -415,13 +415,13 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
  *
  * Enable clock gating for VCN block
  */
-static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
+static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
 {
 	uint32_t data = 0;
 
 	/* enable JPEG CGC */
 	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
-	if (sw)
+	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 	else
 		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
@@ -435,7 +435,7 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
 
 	/* enable UVD CGC */
 	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
-	if (sw)
+	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
 		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 	else
 		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
@@ -500,7 +500,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
 	vcn_v1_0_mc_resume(adev);
 
 	/* disable clock gating */
-	vcn_v1_0_disable_clock_gating(adev, true);
+	vcn_v1_0_disable_clock_gating(adev);
 
 	/* disable interupt */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
@@ -681,15 +681,43 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
 	/* enable clock gating */
-	vcn_v1_0_enable_clock_gating(adev, true);
+	vcn_v1_0_enable_clock_gating(adev);
 
 	return 0;
 }
 
+bool vcn_v1_0_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
+}
+
+int vcn_v1_0_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret = 0;
+
+	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);
+
+	return ret;
+}
+
 static int vcn_v1_0_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
-	/* needed for driver unload*/
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+
+	if (enable) {
+		/* wait for STATUS to clear */
+		if (vcn_v1_0_is_idle(handle))
+			return -EBUSY;
+		vcn_v1_0_enable_clock_gating(adev);
+	} else {
+		/* disable HW gating and enable Sw gating */
+		vcn_v1_0_disable_clock_gating(adev);
+	}
 	return 0;
 }
 
@@ -1072,8 +1100,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 	.hw_fini = vcn_v1_0_hw_fini,
 	.suspend = vcn_v1_0_suspend,
 	.resume = vcn_v1_0_resume,
-	.is_idle = NULL /* vcn_v1_0_is_idle */,
-	.wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
+	.is_idle = vcn_v1_0_is_idle,
+	.wait_for_idle = vcn_v1_0_wait_for_idle,
 	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
 	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
 	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
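
With set_clockgating_state, is_idle and wait_for_idle now implemented, other
code can drive VCN clock gating through the generic IP-block interface. A
hedged sketch of such a caller (amdgpu_device_ip_set_clockgating_state is
assumed here, mirroring the powergating helper used later in this series):

	static void example_gate_vcn_clocks(struct amdgpu_device *adev, bool gate)
	{
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       gate ? AMD_CG_STATE_GATE :
							      AMD_CG_STATE_UNGATE);
	}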

From 79953a60e4476be90fa1767fbf49a76b6a8b01ef Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 17 May 2018 11:13:51 +0800
Subject: [PATCH 1269/1286] drm/amdgpu: Enable VCN CG by default on RV

Enable VCN clockgating by default on Raven.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0e4f67e4c875..2cf9a188131d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -710,7 +710,8 @@ static int soc15_common_early_init(void *handle)
 			AMD_CG_SUPPORT_MC_MGCG |
 			AMD_CG_SUPPORT_MC_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
-			AMD_CG_SUPPORT_SDMA_LS;
+			AMD_CG_SUPPORT_SDMA_LS |
+			AMD_CG_SUPPORT_VCN_MGCG;
 		adev->pg_flags = AMD_PG_SUPPORT_SDMA;
 
 		if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)

From d58c5d9a42050c93f17ba82aaff0f34a30761ac7 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 17 May 2018 16:07:02 +0800
Subject: [PATCH 1270/1286] drm/amdgpu: Add VCN static PG support on RV

Implement static powergating support on VCN.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 11 +++
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   | 92 ++++++++++++++++++++++++-
 2 files changed, 102 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 2fd7db891689..181e6afa9847 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -45,6 +45,17 @@
 #define VCN_ENC_CMD_REG_WRITE		0x0000000b
 #define VCN_ENC_CMD_REG_WAIT		0x0000000c
 
+enum engine_status_constants {
+	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
+	UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
+	UVD_STATUS__UVD_BUSY = 0x00000004,
+	GB_ADDR_CONFIG_DEFAULT = 0x26010011,
+	UVD_STATUS__IDLE = 0x2,
+	UVD_STATUS__BUSY = 0x5,
+	UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
+	UVD_STATUS__RBC_BUSY = 0x1,
+};
+
 struct amdgpu_vcn {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 7a366418d5f4..dcb60ee0d9e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -480,6 +480,94 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
 	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
 }
 
+static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
+{
+	uint32_t data = 0;
+	int ret;
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
+
+		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
+	} else {
+		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
+			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
+		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0,  0xFFFFFFFF, ret);
+	}
+
+	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS , UVDU_PWR_STATUS are 0 (power on) */
+
+	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
+	data &= ~0x103;
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
+		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
+
+	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
+}
+
+static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
+{
+	uint32_t data = 0;
+	int ret;
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+		/* Before power off, this indicator has to be turned on */
+		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
+		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
+		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
+
+
+		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
+			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
+
+		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
+
+		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
+			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
+	}
+}
+
 /**
  * vcn_v1_0_start - start VCN block
  *
@@ -499,6 +587,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
 
 	vcn_v1_0_mc_resume(adev);
 
+	vcn_1_0_disable_static_power_gating(adev);
 	/* disable clock gating */
 	vcn_v1_0_disable_clock_gating(adev);
 
@@ -681,8 +770,9 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
 	/* enable clock gating */
-	vcn_v1_0_enable_clock_gating(adev);
 
+	vcn_v1_0_enable_clock_gating(adev);
+	vcn_1_0_enable_static_power_gating(adev);
 	return 0;
 }
 

From 61c8e90d965ba944d8b56c29c2c7bb9ec34f45d5 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Thu, 17 May 2018 16:03:47 +0800
Subject: [PATCH 1271/1286] drm/amdgpu: Enable VCN static PG by default on RV

Enable static VCN powergating by default on Raven.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 2cf9a188131d..68b4a22a8892 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -712,7 +712,8 @@ static int soc15_common_early_init(void *handle)
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS |
 			AMD_CG_SUPPORT_VCN_MGCG;
-		adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+
+		adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
 
 		if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |

From 22cc6c5e1958e5a08b4c44203d1810ab07ce5a16 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Wed, 16 May 2018 20:18:22 +0800
Subject: [PATCH 1272/1286] drm/amdgpu: Add runtime VCN PG support

Enable support for dynamically powering up/down VCN on demand.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 16 +++++++------
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   | 32 ++++++++++++++++---------
 2 files changed, 30 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 60468385e6b4..8851bcdfc260 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -212,11 +212,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 	}
 
 	if (fences == 0) {
-		if (adev->pm.dpm_enabled) {
-			/* might be used when with pg/cg
+		if (adev->pm.dpm_enabled)
 			amdgpu_dpm_enable_uvd(adev, false);
-			*/
-		}
+		else
+			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+							       AMD_PG_STATE_GATE);
 	} else {
 		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 	}
@@ -228,9 +228,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
 	if (set_clocks && adev->pm.dpm_enabled) {
-		/* might be used when with pg/cg
-		amdgpu_dpm_enable_uvd(adev, true);
-		*/
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, true);
+		else
+			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+							       AMD_PG_STATE_UNGATE);
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index dcb60ee0d9e2..110b294ebed3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -35,7 +35,6 @@
 #include "mmhub/mmhub_9_1_offset.h"
 #include "mmhub/mmhub_9_1_sh_mask.h"
 
-static int vcn_v1_0_start(struct amdgpu_device *adev);
 static int vcn_v1_0_stop(struct amdgpu_device *adev);
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -146,10 +145,6 @@ static int vcn_v1_0_hw_init(void *handle)
 	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
 	int i, r;
 
-	r = vcn_v1_0_start(adev);
-	if (r)
-		goto done;
-
 	ring->ready = true;
 	r = amdgpu_ring_test_ring(ring);
 	if (r) {
@@ -185,11 +180,9 @@ static int vcn_v1_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
-	int r;
 
-	r = vcn_v1_0_stop(adev);
-	if (r)
-		return r;
+	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+		vcn_v1_0_stop(adev);
 
 	ring->ready = false;
 
@@ -769,7 +762,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
-	/* enable clock gating */
+	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);
 
 	vcn_v1_0_enable_clock_gating(adev);
 	vcn_1_0_enable_static_power_gating(adev);
@@ -1179,6 +1172,23 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
 	}
 }
 
+static int vcn_v1_0_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	/* This doesn't actually powergate the VCN block.
+	 * That's done in the dpm code via the SMC.  This
+	 * just re-inits the block as necessary.  The actual
+	 * gating still happens in the dpm code.  We should
+	 * revisit this when there is a cleaner line between
+	 * the smc and the hw blocks
+	 */
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (state == AMD_PG_STATE_GATE)
+		return vcn_v1_0_stop(adev);
+	else
+		return vcn_v1_0_start(adev);
+}
 
 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 	.name = "vcn_v1_0",
@@ -1197,7 +1207,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
 	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
 	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
-	.set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
+	.set_powergating_state = vcn_v1_0_set_powergating_state,
 };
 
 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {

From af4c0f650b563c7b30c1d8cd2bb926247ceb19cc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 19 Apr 2018 10:56:02 +0200
Subject: [PATCH 1273/1286] drm/amdgpu: rework VM state machine lock handling
 v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Only the moved state needs separate spinlock protection. All other
states are protected by reserving the VM anyway.

v2: fix some more incorrect cases

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66 ++++++++------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 +-
 2 files changed, 21 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1a8f4e0dd023..f0deedcaf1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -119,9 +119,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	 * is currently evicted. add the bo to the evicted list to make sure it
 	 * is validated on next vm use to avoid fault.
 	 * */
-	spin_lock(&vm->status_lock);
 	list_move_tail(&base->vm_status, &vm->evicted);
-	spin_unlock(&vm->status_lock);
 }
 
 /**
@@ -228,7 +226,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
 	int r;
 
-	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->evicted)) {
 		struct amdgpu_vm_bo_base *bo_base;
 		struct amdgpu_bo *bo;
@@ -236,10 +233,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		bo_base = list_first_entry(&vm->evicted,
 					   struct amdgpu_vm_bo_base,
 					   vm_status);
-		spin_unlock(&vm->status_lock);
 
 		bo = bo_base->bo;
-		BUG_ON(!bo);
 		if (bo->parent) {
 			r = validate(param, bo);
 			if (r)
@@ -259,13 +254,14 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 				return r;
 		}
 
-		spin_lock(&vm->status_lock);
-		if (bo->tbo.type != ttm_bo_type_kernel)
+		if (bo->tbo.type != ttm_bo_type_kernel) {
+			spin_lock(&vm->moved_lock);
 			list_move(&bo_base->vm_status, &vm->moved);
-		else
+			spin_unlock(&vm->moved_lock);
+		} else {
 			list_move(&bo_base->vm_status, &vm->relocated);
+		}
 	}
-	spin_unlock(&vm->status_lock);
 
 	return 0;
 }
@@ -279,13 +275,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  */
 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
-	bool ready;
-
-	spin_lock(&vm->status_lock);
-	ready = list_empty(&vm->evicted);
-	spin_unlock(&vm->status_lock);
-
-	return ready;
+	return list_empty(&vm->evicted);
 }
 
 /**
@@ -477,9 +467,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			pt->parent = amdgpu_bo_ref(parent->base.bo);
 
 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-			spin_lock(&vm->status_lock);
 			list_move(&entry->base.vm_status, &vm->relocated);
-			spin_unlock(&vm->status_lock);
 		}
 
 		if (level < AMDGPU_VM_PTB) {
@@ -926,10 +914,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
 		if (!entry->base.bo)
 			continue;
 
-		spin_lock(&vm->status_lock);
 		if (list_empty(&entry->base.vm_status))
 			list_add(&entry->base.vm_status, &vm->relocated);
-		spin_unlock(&vm->status_lock);
 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
 	}
 }
@@ -974,7 +960,6 @@ restart:
 		params.func = amdgpu_vm_do_set_ptes;
 	}
 
-	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->relocated)) {
 		struct amdgpu_vm_bo_base *bo_base, *parent;
 		struct amdgpu_vm_pt *pt, *entry;
@@ -984,13 +969,10 @@ restart:
 					   struct amdgpu_vm_bo_base,
 					   vm_status);
 		list_del_init(&bo_base->vm_status);
-		spin_unlock(&vm->status_lock);
 
 		bo = bo_base->bo->parent;
-		if (!bo) {
-			spin_lock(&vm->status_lock);
+		if (!bo)
 			continue;
-		}
 
 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
 					  bo_list);
@@ -999,12 +981,10 @@ restart:
 
 		amdgpu_vm_update_pde(&params, vm, pt, entry);
 
-		spin_lock(&vm->status_lock);
 		if (!vm->use_cpu_for_update &&
 		    (ndw - params.ib->length_dw) < 32)
 			break;
 	}
-	spin_unlock(&vm->status_lock);
 
 	if (vm->use_cpu_for_update) {
 		/* Flush HDP */
@@ -1107,9 +1087,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 		if (entry->huge) {
 			/* Add the entry to the relocated list to update it. */
 			entry->huge = false;
-			spin_lock(&p->vm->status_lock);
 			list_move(&entry->base.vm_status, &p->vm->relocated);
-			spin_unlock(&p->vm->status_lock);
 		}
 		return;
 	}
@@ -1588,8 +1566,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		amdgpu_asic_flush_hdp(adev, NULL);
 	}
 
-	spin_lock(&vm->status_lock);
+	spin_lock(&vm->moved_lock);
 	list_del_init(&bo_va->base.vm_status);
+	spin_unlock(&vm->moved_lock);
 
 	/* If the BO is not in its preferred location add it back to
 	 * the evicted list so that it gets validated again on the
@@ -1599,7 +1578,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	    !(bo->preferred_domains &
 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
 		list_add_tail(&bo_va->base.vm_status, &vm->evicted);
-	spin_unlock(&vm->status_lock);
 
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	bo_va->cleared = clear;
@@ -1811,14 +1789,14 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	bool clear;
 	int r = 0;
 
-	spin_lock(&vm->status_lock);
+	spin_lock(&vm->moved_lock);
 	while (!list_empty(&vm->moved)) {
 		struct amdgpu_bo_va *bo_va;
 		struct reservation_object *resv;
 
 		bo_va = list_first_entry(&vm->moved,
 			struct amdgpu_bo_va, base.vm_status);
-		spin_unlock(&vm->status_lock);
+		spin_unlock(&vm->moved_lock);
 
 		resv = bo_va->base.bo->tbo.resv;
 
@@ -1839,9 +1817,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		if (!clear && resv != vm->root.base.bo->tbo.resv)
 			reservation_object_unlock(resv);
 
-		spin_lock(&vm->status_lock);
+		spin_lock(&vm->moved_lock);
 	}
-	spin_unlock(&vm->status_lock);
+	spin_unlock(&vm->moved_lock);
 
 	return r;
 }
@@ -1903,10 +1881,10 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 		amdgpu_vm_prt_get(adev);
 
 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-		spin_lock(&vm->status_lock);
+		spin_lock(&vm->moved_lock);
 		if (list_empty(&bo_va->base.vm_status))
 			list_add(&bo_va->base.vm_status, &vm->moved);
-		spin_unlock(&vm->status_lock);
+		spin_unlock(&vm->moved_lock);
 	}
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
@@ -2216,9 +2194,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	list_del(&bo_va->base.bo_list);
 
-	spin_lock(&vm->status_lock);
+	spin_lock(&vm->moved_lock);
 	list_del(&bo_va->base.vm_status);
-	spin_unlock(&vm->status_lock);
+	spin_unlock(&vm->moved_lock);
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
@@ -2261,28 +2239,24 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 
 		bo_base->moved = true;
 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-			spin_lock(&bo_base->vm->status_lock);
 			if (bo->tbo.type == ttm_bo_type_kernel)
 				list_move(&bo_base->vm_status, &vm->evicted);
 			else
 				list_move_tail(&bo_base->vm_status,
 					       &vm->evicted);
-			spin_unlock(&bo_base->vm->status_lock);
 			continue;
 		}
 
 		if (bo->tbo.type == ttm_bo_type_kernel) {
-			spin_lock(&bo_base->vm->status_lock);
 			if (list_empty(&bo_base->vm_status))
 				list_add(&bo_base->vm_status, &vm->relocated);
-			spin_unlock(&bo_base->vm->status_lock);
 			continue;
 		}
 
-		spin_lock(&bo_base->vm->status_lock);
+		spin_lock(&bo_base->vm->moved_lock);
 		if (list_empty(&bo_base->vm_status))
 			list_add(&bo_base->vm_status, &vm->moved);
-		spin_unlock(&bo_base->vm->status_lock);
+		spin_unlock(&bo_base->vm->moved_lock);
 	}
 }
 
@@ -2391,9 +2365,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->va = RB_ROOT_CACHED;
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		vm->reserved_vmid[i] = NULL;
-	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->evicted);
 	INIT_LIST_HEAD(&vm->relocated);
+	spin_lock_init(&vm->moved_lock);
 	INIT_LIST_HEAD(&vm->moved);
 	INIT_LIST_HEAD(&vm->freed);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d6827083572a..0196b9a782f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -168,9 +168,6 @@ struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
 	struct rb_root_cached	va;
 
-	/* protecting invalidated */
-	spinlock_t		status_lock;
-
 	/* BOs who needs a validation */
 	struct list_head	evicted;
 
@@ -179,6 +176,7 @@ struct amdgpu_vm {
 
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head	moved;
+	spinlock_t		moved_lock;
 
 	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head	freed;
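
The locking rule after this patch: the evicted and relocated lists are only
touched while the VM is reserved, so they need no extra lock, while the moved
list is guarded by the new moved_lock. A minimal sketch of the discipline
(the helper is illustrative; the fields and the lock come from the patch
above):

	static void example_mark_bo_moved(struct amdgpu_vm *vm,
					  struct amdgpu_vm_bo_base *base)
	{
		spin_lock(&vm->moved_lock);
		list_move(&base->vm_status, &vm->moved);
		spin_unlock(&vm->moved_lock);
	}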

From 91ccdd24a1955dbec97a6d61322be214b7de1974 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 19 Apr 2018 11:02:54 +0200
Subject: [PATCH 1274/1286] drm/amdgpu: cleanup amdgpu_vm_validate_pt_bos v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use list_for_each_entry_safe here.

v2: Drop the optimization, it doesn't work as expected.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f0deedcaf1c9..3be4d5fc60b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -224,21 +224,16 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			      void *param)
 {
 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
-	int r;
+	struct amdgpu_vm_bo_base *bo_base, *tmp;
+	int r = 0;
 
-	while (!list_empty(&vm->evicted)) {
-		struct amdgpu_vm_bo_base *bo_base;
-		struct amdgpu_bo *bo;
+	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
+		struct amdgpu_bo *bo = bo_base->bo;
 
-		bo_base = list_first_entry(&vm->evicted,
-					   struct amdgpu_vm_bo_base,
-					   vm_status);
-
-		bo = bo_base->bo;
 		if (bo->parent) {
 			r = validate(param, bo);
 			if (r)
-				return r;
+				break;
 
 			spin_lock(&glob->lru_lock);
 			ttm_bo_move_to_lru_tail(&bo->tbo);
@@ -251,7 +246,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		    vm->use_cpu_for_update) {
 			r = amdgpu_bo_kmap(bo, NULL);
 			if (r)
-				return r;
+				break;
 		}
 
 		if (bo->tbo.type != ttm_bo_type_kernel) {
@@ -263,7 +258,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		}
 	}
 
-	return 0;
+	return r;
 }
 
 /**

From 789f3317ed33e34fa97c8918c075c68a62e51a4d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 19 Apr 2018 11:08:24 +0200
Subject: [PATCH 1275/1286] drm/amdgpu: further optimize amdgpu_vm_handle_moved
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Splice the moved list to a local one to avoid taking the lock over and
over again.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 27 +++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3be4d5fc60b3..4d88b060fbde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1781,19 +1781,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			   struct amdgpu_vm *vm)
 {
+	struct amdgpu_bo_va *bo_va, *tmp;
+	struct list_head moved;
 	bool clear;
-	int r = 0;
+	int r;
 
+	INIT_LIST_HEAD(&moved);
 	spin_lock(&vm->moved_lock);
-	while (!list_empty(&vm->moved)) {
-		struct amdgpu_bo_va *bo_va;
-		struct reservation_object *resv;
+	list_splice_init(&vm->moved, &moved);
+	spin_unlock(&vm->moved_lock);
 
-		bo_va = list_first_entry(&vm->moved,
-			struct amdgpu_bo_va, base.vm_status);
-		spin_unlock(&vm->moved_lock);
-
-		resv = bo_va->base.bo->tbo.resv;
+	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
+		struct reservation_object *resv = bo_va->base.bo->tbo.resv;
 
 		/* Per VM BOs never need to bo cleared in the page tables */
 		if (resv == vm->root.base.bo->tbo.resv)
@@ -1806,17 +1805,19 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 			clear = true;
 
 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
-		if (r)
+		if (r) {
+			spin_lock(&vm->moved_lock);
+			list_splice(&moved, &vm->moved);
+			spin_unlock(&vm->moved_lock);
 			return r;
+		}
 
 		if (!clear && resv != vm->root.base.bo->tbo.resv)
 			reservation_object_unlock(resv);
 
-		spin_lock(&vm->moved_lock);
 	}
-	spin_unlock(&vm->moved_lock);
 
-	return r;
+	return 0;
 }
 
 /**
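
The splice-and-drain pattern used above takes the lock once, moves the whole
moved list to a local head, iterates without the lock held, and only re-takes
it on failure to put the unprocessed entries back. A condensed, hedged sketch
(illustrative only; the real loop also handles per-VM reservation objects):

	static int example_drain_moved(struct amdgpu_vm *vm,
				       int (*update)(struct amdgpu_bo_va *bo_va))
	{
		struct amdgpu_bo_va *bo_va, *tmp;
		struct list_head moved;
		int r;

		INIT_LIST_HEAD(&moved);
		spin_lock(&vm->moved_lock);
		list_splice_init(&vm->moved, &moved);
		spin_unlock(&vm->moved_lock);

		list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
			r = update(bo_va);
			if (r) {
				/* put the unprocessed entries back for a retry */
				spin_lock(&vm->moved_lock);
				list_splice(&moved, &vm->moved);
				spin_unlock(&vm->moved_lock);
				return r;
			}
		}

		return 0;
	}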

From a7f91061c60ad9cac2e6a03b642be6a4f88b3662 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 19 Apr 2018 13:58:42 +0200
Subject: [PATCH 1276/1286] drm/amdgpu: kmap PDs/PTs in
 amdgpu_vm_update_directories
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In theory it is possible that PDs/PTs can move without eviction.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4d88b060fbde..a31afac8e8e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -242,13 +242,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			spin_unlock(&glob->lru_lock);
 		}
 
-		if (bo->tbo.type == ttm_bo_type_kernel &&
-		    vm->use_cpu_for_update) {
-			r = amdgpu_bo_kmap(bo, NULL);
-			if (r)
-				break;
-		}
-
 		if (bo->tbo.type != ttm_bo_type_kernel) {
 			spin_lock(&vm->moved_lock);
 			list_move(&bo_base->vm_status, &vm->moved);
@@ -940,6 +933,14 @@ restart:
 	params.adev = adev;
 
 	if (vm->use_cpu_for_update) {
+		struct amdgpu_vm_bo_base *bo_base;
+
+		list_for_each_entry(bo_base, &vm->relocated, vm_status) {
+			r = amdgpu_bo_kmap(bo_base->bo, NULL);
+			if (unlikely(r))
+				return r;
+		}
+
 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
 		if (unlikely(r))
 			return r;

From 862b8c5762e4e2324d18c881ce86062af72b2063 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 19 Apr 2018 14:22:56 +0200
Subject: [PATCH 1277/1286] drm/amdgpu: consistently use VM moved flag
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Instead of sometimes checking whether the vm_status list is empty, use the
moved flag, and reset it when the BO leaves the state machine.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 28 ++++++++++++++------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a31afac8e8e9..f5dee4c6757c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -902,8 +902,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
 		if (!entry->base.bo)
 			continue;
 
-		if (list_empty(&entry->base.vm_status))
-			list_add(&entry->base.vm_status, &vm->relocated);
+		if (!entry->base.moved)
+			list_move(&entry->base.vm_status, &vm->relocated);
 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
 	}
 }
@@ -964,6 +964,7 @@ restart:
 		bo_base = list_first_entry(&vm->relocated,
 					   struct amdgpu_vm_bo_base,
 					   vm_status);
+		bo_base->moved = false;
 		list_del_init(&bo_base->vm_status);
 
 		bo = bo_base->bo->parent;
@@ -1877,10 +1878,10 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	if (mapping->flags & AMDGPU_PTE_PRT)
 		amdgpu_vm_prt_get(adev);
 
-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+	    !bo_va->base.moved) {
 		spin_lock(&vm->moved_lock);
-		if (list_empty(&bo_va->base.vm_status))
-			list_add(&bo_va->base.vm_status, &vm->moved);
+		list_move(&bo_va->base.vm_status, &vm->moved);
 		spin_unlock(&vm->moved_lock);
 	}
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
@@ -2233,6 +2234,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 
 	list_for_each_entry(bo_base, &bo->va, bo_list) {
 		struct amdgpu_vm *vm = bo_base->vm;
+		bool was_moved = bo_base->moved;
 
 		bo_base->moved = true;
 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
@@ -2244,16 +2246,16 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			continue;
 		}
 
-		if (bo->tbo.type == ttm_bo_type_kernel) {
-			if (list_empty(&bo_base->vm_status))
-				list_add(&bo_base->vm_status, &vm->relocated);
+		if (was_moved)
 			continue;
-		}
 
-		spin_lock(&bo_base->vm->moved_lock);
-		if (list_empty(&bo_base->vm_status))
-			list_add(&bo_base->vm_status, &vm->moved);
-		spin_unlock(&bo_base->vm->moved_lock);
+		if (bo->tbo.type == ttm_bo_type_kernel) {
+			list_move(&bo_base->vm_status, &vm->relocated);
+		} else {
+			spin_lock(&bo_base->vm->moved_lock);
+			list_move(&bo_base->vm_status, &vm->moved);
+			spin_unlock(&bo_base->vm->moved_lock);
+		}
 	}
 }
 

From 806f043f0253a76248c554ce9f7303bc25e43314 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 19 Apr 2018 15:01:12 +0200
Subject: [PATCH 1278/1286] drm/amdgpu: move VM BOs on LRU again
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Move all BOs belonging to a VM to the tail of the LRU with every submission.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 28 +++++++++++++++++++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  3 +++
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f5dee4c6757c..ccba88cc8c54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -251,6 +251,19 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		}
 	}
 
+	spin_lock(&glob->lru_lock);
+	list_for_each_entry(bo_base, &vm->idle, vm_status) {
+		struct amdgpu_bo *bo = bo_base->bo;
+
+		if (!bo->parent)
+			continue;
+
+		ttm_bo_move_to_lru_tail(&bo->tbo);
+		if (bo->shadow)
+			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+	}
+	spin_unlock(&glob->lru_lock);
+
 	return r;
 }
 
@@ -965,7 +978,7 @@ restart:
 					   struct amdgpu_vm_bo_base,
 					   vm_status);
 		bo_base->moved = false;
-		list_del_init(&bo_base->vm_status);
+		list_move(&bo_base->vm_status, &vm->idle);
 
 		bo = bo_base->bo->parent;
 		if (!bo)
@@ -1571,10 +1584,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
 	 */
-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
-	    !(bo->preferred_domains &
-	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
-		list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+		uint32_t mem_type = bo->tbo.mem.mem_type;
+
+		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+		else
+			list_add(&bo_va->base.vm_status, &vm->idle);
+	}
 
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	bo_va->cleared = clear;
@@ -2368,6 +2385,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	INIT_LIST_HEAD(&vm->relocated);
 	spin_lock_init(&vm->moved_lock);
 	INIT_LIST_HEAD(&vm->moved);
+	INIT_LIST_HEAD(&vm->idle);
 	INIT_LIST_HEAD(&vm->freed);
 
 	/* create scheduler entity for page table updates */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 0196b9a782f2..061b99a18cb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -178,6 +178,9 @@ struct amdgpu_vm {
 	struct list_head	moved;
 	spinlock_t		moved_lock;
 
+	/* All BOs of this VM not currently in the state machine */
+	struct list_head	idle;
+
 	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head	freed;
 

From 52bf20f414bc34899203307034227ccc95d6adb3 Mon Sep 17 00:00:00 2001
From: Emily Deng <Emily.Deng@amd.com>
Date: Wed, 23 May 2018 15:53:03 +0800
Subject: [PATCH 1279/1286] drm/sched: add rcu_barrier after entity fini
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Freeing a fence from the amdgpu_fence_slab takes two call_rcu() passes. To keep
amdgpu_fence_slab_fini from calling kmem_cache_destroy(amdgpu_fence_slab) before
kmem_cache_free(amdgpu_fence_slab, fence) has run, add an rcu_barrier after
drm_sched_entity_fini.

The call chain that leads to kmem_cache_free(amdgpu_fence_slab, fence) is as follows:
1.drm_sched_entity_fini ->
drm_sched_entity_cleanup ->
dma_fence_put(entity->last_scheduled) ->
drm_sched_fence_release_finished ->
drm_sched_fence_release_scheduled ->
call_rcu(&fence->finished.rcu, drm_sched_fence_free)

2.drm_sched_fence_free ->
dma_fence_put(fence->parent) ->
amdgpu_fence_release ->
call_rcu(&f->rcu, amdgpu_fence_free) ->
kmem_cache_free(amdgpu_fence_slab, fence);

v2: put the barrier before the kmem_cache_destroy
v3: put the dma_fence_put(fence->parent) before call_rcu in
drm_sched_fence_release_scheduled
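
The teardown-side barrier the message refers to is not part of this diff; a minimal sketch of what it describes, assuming amdgpu_fence_slab_fini is where the slab is destroyed:

	void amdgpu_fence_slab_fini(void)
	{
		/* wait for the pending call_rcu() callbacks that free
		 * fences back into the slab before destroying it */
		rcu_barrier();
		kmem_cache_destroy(amdgpu_fence_slab);
	}

The hunk below complements that barrier: by dropping the parent reference in drm_sched_fence_release_scheduled() instead of inside the RCU callback, the amdgpu fence's own call_rcu() is queued when the last reference is dropped rather than from within another pending callback, so a single rcu_barrier() covers it.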

Signed-off-by: Emily Deng <Emily.Deng@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/sched_fence.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 786b47f15783..df4461648e3f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -98,7 +98,6 @@ static void drm_sched_fence_free(struct rcu_head *rcu)
 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
 
-	dma_fence_put(fence->parent);
 	kmem_cache_free(sched_fence_slab, fence);
 }
 
@@ -114,6 +113,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
 {
 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
 
+	dma_fence_put(fence->parent);
 	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
 }
 

From bf20f0ab544d8982af375c87e3d870d45237eecc Mon Sep 17 00:00:00 2001
From: Alex Deucher <alexander.deucher@amd.com>
Date: Mon, 21 May 2018 10:16:28 -0500
Subject: [PATCH 1280/1286] drm/amdgpu/pp: remove duplicate assignment
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The is_dpm_running callback was assigned the same value
twice. Drop the duplicate.

Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index a40f7141131c..2de48959ac93 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -2379,6 +2379,5 @@ const struct pp_smumgr_func vegam_smu_funcs = {
 	.update_sclk_threshold = vegam_update_sclk_threshold,
 	.is_hw_avfs_present = vegam_is_hw_avfs_present,
 	.thermal_avfs_enable = vegam_thermal_avfs_enable,
-	.is_dpm_running = vegam_is_dpm_running,
 	.thermal_setup_fan_table = vegam_thermal_setup_fan_table,
 };

From f9fb22a21b380b14f70048fe719875e3523ac7d8 Mon Sep 17 00:00:00 2001
From: Shaoyun Liu <Shaoyun.Liu@amd.com>
Date: Tue, 22 May 2018 11:45:41 -0400
Subject: [PATCH 1281/1286] drm/amdgpu: Update GFX info structure to match what
 vega20 used

Update to the latest version from the vbios team.

Signed-off-by: Shaoyun Liu <Shaoyun.Liu@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 8 +++-----
 drivers/gpu/drm/amd/include/atomfirmware.h       | 3 ++-
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 7014d5875d5b..236915849cfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -354,11 +354,9 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
 				le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
 			adev->gfx.config.double_offchip_lds_buf =
 				gfx_info->v24.gc_double_offchip_lds_buffer;
-			adev->gfx.cu_info.wave_front_size = gfx_info->v24.gc_wave_size;
-			adev->gfx.cu_info.max_waves_per_simd =
-				le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
-			adev->gfx.cu_info.max_scratch_slots_per_cu =
-				gfx_info->v24.gc_max_scratch_slots_per_cu;
+			adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
+			adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
+			adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
 			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
 			return 0;
 		default:
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index fd5e80c92ed0..c6c1666ac120 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1240,7 +1240,6 @@ struct  atom_gfx_info_v2_4 {
   uint8_t active_cu_per_sh;
   uint8_t active_rb_per_se;
   uint16_t gcgoldenoffset;
-  uint32_t rm21_sram_vmin_value;
   uint16_t gc_num_gprs;
   uint16_t gc_gsprim_buff_depth;
   uint16_t gc_parameter_cache_depth;
@@ -1251,6 +1250,8 @@ struct  atom_gfx_info_v2_4 {
   uint8_t gc_gs_table_depth;
   uint8_t gc_double_offchip_lds_buffer;
   uint8_t gc_max_scratch_slots_per_cu;
+  uint32_t sram_rm_fuses_val;
+  uint32_t sram_custom_rm_fuses_val;
 };
 
 /* 

From b8f3439fa5358ac84d29fa2f4afa115500dec74c Mon Sep 17 00:00:00 2001
From: David Francis <David.Francis@amd.com>
Date: Thu, 24 May 2018 10:40:12 -0400
Subject: [PATCH 1282/1286] drm/amd/display: Remove use of division operator
 for long longs

In fixed31_32.h, in dc_fixpt_shl, '/' was used to divide one long long
int by another. As there is no built-in long long division in C, gcc
inserted a call to its own helper function; however, the library that
contains this helper is not linked here. To avoid this, use bitwise
operators instead of '/'.
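
The replacement bound in the hunk below is numerically identical to the quotient it replaces; a short sketch of the equivalence:

	/*
	 * With two's complement arithmetic, for 0 < shift < 63:
	 *
	 *   LLONG_MAX >> shift         ==  (1LL << (63 - shift)) - 1
	 *   ~(LLONG_MAX >> shift)      == -((LLONG_MAX >> shift) + 1)
	 *                              == -(1LL << (63 - shift))
	 *   LLONG_MIN / (1LL << shift) == -(1LL << (63 - shift))
	 *
	 * (for shift == 0 both forms are simply LLONG_MIN), so the ASSERT
	 * checks the same lower bound without emitting a 64-bit division.
	 */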

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: David Francis <David.Francis@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 76f64e910422..bb0d4ebba9f0 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -209,7 +209,7 @@ static inline struct fixed31_32 dc_fixpt_clamp(
 static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift)
 {
 	ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
-		((arg.value < 0) && (arg.value >= (LLONG_MIN / (1LL << shift)))));
+		((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
 
 	arg.value = arg.value << shift;
 

From c3032fd9673468783bb20326ead823a2f321a522 Mon Sep 17 00:00:00 2001
From: Tom Stellard <tstellar@redhat.com>
Date: Thu, 24 May 2018 14:07:14 -0700
Subject: [PATCH 1283/1286] drm/amdgpu: Use dev_info() to report amdkfd is not
 supported for this ASIC

This is an important message, so it should be visible to users without
having to enable extra debugging.

Signed-off-by: Tom Stellard <tstellar@redhat.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 95fcbd8a4bf3..8f6f45567bfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -103,7 +103,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
 		break;
 	default:
-		dev_dbg(adev->dev, "kfd not supported on this ASIC\n");
+		dev_info(adev->dev, "kfd not supported on this ASIC\n");
 		return;
 	}
 

From 12678199c77d196ee23ca2cde4318b6a0e343687 Mon Sep 17 00:00:00 2001
From: Inki Dae <inki.dae@samsung.com>
Date: Wed, 23 May 2018 09:48:15 +0900
Subject: [PATCH 1284/1286] drm/exynos: scaler: fix static checker warning

drivers/gpu/drm/exynos/exynos_drm_scaler.c:402 scaler_task_done()
warn: signedness bug returning '(-22)'
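
The root of the warning is the return type: a bool return converts the negative errno to true, i.e. the value 1, so the caller can never see the error code. A minimal illustration, not taken from the driver:

	static inline bool buggy_done(u32 status, u32 frame_end_bit)
	{
		/* -EINVAL (-22) is converted to bool true here */
		return status & frame_end_bit ? 0 : -EINVAL;
	}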

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_scaler.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 63b05b7c846a..91d4382343d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -397,7 +397,7 @@ static inline u32 scaler_get_int_status(struct scaler_context *scaler)
 	return scaler_read(SCALER_INT_STATUS);
 }
 
-static inline bool scaler_task_done(u32 val)
+static inline int scaler_task_done(u32 val)
 {
 	return val & SCALER_INT_STATUS_FRAME_END ? 0 : -EINVAL;
 }

From 19832055e2bf5e67f506bac62e4e07326fb545b7 Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@oracle.com>
Date: Fri, 18 May 2018 01:04:45 -0700
Subject: [PATCH 1285/1286] drm/exynos: fimc: signedness bug in
 fimc_setup_clocks()

"id" needs to be signed for the error handling to work.

Fixes: 7a2d5c77c558 ("drm/exynos: fimc: Convert driver to IPP v2 core API")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 4dfbfc7f3b84..5ce84025d1cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1200,7 +1200,7 @@ e_clk_free:
 
 int exynos_drm_check_fimc_device(struct device *dev)
 {
-	unsigned int id = of_alias_get_id(dev->of_node, "fimc");
+	int id = of_alias_get_id(dev->of_node, "fimc");
 
 	if (id >= 0 && (BIT(id) & fimc_mask))
 		return 0;

From ebe1d22b57b86b6739f2739b5a0f52435596d84d Mon Sep 17 00:00:00 2001
From: Arnd Bergmann <arnd@arndb.de>
Date: Fri, 25 May 2018 17:50:09 +0200
Subject: [PATCH 1286/1286] drm/amdgpu: fix 32-bit build warning

Casting a pointer to a 64-bit type causes a warning on 32-bit targets:

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:24: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast]
          lower_32_bits((uint64_t)wptr));
                        ^
drivers/gpu/drm/amd/amdgpu/amdgpu.h:1701:53: note: in definition of macro 'WREG32'
 #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
                                                     ^
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:10: note: in expansion of macro 'lower_32_bits'
          lower_32_bits((uint64_t)wptr));
          ^~~~~~~~~~~~~

The correct method is to cast to 'uintptr_t'.
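
A minimal sketch of the difference between the two casts (assumed 32-bit target, hypothetical variable):

	void *wptr;	/* queue write-pointer address, 32 bits wide here */
	u32 lo, hi;

	lo = lower_32_bits((u64)wptr);		/* the form that warned: pointer and u64 differ in size */
	lo = lower_32_bits((uintptr_t)wptr);	/* clean: uintptr_t always matches the pointer width */
	hi = upper_32_bits((uintptr_t)wptr);	/* 0 on 32-bit, upper half on 64-bit */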

Fixes: d5a114a6c5f7 ("drm/amdgpu: Add GFXv9 kfd2kgd interface functions")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 8f37991df61b..f0c0d3953f69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -470,9 +470,9 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
 		       upper_32_bits(guessed_wptr));
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
-		       lower_32_bits((uint64_t)wptr));
+		       lower_32_bits((uintptr_t)wptr));
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
-		       upper_32_bits((uint64_t)wptr));
+		       upper_32_bits((uintptr_t)wptr));
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
 		       get_queue_mask(adev, pipe_id, queue_id));
 	}